| column | dtype | range / classes |
|---|---|---|
| max_stars_repo_path | string | lengths 4-286 |
| max_stars_repo_name | string | lengths 5-119 |
| max_stars_count | int64 | 0-191k |
| id | string | lengths 1-7 |
| content | string | lengths 6-1.03M |
| content_cleaned | string | lengths 6-1.03M |
| language | string | 111 classes |
| language_score | float64 | 0.03-1 |
| comments | string | lengths 0-556k |
| edu_score | float64 | 0.32-5.03 |
| edu_int_score | int64 | 0-5 |

Each record below is shown as `repo path | repo name | stars | id |` followed by the file content.
tests/handlers/bob/test_rebuild_images_on_image_advisory_change.py | mulaievaRH/freshmaker | 5 | 6633151 |
# Copyright (c) 2019 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Written by <NAME> <<EMAIL>>
from unittest.mock import patch, MagicMock
import freshmaker
from freshmaker.errata import ErrataAdvisory
from freshmaker.events import (ErrataAdvisoryStateChangedEvent,
ManualRebuildWithAdvisoryEvent)
from freshmaker.handlers.bob import RebuildImagesOnImageAdvisoryChange
from freshmaker import models, db, conf
from tests import helpers
class RebuildImagesOnImageAdvisoryChangeTest(helpers.ModelsTestCase):
def setUp(self):
super(RebuildImagesOnImageAdvisoryChangeTest, self).setUp()
self.event = ErrataAdvisoryStateChangedEvent(
"123",
ErrataAdvisory(123, "RHBA-2017", "SHIPPED_LIVE", [],
security_impact="",
product_short_name="product"))
self.handler = RebuildImagesOnImageAdvisoryChange()
self.db_event = models.Event.get_or_create(
db.session, self.event.msg_id, self.event.search_key,
self.event.__class__)
def test_can_handle(self):
self.event.advisory.content_types = ["docker"]
ret = self.handler.can_handle(self.event)
self.assertTrue(ret)
def test_can_handle_manual_event(self):
event = ManualRebuildWithAdvisoryEvent(
"123",
ErrataAdvisory(123, "RHBA-2017", "SHIPPED_LIVE", ["docker"],
security_impact="",
product_short_name="product"),
[])
ret = self.handler.can_handle(event)
self.assertTrue(ret)
def test_can_handle_non_docker_advisory(self):
self.event.advisory.content_types = ["rpm"]
ret = self.handler.can_handle(self.event)
self.assertFalse(ret)
@patch.object(freshmaker.conf, 'handler_build_allowlist', new={
'RebuildImagesOnImageAdvisoryChange': {
"image": {"advisory_state": "SHIPPED_LIVE"}
}
})
@patch("freshmaker.handlers.bob.RebuildImagesOnImageAdvisoryChange."
"rebuild_images_depending_on_advisory")
def test_handler_allowed(self, rebuild_images):
self.event.advisory.state = "NEW_FILES"
self.handler.handle(self.event)
rebuild_images.assert_not_called()
self.event.advisory.state = "SHIPPED_LIVE"
self.handler.handle(self.event)
rebuild_images.assert_called_once()
@patch("freshmaker.errata.Errata.get_docker_repo_tags")
@patch("freshmaker.pulp.Pulp.get_docker_repository_name")
@patch("freshmaker.handlers.bob."
"rebuild_images_on_image_advisory_change.requests.get")
@patch.object(freshmaker.conf, 'bob_auth_token', new="x")
@patch.object(freshmaker.conf, 'bob_server_url', new="http://localhost/")
def test_rebuild_images_depending_on_advisory(
self, requests_get, get_docker_repository_name,
get_docker_repo_tags):
get_docker_repo_tags.return_value = {
'foo-container-1-1': {'foo-526': ['5.26', 'latest']},
'bar-container-1-1': {'bar-526': ['5.26', 'latest']}}
get_docker_repository_name.side_effect = [
"scl/foo-526", "scl/bar-526"]
resp1 = MagicMock()
resp1.json.return_value = {
"message": "Foobar",
"impacted": ["bob/repo1", "bob/repo2"]}
resp2 = MagicMock()
resp2.json.return_value = {
"message": "Foobar",
"impacted": ["bob/repo3", "bob/repo4"]}
requests_get.side_effect = [resp1, resp2]
self.handler.rebuild_images_depending_on_advisory(self.db_event, 123)
get_docker_repo_tags.assert_called_once_with(123)
get_docker_repository_name.assert_any_call("bar-526")
get_docker_repository_name.assert_any_call("foo-526")
requests_get.assert_any_call(
'http://localhost/update_children/scl/foo-526',
headers={'Authorization': 'Bearer x'},
timeout=conf.requests_timeout)
requests_get.assert_any_call(
'http://localhost/update_children/scl/bar-526',
headers={'Authorization': 'Bearer x'},
timeout=conf.requests_timeout)
db.session.refresh(self.db_event)
self.assertEqual(self.db_event.state, models.EventState.COMPLETE.value)
builds = set([b.name for b in self.db_event.builds])
self.assertEqual(builds, set(['scl/foo-526', 'scl/bar-526',
'bob/repo1', 'bob/repo2',
'bob/repo3', 'bob/repo4']))
for build in self.db_event.builds:
if build.name in ['bob/repo1', 'bob/repo2']:
self.assertEqual(build.dep_on.name, "scl/foo-526")
elif build.name in ['bob/repo3', 'bob/repo4']:
self.assertEqual(build.dep_on.name, "scl/bar-526")
@patch("freshmaker.errata.Errata.get_docker_repo_tags")
@patch("freshmaker.pulp.Pulp.get_docker_repository_name")
@patch("freshmaker.handlers.bob."
"rebuild_images_on_image_advisory_change.requests.get")
@patch.object(freshmaker.conf, 'bob_auth_token', new="x")
@patch.object(freshmaker.conf, 'bob_server_url', new="http://localhost/")
def test_rebuild_images_depending_on_advisory_unknown_advisory(
self, requests_get, get_docker_repository_name,
get_docker_repo_tags):
get_docker_repo_tags.return_value = None
self.handler.rebuild_images_depending_on_advisory(self.db_event, 123)
get_docker_repo_tags.assert_called_once_with(123)
get_docker_repository_name.assert_not_called()
requests_get.assert_not_called()
@patch("freshmaker.errata.Errata.get_docker_repo_tags")
@patch("freshmaker.pulp.Pulp.get_docker_repository_name")
@patch("freshmaker.handlers.bob."
"rebuild_images_on_image_advisory_change.requests.get")
@patch.object(freshmaker.conf, 'bob_auth_token', new="x")
@patch.object(freshmaker.conf, 'bob_server_url', new="http://localhost/")
def test_rebuild_images_depending_on_advisory_dry_run(
self, requests_get, get_docker_repository_name,
get_docker_repo_tags):
get_docker_repo_tags.return_value = {
'foo-container-1-1': {'foo-526': ['5.26', 'latest']}}
get_docker_repository_name.return_value = "scl/foo-526"
self.handler.force_dry_run()
self.handler.rebuild_images_depending_on_advisory(self.db_event, 123)
get_docker_repo_tags.assert_called_once_with(123)
get_docker_repository_name.assert_called_once_with("foo-526")
requests_get.assert_not_called()
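The mocks above (`Errata.get_docker_repo_tags`, `Pulp.get_docker_repository_name`, Bob's `update_children` endpoint) imply the flow the handler under test performs. The sketch below reconstructs that flow from the test expectations only; it is not Freshmaker's actual implementation, and the `errata`/`pulp`/`conf` parameters are stand-ins for the objects the real handler uses.

```python
import requests

def rebuild_images_depending_on_advisory(errata, pulp, conf, errata_id):
    """Minimal sketch of the flow the mocks above describe (assumed, not actual code)."""
    # {build_nvr: {pulp_repo_id: [tags, ...]}} or None for unknown advisories
    repo_tags = errata.get_docker_repo_tags(errata_id)
    if not repo_tags:
        return []

    impacted = []
    for nvr, repos in repo_tags.items():
        for pulp_repo_id in repos:
            repo_name = pulp.get_docker_repository_name(pulp_repo_id)  # e.g. "foo-526" -> "scl/foo-526"
            resp = requests.get(
                f"{conf.bob_server_url}update_children/{repo_name}",
                headers={"Authorization": f"Bearer {conf.bob_auth_token}"},
                timeout=conf.requests_timeout,
            )
            impacted.extend(resp.json().get("impacted", []))
    return impacted
```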
examples/mongo_ormexample.py | abhinavabcd/blaster | 4 | 6633152 |
from blaster.common_funcs_and_datastructures import get_random_id, cur_ms
from blaster.config import IS_DEV
from blaster.mongo_orm import Connection, Model, Attribute, init_mongo_cluster
#step 1
class SalesAgent(Model):
_collection_name_ = "domain_agents"
company = Attribute(str)
agent_user_id = Attribute(str)
auth_level = Attribute(int, default=1)
load = Attribute(int, default=0)
data = Attribute(dict)
created_at = Attribute(int, default=cur_ms)
updated_at = Attribute(int, default=cur_ms)
def before_update(self):
self.updated_at = cur_ms()
_index_ = [
(company, agent_user_id),
(company, load, {"unique": False})
]
#call this function after you import all mongo db classes/models
def initialize_mongo():
nodes = []
if IS_DEV:
nodes.append(Connection(host="localhost", port=27017, db_name="xyz"))
else:
nodes.append(Connection(host="mongodb://mongo-0.mongo.default.svc/", port=27017, db_name="xyz"))
#check if connection exists
nodes[0].db.client.server_info()
init_mongo_cluster(nodes)
if __name__ == "__main__":
initialize_mongo()
for i in range(10):
sales_agent = SalesAgent(company="example", agent_user_id=get_random_id(5), data={}, load=i).commit()
sales_agent.data["says"] = "hello"
sales_agent.commit()
sales_agent.auth_level = 2
sales_agent.commit()
print(sales_agent.to_dict())
#query single item only
sales_agent = SalesAgent.get(company="example")
print(sales_agent.to_dict())
#direct mongo query
sales_agents = SalesAgent.query({"company": "example", "agent_user_id": {"$in": ["abcd", "pqrs"]}})
#won't have any results (the agent ids above are random)
for i in sales_agents:
print(i.to_dict())
#this returns a map
sales_agents = SalesAgent.query({"company": "example"})
for i in sales_agents:
print(i.to_dict())
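As a small, hedged follow-up to the example above: the compound indexes declared in `_index_` are what make company-scoped lookups cheap. The helpers below reuse only calls already shown in `__main__` (`SalesAgent.query`, `commit`); the function names and field values are made up for illustration.

```python
def least_loaded_agent(company):
    # Uses the (company, load) index declared in _index_ above.
    agents = SalesAgent.query({"company": company})
    return min(agents, key=lambda a: a.load, default=None)

def bump_load(agent):
    agent.load += 1
    agent.commit()  # before_update() refreshes updated_at on every commit
```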
qa_testing/unit_tests.py | chrisasa/freetrade-portofolio-screener | 0 | 6633153 |
import configparser
import logging
import finnhub
config = configparser.ConfigParser()
config.read('configs/config_sec.ini')
api_key = config['finnhub']['ApiToken']
def TEST_get_stock_price():
stock_symbol = 'AMZN'
rsp = finnhub.get_stock_price(stock_symbol)
logging.info(rsp)
if not rsp or rsp <= 0:
logging.error("Stock symbol all capital: FAIL")
else:
logging.info("Stock symbol all capital: SUCCESS")
rsp = finnhub.get_stock_price(stock_symbol.lower())
logging.info(rsp)
if not rsp or rsp <= 0:
logging.error("Stock symbol all lowercase: FAIL")
else:
logging.info("Stock symbol all lowercase: SUCCESS")
def run_finnhub_tests():
TEST_get_stock_price()
def main():
logging_format = "%(asctime)s: %(message)s"
# logging_format = "%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s"
# logging_format = "%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(name)s | %(message)s"
# logging.basicConfig(handlers=[logging.FileHandler(all_logs_file_path),logging.StreamHandler()], format=logging_format, level=logging.INFO, datefmt="%H:%M:%S")
logging.basicConfig(format=logging_format, level=logging.INFO, datefmt="%H:%M:%S")
run_finnhub_tests()
main()
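The tests above treat `finnhub.get_stock_price` as returning a positive number on success and something falsy on failure. The project's own `finnhub.py` wrapper is not shown, so the sketch below is an assumed implementation of that contract built on Finnhub's public quote endpoint; the config path mirrors the one read at the top of the test module.

```python
import configparser
import requests

def get_stock_price(symbol: str):
    """Return the current price for `symbol`, or None if the lookup fails."""
    config = configparser.ConfigParser()
    config.read('configs/config_sec.ini')
    token = config['finnhub']['ApiToken']
    try:
        rsp = requests.get(
            "https://finnhub.io/api/v1/quote",
            params={"symbol": symbol.upper(), "token": token},
            timeout=10,
        )
        rsp.raise_for_status()
        return rsp.json().get("c")  # "c" holds the current price in Finnhub quote responses
    except (requests.RequestException, ValueError, KeyError):
        return None
```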
scripts/parse_ip.py | taojian2009/riverboat | 0 | 6633154 |
import pandas as pd
from server.utils import parse_location
from config import Config
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from server.model import RequestLog
def get_engine():
engine = create_engine(Config.SQLALCHEMY_DATABASE_URI)
return engine
def get_session():
engine = get_engine()
session = sessionmaker(bind=engine)()
return session
if __name__ == '__main__':
engine = get_engine()
sql = "select distinct remote_addr from request_log where country_code is NULL;"
df = pd.read_sql_query(sql, engine)
import time
for ip in df.remote_addr.values.tolist():
extra = parse_location(ip)
data = []
for k, v in extra.items():
if isinstance(v, str):
data.append(f' {k}="{v}" ')
if isinstance(v, float):
data.append(f' {k}={v} ')
snippet = ",".join(data)
sql = f"""update request_log set {snippet} where remote_addr="{ip}";"""
engine.execute(sql)
print(sql)
time.sleep(2)
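The loop above interpolates the values returned by `parse_location` directly into the UPDATE statement, which breaks on quotes and is open to injection. A hedged alternative using bound parameters is sketched below; it assumes the keys returned by `parse_location` match `request_log` column names, just as the original loop does.

```python
from sqlalchemy import text

def update_location(engine, ip, extra):
    # Column names still come from the `extra` keys, but all values are bound
    # parameters, so the driver handles quoting and escaping.
    assignments = ", ".join(f"{k} = :{k}" for k in extra)
    stmt = text(f"UPDATE request_log SET {assignments} WHERE remote_addr = :remote_addr")
    with engine.begin() as conn:
        conn.execute(stmt, {**extra, "remote_addr": ip})
```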
test/analytics/test_gitlab_ci.py | aexvir/the-zoo | 90 | 6633155 |
import yaml
from zoo.analytics.tasks import gitlab_ci as uut
YAML = """\
stages:
- build
- test
- release
- deploy
image: docker:19.03
include:
- 'https://ci-files.skypicker.com/templates/build/coala.yml'
- 'https://ci-files.skypicker.com/templates/build/black.yml'
- 'https://ci-files.skypicker.com/templates/build/docker_build.yml'
- 'https://ci-files.skypicker.com/templates/release/sonarqube_scan.yml'
- 'https://ci-files.skypicker.com/templates/deploy/.crane.yml'
- template: 'Workflows/Branch.yml'
- local: '/templates/localfile.yml'
- project: 'grp/prj'
ref: v1.0.0
file: '/templates/.builds.yml'
- project: 'grp/prj'
file:
- '/templates/.builds.yml'
- '/templates/.tests.yml'
- remote: 'https://gitlab.com/example-project/-/raw/master/.gitlab-ci.yml'
black:
stage: lint-and-build
image: kiwicom/black:19.3b0
script:
- black -l 120 --check --fast docs kw scripts test run.py setup.py
deploy_production:
extends: .crane
variables:
CRANE_SLACK_CHANNEL: the-zoo
environment:
name: production
url: https://zoo.skypicker.com
when: manual
kubeval:
stage: test
image:
name: kiwicom/kubeval
entrypoint: kubeval
script:
- analyze k8s
"""
def test_parse_docker_images():
expected_images = [
("docker", "19.03"),
("kiwicom/black", "19.3b0"),
("kiwicom/kubeval", "latest"),
]
parsed_yaml = yaml.safe_load(YAML)
images = list(uut.parse_docker_images(parsed_yaml))
assert len(images) == 3
for image in images:
assert (image.name, image.version) in expected_images
def test_parse_gitlab_ci_template():
expected_templates = [
"coala",
"black",
"docker_build",
"sonarqube_scan",
".crane",
"template:Workflows/Branch.yml",
"local:/templates/localfile.yml",
"repo:grp/prg//templates/.builds.yml@v1.0.0",
"repo:grp/prg//templates/.builds.yml",
"repo:grp/prg//templates/.tests.yml",
"remote:https://gitlab.com/example-project/-/raw/master/.gitlab-ci.yml",
]
parsed_yaml = yaml.safe_load(YAML)
templates = list(uut.parse_gitlab_ci_template(parsed_yaml))
assert len(templates) == 11
for template in templates:
assert template.name in expected_templates
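The expectations above (a default `latest` tag for `kiwicom/kubeval`, plus support for both the string and mapping forms of `image:`) suggest extraction logic roughly like the sketch below. This is inferred from the test data only, not the-zoo's actual parser, and `DockerImage` is a stand-in for whatever record type `uut` yields.

```python
from collections import namedtuple

DockerImage = namedtuple("DockerImage", "name version")

def parse_docker_images(parsed_yaml):
    for key, value in parsed_yaml.items():
        image = value.get("image") if isinstance(value, dict) else None
        if key == "image":           # top-level default image
            image = value
        if image is None:
            continue
        if isinstance(image, dict):  # long form with name/entrypoint
            image = image["name"]
        name, _, version = image.partition(":")
        yield DockerImage(name, version or "latest")
```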
hierarchyapp/apps.py | KA-Randy-Charity-Jr/hierarchial_data | 0 | 6633156 |
from django.apps import AppConfig
class HierarchyappConfig(AppConfig):
name = 'hierarchyapp'
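For context, an `AppConfig` like the one above only takes effect once the app is registered in the project settings; a typical (hypothetical) entry looks like this.

```python
# settings.py of the enclosing Django project (hypothetical)
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "hierarchyapp.apps.HierarchyappConfig",  # or simply "hierarchyapp"
]
```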
acculturation/datareaders.py | romankoval/comp-acculturation | 9 | 6633157 |
import os
import csv
import json
import re
try:
import mailparser
except ImportError:
print("Warning: failed to load mail-parser module. This will be an issue if you want to process .eml files.")
try:
import unidecode
except ImportError:
print("Warning: failed to load unidecode module. This will be an issue if you want to process .eml files.")
###########################################
def _get_fns_from_dir(dir_fn, ext):
"""
Search dir and subdirs for all files with given extension
"""
if not os.path.isdir(dir_fn):
# Input is a filename not a dir
return [dir_fn]
fns = []
for root, dirs, files in os.walk(dir_fn, topdown=False):
fns += [os.path.join(root, fn) for fn in files if fn.split(".")[-1] == ext]
return fns
###########################################
class CsvDataReader:
def __init__(self, csv_fn):
self.fns = _get_fns_from_dir(csv_fn, "csv")
def __iter__(self):
for fn in self.fns:
with open(fn) as f:
reader = csv.DictReader(f)
for row in reader:
yield row
f.close()
###########################################
class JsonDataReader:
"""
Expectation for these files is that
each individual line in the file is a
json-serialized document
"""
def __init__(self, json_fn):
self.fns = _get_fns_from_dir(json_fn, "json")
def __iter__(self):
for fn in self.fns:
with open(fn) as f:
for i,line in enumerate(f):
d = json.loads(line)
yield d
f.close()
@staticmethod
def write(docs, out_fn):
with open(out_fn, 'w') as outf:
for d in docs:
outf.write(json.dumps(d) + "\n")
outf.close()
###########################################
class EmlDataReader:
def __init__(self, base_dir):
self.base_dir = base_dir
self.fns = _get_fns_from_dir(base_dir, "eml")
def __iter__(self):
"""
Finds all .eml files in self.base_dir
and subdirectories of self.base_dir.
Does its best to parse each email before
releasing.
"""
# Eml exports often include duplicate emails.
# We will try to limit the duplicates we release
msg_ids = set()
for fn in self.fns:
msg = mailparser.parse_from_file(fn)
if msg.message_id in msg_ids:
continue
msg_ids.add(msg.message_id)
# Do our best to clean the msg body
body = self._clean_body(msg.body)
e = {
"message_id": msg.message_id,
# Keep only email addrs, not attempted parsed names
"from": msg.from_[0][1],
# Combine to and cc fields (i.e., no distinction made
# between direct messages and group messages)
"to": [a[1] for a in msg.to] + [a[1] for a in msg.cc],
"date": str(msg.date),
"subject": msg.subject,
"body": body,
"attachments": [a['filename'] for a in msg.attachments]
}
if not e['from'] or not e['to']:
continue
yield e
# Regexes for some common quoted text beginnings
QUOTED_TXT_RES = [
## With names & email addresses
re.compile(r"On (Mon|Tue|Wed|Thu|Fri|Sat|Sun|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday), (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|May|June|July|August|September|October|November|December) [0-9]+, 201[0-9][,]? (at )?[0-9]+:[0-9][0-9][ ]?(A|P)M[,]? [ a-zA-Z\.\-\"]+[\s]<[\n]?(?:[\w._%+-]+@[\w._%+-]+\.\w{2,})(\n?)>[\s]?wrote:"),
re.compile(r"On (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|May|June|July|August|September|October|November|December) [0-9]+, 201[0-9](,)? (at )?[0-9]+:[0-9][0-9] (AM|PM)?[ ]?[,]? [ a-zA-Z\.\-\"]+[\s]<[\n]?(?:[\w._%+-]+@[\w._%+-]+\.\w{2,})(\n?)>[\s]?wrote:"),
re.compile(r"On 201[0-9]-[0-9][0-9]-[0-9][0-9](,)? (at )?[0-2]?[0-9]:[0-9][0-9][ ]?, [ a-zA-Z\.\-\"]+[\s]<[\n]?(?:[\w._%+-]+@[\w._%+-]+\.\w{2,})[\n]?>[\s]wrote:"),
re.compile(r"On [0-9]?[0-9] (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|May|June|July|August|September|October|November|December) 201[0-9](,)? (at )?[0-9]+:[0-9][0-9][ ]?(AM|PM)?[ ]?[,]? [ a-zA-Z\.\-\"]+[\s]<[\n]?(?:[\w._%+-]+@[\w._%+-]+\.\w{2,})(\n?)>[\s]?wrote:"),
## With names but no email addresses
re.compile(r"On (Mon|Tue|Wed|Thu|Fri|Sat|Sun|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday), (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|May|June|July|August|September|October|November|December) [0-9]+, 201[0-9](,)? (at )?[0-9]+:[0-9][0-9] (A|P)M[ ]?[,]? [ a-zA-Z\.\-\"]+[\s]*wrote:"),
re.compile(r"On (Mon|Tue|Wed|Thu|Fri|Sat|Sun|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday), (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|May|June|July|August|September|October|November|December) [0-9]+, 201[0-9][,]? [ a-zA-Z\.\-\"]+[\s]<[\n]?(?:[\w._%+-]+@[\w._%+-]+\.\w{2,})(\n?)>[\s]?wrote:"),
re.compile(r"On (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|May|June|July|August|September|October|November|December) [0-9]+, 201[0-9](,)? (at )?[0-9]+:[0-9][0-9][ ]?(AM|PM)?[ ]?[,]?[ ]?[ a-zA-Z\.\-\"]+[\s]*wrote:"),
re.compile(r"On 201[0-9]-[0-9][0-9]-[0-9][0-9](,)? (at )?[0-2]?[0-9]:[0-9][0-9][ ]?,[ ]?[ a-zA-Z\.\-\"]+[\s]<[\n]?(?:[\w._%+-]+@[\w._%+-]+\.\w{2,})[\n]?>[\s]wrote:"),
re.compile(r"On [0-9]?[0-9] (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|May|June|July|August|September|October|November|December) 201[0-9](,)? (at )?[0-9]+:[0-9][0-9][ ]?(AM|PM)?[ ]?[,]? [ a-zA-Z\.\-\"]+[\s]wrote:"),
## Different date format
re.compile(r"On [0-9]?[0-9]/[0-9]?[0-9]/201[0-9] (at )?[0-2]?[0-9]:[0-9][0-9][ ]?(AM|PM)?, [ a-zA-Z\.\-\"]+[\s]<[\n]?(?:[\w._%+-]+@[\w._%+-]+\.\w{2,})[\n]?>[\s]wrote:"),
re.compile(r"On [0-9]?[0-9]/[0-9]?[0-9]/201[0-9] (at )?[0-2]?[0-9]:[0-9][0-9][ ]?(AM|PM)?, [ a-zA-Z\.\-\"]+[\s]wrote:"),
## Other boundary markers
re.compile(r"----- Original [Mm]essage -----"),
re.compile(r"--- mail_boundary ---"),
re.compile(r"[Ss]ent from my (iPhone|Windows|Android|mobile)"),
re.compile(r"[Ss]ent: (Mon|Tue|Wed|Thu|Fri|Sat|Sun|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)[,]? (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|May|June|July|August|September|October|November|December) [0-9]+, 201[0-9](,)? (at )?[0-9]+:[0-9][0-9]"),
]
def _clean_body(self, body):
"""
This function attempts to strip quoted text
from an email body so that only the text actually
written by the email sender remains.
Email formats are messy and heterogeneous,
and this function does not catch all quoted text
or all signatures and should not be considered a
"complete" (and certainly not an elegant ha!) solution.
We recommend testing and expanding this functionality
using your own data. (For example, you may also want to
catch and remove automated messages, etc.)
"""
body = unidecode.unidecode(body)
# Strip quoted text
for quot_re in self.QUOTED_TXT_RES:
body = quot_re.split(body)[0]
# Try to remove inserted newlines
# to recover intended paragraph splits--
# rough and dirty style
lines = body.split("\n")
chunks = []
active_chunk = lines[0]
for i in range(1, len(lines)):
prev_line = lines[i-1]
curr_line = lines[i]
if len(prev_line) >= 65 and len(prev_line) <= 75:
# curr_line probably used to be part of prev_line
active_chunk += " " + curr_line
else:
chunks.append(active_chunk)
active_chunk = curr_line
chunks.append(active_chunk)
body = "\n".join(chunks)
body = body.replace("\xa0", " ")  # normalize non-breaking spaces
return body
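A short usage sketch for the three readers above: walking a directory of `.eml` exports, cleaning each message, and persisting the parsed documents as line-delimited JSON. The paths are placeholders.

```python
if __name__ == "__main__":
    # Hypothetical paths; EmlDataReader and JsonDataReader are the classes defined above.
    emails = EmlDataReader("data/eml_export")
    JsonDataReader.write(emails, "data/emails.json")

    # Re-reading the serialized documents later:
    for doc in JsonDataReader("data/emails.json"):
        print(doc["date"], doc["from"], doc["subject"])
```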
deta/service.py | LemonPi314/deta-python | 0 | 6633158 |
import http.client
import os
import json
import socket
import struct
import typing
import urllib.error
JSON_MIME = "application/json"
class _Service:
def __init__(
self,
project_key: str,
project_id: str,
host: str,
name: str,
timeout: int,
keep_alive: bool = True,
):
self.project_key = project_key
self.base_path = f"/v1/{project_id}/{name}"
self.host = host
self.timeout = timeout
self.keep_alive = keep_alive
self.client = http.client.HTTPSConnection(host, timeout=timeout) if keep_alive else None
def _is_socket_closed(self):
if not self.client.sock:
return True
fmt = "B" * 7 + "I" * 21
tcp_info = struct.unpack(
fmt, self.client.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 92)
)
# 8 = CLOSE_WAIT
if len(tcp_info) > 0 and tcp_info[0] == 8:
return True
return False
def _request(
self,
path: str,
method: str,
data: typing.Union[str, bytes, dict] = None,
headers: dict = None,
content_type: str = None,
stream: bool = False,
):
url = self.base_path + path
headers = headers or {}
headers["X-Api-Key"] = self.project_key
if content_type:
headers["Content-Type"] = content_type
if not self.keep_alive:
headers["Connection"] = "close"
# close connection if socket is closed
# fix for a bug in lambda
try:
if (
self.client
and os.environ.get("DETA_RUNTIME") == "true"
and self._is_socket_closed()
):
self.client.close()
except Exception:
pass
# send request
body = json.dumps(data) if content_type == JSON_MIME else data
# response
res = self._send_request_with_retry(method, url, headers, body)
status = res.status
if status not in [200, 201, 202, 207]:
# need to read the response so subsequent requests can be sent on the client
res.read()
if not self.keep_alive:
self.client.close()
# return None if not found
if status == 404:
return status, None
raise urllib.error.HTTPError(url, status, res.reason, res.headers, res.fp)
# if stream return the response and client without reading and closing the client
if stream:
return status, res
# return json if application/json
payload = (
json.loads(res.read()) if JSON_MIME in res.getheader("content-type") else res.read()
)
if not self.keep_alive:
self.client.close()
return status, payload
def _send_request_with_retry(
self,
method: str,
url: str,
headers: dict = None,
body: typing.Union[str, bytes, dict] = None,
retry=2, # try at least twice to regain a new connection
):
reinitializeConnection = False
while retry > 0:
try:
if not self.keep_alive or reinitializeConnection:
self.client = http.client.HTTPSConnection(host=self.host, timeout=self.timeout)
self.client.request(
method,
url,
headers=headers,
body=body,
)
res = self.client.getresponse()
return res
except http.client.RemoteDisconnected:
reinitializeConnection = True
retry -= 1
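`_Service._request` above is only the transport layer; concrete clients build on it by composing paths and payloads. The subclass below is an illustrative assumption of how that usually looks (it is not one of deta-python's real classes), showing how the `(status, payload)` return value and the 404-to-None mapping are consumed.

```python
class ExampleItems(_Service):
    """Illustrative consumer of _Service._request (assumed API shape, not the real SDK)."""

    def get(self, key: str):
        _, item = self._request(f"/items/{key}", "GET")
        return item  # already None when the service answered 404

    def put(self, data: dict, key: str):
        payload = {"items": [{**data, "key": key}]}
        code, res = self._request("/items", "PUT", payload, content_type=JSON_MIME)
        return res if code in (200, 201, 207) else None
```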
run.py | W1Fl/- | 2 | 6633159 |
from PIL import Image
import os
import codec
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
imgurl=os.path.join('test',input('Enter the number of the target image in test/ --> ')+'.png')
img=Image.open(imgurl)
import work
sc=work.comput(np.array(img))[0][0]
print('The analyzer estimates the original image has a {}% probability of carrying hidden data'.format(sc*100))
stgimg=codec.encodeDataInImage(img,input('Enter the message to hide --> '))
sc=work.comput(np.array(stgimg))[0][0]
print('The analyzer estimates the stego image has a {}% probability of carrying hidden data'.format(sc*100))
try:
os.mkdir('teststg')
except FileExistsError:
pass
stgimg.save('teststg/stg.png')
work.sess.close()
print('The stego image was saved to the teststg directory; run adversary.py to generate an adversarial example')
os._exit(0)
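`codec.encodeDataInImage` itself is not part of this snippet. A common way such a function is implemented is least-significant-bit embedding; the sketch below is a generic stand-in for illustration, not the repository's actual codec.

```python
import numpy as np
from PIL import Image

def encode_lsb(img: Image.Image, message: str) -> Image.Image:
    """Hide a UTF-8, length-prefixed message in the lowest bit of each RGB channel."""
    data = message.encode("utf-8")
    payload = len(data).to_bytes(4, "big") + data
    bits = np.unpackbits(np.frombuffer(payload, dtype=np.uint8))
    pixels = np.array(img.convert("RGB"), dtype=np.uint8)
    flat = pixels.reshape(-1)
    if bits.size > flat.size:
        raise ValueError("message too long for this image")
    flat[:bits.size] = (flat[:bits.size] & 0xFE) | bits
    return Image.fromarray(flat.reshape(pixels.shape))
```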
model/wordrep.py | cslydia/BiFlaG | 59 | 6633160 |
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
import numpy as np
from .charcnn import IntNet
# CharCNN, CharBiLSTM and CharBiGRU are used below; the import paths here are assumed
# (NCRF++-style layout) and may need adjusting to this repository's module structure.
from .charcnn import CharCNN
from .charbilstm import CharBiLSTM
from .charbigru import CharBiGRU
class WordRep(nn.Module):
def __init__(self, data):
super(WordRep, self).__init__()
print("build word representation...")
self.gpu = data.HP_gpu
self.use_char = data.use_char
self.batch_size = data.HP_batch_size
self.char_hidden_dim = 0
self.char_all_feature = False
self.sentence_classification = data.sentence_classification
if self.use_char:
self.char_hidden_dim = data.HP_char_hidden_dim
self.char_embedding_dim = data.char_emb_dim
if data.char_feature_extractor == "CNN":
self.char_feature = CharCNN(data.char_alphabet.size(), data.pretrain_char_embedding, self.char_embedding_dim, self.char_hidden_dim, data.HP_dropout, self.gpu)
elif data.char_feature_extractor == "LSTM":
self.char_feature = CharBiLSTM(data.char_alphabet.size(), data.pretrain_char_embedding, self.char_embedding_dim, self.char_hidden_dim, data.HP_dropout, self.gpu)
elif data.char_feature_extractor == "GRU":
self.char_feature = CharBiGRU(data.char_alphabet.size(), data.pretrain_char_embedding, self.char_embedding_dim, self.char_hidden_dim, data.HP_dropout, self.gpu)
elif data.char_feature_extractor == "IntNet":
self.char_feature = IntNet(data.char_alphabet.size(), self.char_embedding_dim, data.HP_intNet_layer, data.HP_intNet_kernel_type, data.HP_dropout, self.gpu)
elif data.char_feature_extractor == "ALL":
self.char_all_feature = True
self.char_feature = CharCNN(data.char_alphabet.size(), data.pretrain_char_embedding, self.char_embedding_dim, self.char_hidden_dim, data.HP_dropout, self.gpu)
self.char_feature_extra = CharBiLSTM(data.char_alphabet.size(), data.pretrain_char_embedding, self.char_embedding_dim, self.char_hidden_dim, data.HP_dropout, self.gpu)
else:
print("Error char feature selection, please check parameter data.char_feature_extractor (CNN/LSTM/GRU/ALL).")
exit(0)
self.embedding_dim = data.word_emb_dim
self.drop = nn.Dropout(data.HP_dropout)
self.word_embedding = nn.Embedding(data.word_alphabet.size(), self.embedding_dim)
if data.pretrain_word_embedding is not None:
self.word_embedding.weight.data.copy_(torch.from_numpy(data.pretrain_word_embedding))
else:
self.word_embedding.weight.data.copy_(torch.from_numpy(self.random_embedding(data.word_alphabet.size(), self.embedding_dim)))
self.feature_num = data.feature_num
self.feature_embedding_dims = data.feature_emb_dims
self.feature_embeddings = nn.ModuleList()
for idx in range(self.feature_num):
self.feature_embeddings.append(nn.Embedding(data.feature_alphabets[idx].size(), self.feature_embedding_dims[idx]))
for idx in range(self.feature_num):
if data.pretrain_feature_embeddings[idx] is not None:
self.feature_embeddings[idx].weight.data.copy_(torch.from_numpy(data.pretrain_feature_embeddings[idx]))
else:
self.feature_embeddings[idx].weight.data.copy_(torch.from_numpy(self.random_embedding(data.feature_alphabets[idx].size(), self.feature_embedding_dims[idx])))
if self.gpu:
self.drop = self.drop.cuda()
self.word_embedding = self.word_embedding.cuda()
for idx in range(self.feature_num):
self.feature_embeddings[idx] = self.feature_embeddings[idx].cuda()
def random_embedding(self, vocab_size, embedding_dim):
pretrain_emb = np.empty([vocab_size, embedding_dim])
scale = np.sqrt(3.0 / embedding_dim)
for index in range(vocab_size):
pretrain_emb[index,:] = np.random.uniform(-scale, scale, [1, embedding_dim])
return pretrain_emb
def forward(self, word_inputs,feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover):
"""
input:
word_inputs: (batch_size, sent_len)
features: list [(batch_size, sent_len), (batch_len, sent_len),...]
word_seq_lengths: list of batch_size, (batch_size,1)
char_inputs: (batch_size*sent_len, word_length)
char_seq_lengths: list of whole batch_size for char, (batch_size*sent_len, 1)
char_seq_recover: variable which records the char order information, used to recover char order
output:
Variable(batch_size, sent_len, hidden_dim)
"""
batch_size = word_inputs.size(0)
sent_len = word_inputs.size(1)
word_embs = self.word_embedding(word_inputs)
word_list = [word_embs]
if self.use_char:
## calculate char lstm last hidden
# print("charinput:", char_inputs)
# exit(0)
char_features = self.char_feature.get_last_hiddens(char_inputs, char_seq_lengths.cpu().numpy(), batch_size,sent_len)
char_features = char_features[char_seq_recover]
char_features = char_features.view(batch_size,sent_len,-1)
## concat word and char together
word_list.append(char_features)
word_embs = torch.cat([word_embs, char_features], 2)
if self.char_all_feature:
char_features_extra = self.char_feature_extra.get_last_hiddens(char_inputs, char_seq_lengths.cpu().numpy())
char_features_extra = char_features_extra[char_seq_recover]
char_features_extra = char_features_extra.view(batch_size,sent_len,-1)
## concat word and char together
word_list.append(char_features_extra)
word_embs = torch.cat(word_list, 2)
word_represent = self.drop(word_embs)
return word_represent
| en | 0.567349 | input: word_inputs: (batch_size, sent_len) features: list [(batch_size, sent_len), (batch_len, sent_len),...] word_seq_lengths: list of batch_size, (batch_size,1) char_inputs: (batch_size*sent_len, word_length) char_seq_lengths: list of whole batch_size for char, (batch_size*sent_len, 1) char_seq_recover: variable which records the char order information, used to recover char order output: Variable(batch_size, sent_len, hidden_dim) ## calculate char lstm last hidden # print("charinput:", char_inputs) # exit(0) ## concat word and char together ## concat word and char together | 2.722334 | 3 |
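Editor's addition - a hedged usage sketch for the WordRep module in the row above. Nothing below
comes from the dataset itself: the import path is inferred from the row's file name, and the
_FakeAlphabet / _FakeData helpers are invented stand-ins for the project's real Alphabet and
configuration objects, with the char and extra-feature branches switched off so that only the
word-embedding path is exercised.

import torch

from model.wordrep import WordRep        # module path assumed from the row's file name

class _FakeAlphabet:
    """Stand-in for the project's Alphabet class; only size() is needed here."""
    def __init__(self, n):
        self._n = n

    def size(self):
        return self._n

class _FakeData:
    """Stand-in for the `data` configuration object read by WordRep.__init__."""
    HP_gpu = False
    use_char = False                     # skip the char branch to keep the sketch small
    HP_batch_size = 2
    sentence_classification = False
    HP_dropout = 0.5
    word_emb_dim = 50
    pretrain_word_embedding = None       # forces the random_embedding() fallback
    feature_num = 0
    feature_emb_dims = []
    feature_alphabets = []
    pretrain_feature_embeddings = []
    word_alphabet = _FakeAlphabet(100)
    char_alphabet = _FakeAlphabet(30)

word_rep = WordRep(_FakeData())
word_inputs = torch.randint(0, 100, (2, 7))              # (batch_size, sent_len)
out = word_rep(word_inputs, [], torch.tensor([7, 7]), None, None, None)
print(out.shape)                         # torch.Size([2, 7, 50]) because use_char is False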
bambi/backends/pymc.py | OriolAbril/bambi | 0 | 6633161 | import logging
import numpy as np
import theano
import pymc3 as pm
from bambi.priors import Prior
import bambi.version as version
from .base import BackEnd
_log = logging.getLogger("bambi")
class PyMC3BackEnd(BackEnd):
"""PyMC3 model-fitting backend."""
# Available link functions
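    # (Editor's note - hedged) Each entry maps a link name to the *inverse* of that link, i.e. the
    # function applied to the linear predictor in build() below (e.g. "logit" -> sigmoid,
    # "log" -> exp), not the link function itself.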
links = {
"identity": lambda x: x,
"logit": theano.tensor.nnet.sigmoid,
"inverse": theano.tensor.inv,
"inverse_squared": lambda x: theano.tensor.inv(theano.tensor.sqrt(x)),
"log": theano.tensor.exp,
}
dists = {"HalfFlat": pm.Bound(pm.Flat, lower=0)}
def __init__(self):
self.name = pm.__name__
self.version = pm.__version__
# Attributes defined elsewhere
self.model = None
self.mu = None # build()
self.spec = None # build()
self.trace = None # build()
self.advi_params = None # build()
# Inspect all args in case we have hyperparameters
def _expand_args(self, key, value, label, noncentered):
if isinstance(value, Prior):
label = f"{label}_{key}"
return self._build_dist(noncentered, label, value.name, **value.args)
return value
def _build_dist(self, noncentered, label, dist, **kwargs):
"""Build and return a PyMC3 Distribution."""
if isinstance(dist, str):
if hasattr(pm, dist):
dist = getattr(pm, dist)
elif dist in self.dists:
dist = self.dists[dist]
else:
raise ValueError(
f"The Distribution {dist} was not found in PyMC3 or the PyMC3BackEnd."
)
kwargs = {k: self._expand_args(k, v, label, noncentered) for (k, v) in kwargs.items()}
# Non-centered parameterization for hyperpriors
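        # (Editor's note - hedged) The branch below re-expresses the parameter instead of sampling
        # it directly with its hyperprior scale: it draws label_offset ~ Normal(0, 1) and defines
        # label = label_offset * sigma as a Deterministic, which usually removes the "funnel"
        # geometry that makes hierarchical models hard for NUTS to sample.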
if (
noncentered
and "sigma" in kwargs
and "observed" not in kwargs
and isinstance(kwargs["sigma"], pm.model.TransformedRV)
):
old_sigma = kwargs["sigma"]
_offset = pm.Normal(label + "_offset", mu=0, sigma=1, shape=kwargs["shape"])
return pm.Deterministic(label, _offset * old_sigma)
return dist(label, **kwargs)
def build(self, spec): # pylint: disable=arguments-differ
"""Compile the PyMC3 model from an abstract model specification.
Parameters
----------
spec : Bambi model
A Bambi ``Model`` instance containing the abstract specification of the model
to compile.
"""
coords = spec._get_pymc_coords() # pylint: disable=protected-access
self.model = pm.Model(coords=coords)
noncentered = spec.noncentered
with self.model:
self.mu = 0.0
for term in spec.terms.values():
data = term.data
label = term.name
dist_name = term.prior.name
dist_args = term.prior.args
dist_shape = term.data.shape[1]
if dist_shape == 1:
dist_shape = ()
coef = self._build_dist(
noncentered, label, dist_name, shape=dist_shape, **dist_args
)
if term.group_specific:
self.mu += coef[term.group_index][:, None] * term.predictor
else:
self.mu += pm.math.dot(data, coef)[:, None]
response = spec.response.data
response_name = spec.response.name
response_prior = spec.family.prior
link_f = spec.family.link
if isinstance(link_f, str):
link_f = self.links[link_f]
response_prior.args[spec.family.parent] = link_f(self.mu)
response_prior.args["observed"] = response
self._build_dist(noncentered, response_name, response_prior.name, **response_prior.args)
self.spec = spec
# pylint: disable=arguments-differ, inconsistent-return-statements
def run(
self, start=None, method="mcmc", init="auto", n_init=50000, omit_offsets=True, **kwargs
):
"""Run the PyMC3 MCMC sampler.
Parameters
----------
start: dict, or array of dict
Starting parameter values to pass to sampler; see ``pm.sample()`` for details.
method: str
The method to use for fitting the model. By default, ``'mcmc'``, in which case the
PyMC3 sampler will be used. Alternatively, ``'advi'``, in which case the model will be
fitted using automatic differentiation variational inference as implemented in PyMC3.
            Finally, ``'laplace'``, in which case a Laplace approximation is used; ``'laplace'`` is
not recommended other than for pedagogical use.
init: str
Initialization method (see PyMC3 sampler documentation). Currently, this is
``'jitter+adapt_diag'``, but this can change in the future.
n_init: int
            Number of initialization iterations if ``init = 'advi'`` or ``init = 'nuts'``.
            The default in PyMC3 is rather high for the kinds of models we expect to see run
            with Bambi, so we lower it considerably.
omit_offsets: bool
Omits offset terms in the ``InferenceData`` object when the model includes
group specific effects. Defaults to ``True``.
Returns
-------
An ArviZ ``InferenceData`` instance.
"""
model = self.model
if method.lower() == "mcmc":
draws = kwargs.pop("draws", 1000)
with model:
idata = pm.sample(
draws,
start=start,
init=init,
n_init=n_init,
return_inferencedata=True,
**kwargs,
)
if omit_offsets:
offset_dims = [vn for vn in idata.posterior.dims if "offset" in vn]
idata.posterior = idata.posterior.drop_dims(offset_dims)
for group in idata.groups():
getattr(idata, group).attrs["modeling_interface"] = "bambi"
getattr(idata, group).attrs["modeling_interface_version"] = version.__version__
return idata
elif method.lower() == "advi":
with model:
self.advi_params = pm.variational.ADVI(start, **kwargs)
return (
self.advi_params
) # this should return an InferenceData object (once arviz adds support for VI)
elif method.lower() == "laplace":
return _laplace(model)
def _laplace(model):
"""Fit a model using a Laplace approximation.
Mainly for pedagogical use. ``mcmc`` and ``advi`` are better approximations.
Parameters
----------
model: PyMC3 model
Returns
-------
    A dictionary whose keys are the variable names and whose values are tuples of
    (mode, standard deviation).
"""
with model:
varis = [v for v in model.unobserved_RVs if not pm.util.is_transformed_name(v.name)]
maps = pm.find_MAP(start=model.test_point, vars=varis)
hessian = pm.find_hessian(maps, vars=varis)
if np.linalg.det(hessian) == 0:
raise np.linalg.LinAlgError("Singular matrix. Use mcmc or advi method")
stds = np.diag(np.linalg.inv(hessian) ** 0.5)
maps = [v for (k, v) in maps.items() if not pm.util.is_transformed_name(k)]
modes = [v.item() if v.size == 1 else v for v in maps]
names = [v.name for v in varis]
shapes = [np.atleast_1d(mode).shape for mode in modes]
stds_reshaped = []
idx0 = 0
for shape in shapes:
idx1 = idx0 + sum(shape)
stds_reshaped.append(np.reshape(stds[idx0:idx1], shape))
idx0 = idx1
return dict(zip(names, zip(modes, stds_reshaped)))
| en | 0.720796 | PyMC3 model-fitting backend. # Available link functions # Attributes defined elsewhere # build() # build() # build() # build() # Inspect all args in case we have hyperparameters Build and return a PyMC3 Distribution. # Non-centered parameterization for hyperpriors # pylint: disable=arguments-differ Compile the PyMC3 model from an abstract model specification. Parameters ---------- spec : Bambi model A Bambi ``Model`` instance containing the abstract specification of the model to compile. # pylint: disable=protected-access # pylint: disable=arguments-differ, inconsistent-return-statements Run the PyMC3 MCMC sampler. Parameters ---------- start: dict, or array of dict Starting parameter values to pass to sampler; see ``pm.sample()`` for details. method: str The method to use for fitting the model. By default, ``'mcmc'``, in which case the PyMC3 sampler will be used. Alternatively, ``'advi'``, in which case the model will be fitted using automatic differentiation variational inference as implemented in PyMC3. Finally, ``'laplace'``, in which case a laplace approximation is used, ``'laplace'`` is not recommended other than for pedagogical use. init: str Initialization method (see PyMC3 sampler documentation). Currently, this is ``'jitter+adapt_diag'``, but this can change in the future. n_init: int Number of initialization iterations if ``init = 'advi'`` or '``init = 'nuts'``. Default is kind of in PyMC3 for the kinds of models we expect to see run with Bambi, so we lower it considerably. omit_offsets: bool Omits offset terms in the ``InferenceData`` object when the model includes group specific effects. Defaults to ``True``. Returns ------- An ArviZ ``InferenceData`` instance. # this should return an InferenceData object (once arviz adds support for VI) Fit a model using a Laplace approximation. Mainly for pedagogical use. ``mcmc`` and ``advi`` are better approximations. Parameters ---------- model: PyMC3 model Returns ------- Dictionary, the keys are the names of the variables and the values tuples of modes and standard deviations. | 2.115276 | 2 |
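Editor's addition - a hedged sketch of how the backend in the row above is typically driven.
`spec` below is a placeholder for an already-built bambi Model specification (backend.build()
expects one); it is not constructed here, and the import path is inferred from the row's file
name, so treat every name as an assumption rather than documented API.

from bambi.backends.pymc import PyMC3BackEnd    # path assumed from the row's file name

backend = PyMC3BackEnd()
backend.build(spec)                              # compiles spec.terms into a pm.Model
idata = backend.run(method="mcmc", draws=500, chains=2)    # ArviZ InferenceData
# idata = backend.run(method="advi")             # or variational inference
# approx = backend.run(method="laplace")         # or the pedagogical Laplace approximation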
tests/functional/gtcs/test_dsql_domain_06.py | reevespaul/firebird-qa | 0 | 6633162 | <gh_stars>0
#coding:utf-8
#
# id: functional.gtcs.dsql_domain_06
# title: GTCS/tests/DSQL_DOMAIN_06. Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and CHECK constraint clause.
# description:
#                The original test can be found in:
# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_06.script
#
# NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output.
#                We display info about domains using a common VIEW based on the RDB$FIELDS table.
#                Columns rdb$validation_source and rdb$default_source contain BLOB data, thus we have to skip showing their blob IDs - see the substitutions list.
#
# ::: NOTE :::
#                Added domains with datatypes that appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested.
#
# For each base datatype we:
# * create domain and set default value;
# * alter domain in order to drop default;
# * alter domain in order to set new default;
# * alter domain with doing TWO changes in ONE statement: set new default + drop default;
#                  * alter domain, making TWO changes in ONE statement: set new default + drop default;
#                  * alter domain, making TWO changes in ONE statement: drop default + set new default.
#                For some datatypes (float, double precision) we also verify the ability to use boundary values for the datatype itself.
#                For character datatypes we use non-ascii characters (currency signs: euro, cent, pound, yen).
#
#                Currently the following datatypes are NOT checked:
# blob sub_type text not null;
#                    blob sub_type binary not null; // but the test *does* check BLOB without sub_type specified
# long float not null;
# nchar(20) not null;
# binary(20) not null;
# varbinary(20) not null;
#
# Checked on 4.0.0.1926.
#
# tracker_id:
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
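# Editor's addition - a hedged summary, taken from the script below, of the five-step pattern that
# is repeated for every datatype (shown here for the smallint domain):
#
#   create domain dom06_01 as smallint default 1111;
#   alter domain dom06_01 drop default;
#   alter domain dom06_01 set default 2222;
#   alter domain dom06_01 set default 9999 drop default;   -- set + drop in one statement
#   alter domain dom06_01 drop default set default 3333;   -- drop + set in one statement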
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 4.0
# resources: None
substitutions_1 = [('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), ('DM_FVALID_BLOB_ID.*', '')]
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set bail on;
set list on;
create view v_test as
select
ff.rdb$field_name as dm_name
,ff.rdb$field_type as dm_type
,ff.rdb$field_sub_type as dm_subtype
,ff.rdb$field_length as dm_flen
,ff.rdb$field_scale as dm_fscale
,ff.rdb$field_precision as dm_fprec
,ff.rdb$character_set_id as dm_fcset
,ff.rdb$collation_id as dm_fcoll
,ff.rdb$character_length dm_fchrlen
,ff.rdb$null_flag as dm_fnull
,ff.rdb$validation_source as dm_fvalid_blob_id
,ff.rdb$default_source as dm_fdefault_blob_id
from rdb$fields ff
where
ff.rdb$system_flag is distinct from 1
and ff.rdb$field_name starting with upper( 'dom0' )
;
commit;
create domain dom06_01 as smallint default 1111;
alter domain dom06_01 drop default;
alter domain dom06_01 set default 2222;
alter domain dom06_01 set default 9999 drop default;
alter domain dom06_01 drop default set default 3333;
------------------------------------------------------------------------------------------------
create domain dom06_02 as int default 11111;
alter domain dom06_02 drop default;
alter domain dom06_02 set default 22222;
alter domain dom06_02 set default 99999 drop default;
alter domain dom06_02 drop default set default 33333;
------------------------------------------------------------------------------------------------
create domain dom06_03 as bigint default 111111;
alter domain dom06_03 drop default;
alter domain dom06_03 set default 222222;
alter domain dom06_03 set default 999999 drop default;
alter domain dom06_03 drop default set default 333333;
------------------------------------------------------------------------------------------------
create domain dom06_04 as date default current_date;
alter domain dom06_04 drop default;
alter domain dom06_04 set default 'TODAY';
alter domain dom06_04 set default 'TOMORROW' drop default;
alter domain dom06_04 drop default set default 'YESTERDAY';
------------------------------------------------------------------------------------------------
create domain dom06_05 as time default current_time;
alter domain dom06_05 drop default;
alter domain dom06_05 set default current_time;
alter domain dom06_05 set default current_time drop default;
alter domain dom06_05 drop default set default current_time;
------------------------------------------------------------------------------------------------
create domain dom06_06 as time with time zone default '11:11:11.111 Indian/Cocos';
alter domain dom06_06 drop default;
alter domain dom06_06 set default '12:31:42.543 Pacific/Fiji';
alter domain dom06_06 set default '23:34:45.678 Pacific/Galapagos' drop default;
alter domain dom06_06 drop default set default '01:02:03.456 Antarctica/South_Pole';
------------------------------------------------------------------------------------------------
create domain dom06_07 as timestamp not null;
alter domain dom06_07 drop default;
alter domain dom06_07 set default 'now';
alter domain dom06_07 set default current_timestamp drop default;
alter domain dom06_07 drop default set default current_timestamp;
------------------------------------------------------------------------------------------------
create domain dom06_08 as timestamp with time zone default '21.12.2013 11:11:11.111 Indian/Cocos';
alter domain dom06_08 drop default;
alter domain dom06_08 set default '23.01.2014 12:31:42.543 Pacific/Fiji';
alter domain dom06_08 set default '27.03.2015 23:34:45.678 Pacific/Galapagos' drop default;
alter domain dom06_08 drop default set default '29.05.2017 01:02:03.456 Antarctica/South_Pole';
------------------------------------------------------------------------------------------------
create domain dom06_09 as char(1) character set utf8 default '€';
alter domain dom06_09 drop default;
alter domain dom06_09 set default '£';
alter domain dom06_09 set default '¢' drop default;
alter domain dom06_09 drop default set default '¥';
------------------------------------------------------------------------------------------------
create domain dom06_10 as varchar(1) character set utf8 default '€';
alter domain dom06_10 drop default;
alter domain dom06_10 set default '£';
alter domain dom06_10 set default '¢' drop default;
alter domain dom06_10 drop default set default '¥';
------------------------------------------------------------------------------------------------
create domain dom06_11 as nchar(1) default 'Ž'; -- ISO8859_1
alter domain dom06_11 drop default;
alter domain dom06_11 set default 'š';
alter domain dom06_11 set default 'Ÿ' drop default;
alter domain dom06_11 drop default set default '¡';
------------------------------------------------------------------------------------------------
create domain dom06_12 as numeric(2,2) default -327.68;
alter domain dom06_12 drop default;
alter domain dom06_12 set default 327.67;
alter domain dom06_12 set default -327.68 drop default;
alter domain dom06_12 drop default set default 327.67;
------------------------------------------------------------------------------------------------
-- create domain dom06_13 as decimal(20,2) default 170141183460469231731687303715884105727; -- 0x7FFFFFFFFFFFFFFF;
create domain dom06_13 as decimal(20,2) default -999999999999999999;
alter domain dom06_13 drop default;
alter domain dom06_13 set default 99999999999999999999999999999999;
alter domain dom06_13 set default -999999999999999999 drop default;
alter domain dom06_13 drop default set default 99999999999999999999999999999999;
------------------------------------------------------------------------------------------------
-- https://en.wikipedia.org/wiki/Single-precision_floating-point_format, power(2,-149):
-- https://www.wolframalpha.com
-- (largest normal number): (2-power(2,-23)) * power(2,127)
create domain dom06_14 as float default 340282346638528859811704183484516925440;
alter domain dom06_14 drop default;
-- (smallest positive subnormal number): power(2, -149)
alter domain dom06_14 set default 1.40129846432481707092372958328991613128026194187651577175706828388979108268586060148663818836212158203125e-45;
-- (largest number less than one): 1 - power(2,-24)
alter domain dom06_14 set default 0.999999940395355224609375 drop default;
-- (smallest number larger than one): 1 + power(2,-23)
alter domain dom06_14 drop default set default 1.00000011920928955078125;
------------------------------------------------------------------------------------------------
create domain dom06_15 as real default 340282346638528859811704183484516925440; -- = FLOAT
alter domain dom06_15 drop default;
alter domain dom06_15 set default 1.40129846432481707092372958328991613128026194187651577175706828388979108268586060148663818836212158203125e-45;
alter domain dom06_15 set default 0.999999940395355224609375 drop default;
alter domain dom06_15 drop default set default 1.00000011920928955078125;
------------------------------------------------------------------------------------------------
-- https://en.wikipedia.org/wiki/Double-precision_floating-point_format
-- create domain dom06_16 as double precision default 0xF0000000; -- 0x7fefffffffffffff;
--create domain dm_testd as double precision default 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368;
--create table test(x dom06_16);
--insert into test default values returning x; -- Statement failed, SQLSTATE = 22003 / Floating-point overflow. The exponent of a floating-point operation is greater than the magnitude allowed.
-- 1.79769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458... × 10^308
--create domain dom06_16 as double precision default 1.797693134862315708145e308;
-- (max double): power(2,1023) * (1+(1-power(2,-52))
create domain dom06_16 as double precision default 1.797693134862315708e308; -- 1.797693134862315708e3081 => SQLSTATE = 22003 / Floating-point overflow
alter domain dom06_16 drop default;
-- (Min. subnormal positive double) power(2,-1074)
-- 4.940656458412465441765687928682213723650598026143247... × 10^-324
-- (Max. subnormal double) power(2,-1022) * (1 - power(2,-52))
-- 2.225073858507200889024586876085859887650423112240959... × 10^-308
-- alter domain dom06_16 set default 2.225073858507200889024586876085859887650423112240959e-308; -- 0.00000000
alter domain dom06_16 set default 2e-308;
-- 1 + power(2,-52) = 1.0000000000000002, the smallest number > 1
-- 1.0000000000000002220446049250313080847263336181640625
alter domain dom06_16 set default 1.0000000000000002220446049250313080847263336181640625 drop default;
alter domain dom06_16 drop default set default 1.0000000000000006;
-----------------------------------------------------------------------------------------------
create domain dom06_17 as blob default
'
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
012345678901234567
'
;
alter domain dom06_17 drop default;
alter domain dom06_17 set default
'
'; -- several empty lines here
alter domain dom06_17 set default null drop default;
alter domain dom06_17 drop default set default
'
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
012345678901234567
'
;
----------------------------------------------------------------------------------------------------
create domain dom06_18 as boolean default false;
alter domain dom06_18 drop default;
alter domain dom06_18 set default true;
alter domain dom06_18 set default null drop default;
alter domain dom06_18 drop default set default false;
----------------------------------------------------------------------------------------------------
create domain dom06_19 as decfloat default -9.999999999999999999999999999999999E6144;
alter domain dom06_19 drop default;
alter domain dom06_19 set default 9.999999999999999999999999999999999E6144;
alter domain dom06_19 set default null drop default;
alter domain dom06_19 drop default set default -1.0E-6143;
----------------------------------------------------------------------------------------------------
commit;
set count on;
select * from v_test;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
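# NOTE: whitespace runs and the volatile blob IDs (DM_FVALID_BLOB_ID / DM_FDEFAULT_BLOB_ID lines) are
# normalized via substitutions_1, so the expected output below compares stably between runs.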
expected_stdout_1 = """
DM_NAME DOM06_01
DM_TYPE 7
DM_SUBTYPE 0
DM_FLEN 2
DM_FSCALE 0
DM_FPREC 0
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1e8
default 3333
DM_NAME DOM06_02
DM_TYPE 8
DM_SUBTYPE 0
DM_FLEN 4
DM_FSCALE 0
DM_FPREC 0
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1ec
default 33333
DM_NAME DOM06_03
DM_TYPE 16
DM_SUBTYPE 0
DM_FLEN 8
DM_FSCALE 0
DM_FPREC 0
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1f0
default 333333
DM_NAME DOM06_04
DM_TYPE 12
DM_SUBTYPE <null>
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1f4
default 'YESTERDAY'
DM_NAME DOM06_05
DM_TYPE 13
DM_SUBTYPE <null>
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1f8
default current_time
DM_NAME DOM06_06
DM_TYPE 28
DM_SUBTYPE <null>
DM_FLEN 8
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1fc
default '01:02:03.456 Antarctica/South_Pole'
DM_NAME DOM06_07
DM_TYPE 35
DM_SUBTYPE <null>
DM_FLEN 8
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL 1
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:200
default current_timestamp
DM_NAME DOM06_08
DM_TYPE 29
DM_SUBTYPE <null>
DM_FLEN 12
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:204
default '29.05.2017 01:02:03.456 Antarctica/South_Pole'
DM_NAME DOM06_09
DM_TYPE 14
DM_SUBTYPE 0
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET 4
DM_FCOLL 0
DM_FCHRLEN 1
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:208
default '¥'
DM_NAME DOM06_10
DM_TYPE 37
DM_SUBTYPE 0
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET 4
DM_FCOLL 0
DM_FCHRLEN 1
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:20c
default '¥'
DM_NAME DOM06_11
DM_TYPE 14
DM_SUBTYPE 0
DM_FLEN 1
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET 21
DM_FCOLL 0
DM_FCHRLEN 1
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:210
default '¡'
DM_NAME DOM06_12
DM_TYPE 7
DM_SUBTYPE 1
DM_FLEN 2
DM_FSCALE -2
DM_FPREC 2
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:214
default 327.67
DM_NAME DOM06_13
DM_TYPE 26
DM_SUBTYPE 2
DM_FLEN 16
DM_FSCALE -2
DM_FPREC 20
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:218
default 99999999999999999999999999999999
DM_NAME DOM06_14
DM_TYPE 10
DM_SUBTYPE <null>
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:21c
default 1.00000011920928955078125
DM_NAME DOM06_15
DM_TYPE 10
DM_SUBTYPE <null>
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:220
default 1.00000011920928955078125
DM_NAME DOM06_16
DM_TYPE 27
DM_SUBTYPE <null>
DM_FLEN 8
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:224
default 1.0000000000000006
DM_NAME DOM06_17
DM_TYPE 261
DM_SUBTYPE 0
DM_FLEN 8
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:228
default
'
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
012345678901234567
'
DM_NAME DOM06_18
DM_TYPE 23
DM_SUBTYPE <null>
DM_FLEN 1
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:22c
default false
DM_NAME DOM06_19
DM_TYPE 25
DM_SUBTYPE <null>
DM_FLEN 16
DM_FSCALE 0
DM_FPREC 34
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:230
default -1.0E-6143
Records affected: 19
"""
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_expected_stdout == act_1.clean_stdout
| #coding:utf-8
#
# id: functional.gtcs.dsql_domain_06
# title: GTCS/tests/DSQL_DOMAIN_06. Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and CHECK constraint clause.
# description:
# Original test can be seen in:
# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_06.script
#
# NB: avoid usage of the ISQL command 'SHOW DOMAIN' because of its unstable output.
# We display info about domains using a common VIEW based on the RDB$FIELDS table.
# The rdb$validation_source and rdb$default_source columns contain BLOB data, thus we skip showing their blob IDs - see substitutions.
#
# ::: NOTE :::
# Added domains with datatypes that appeared only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested.
#
# For each base datatype we:
# * create domain and set default value;
# * alter domain in order to drop default;
# * alter domain in order to set new default;
# * alter domain with doing TWO changes in ONE statement: set new default + drop default;
# * alter domain with doing TWO changes in ONE statement: drop default + set new default.
#
# For some datatypes (float, double precision) we also verify the ability to use boundary values of the datatype itself.
# For character datatypes we use non-ASCII characters (currency signs: euro, cent, pound, yen).
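#
# As an illustration of the per-datatype sequence described above (the actual statements are in test_script_1 below),
# the smallint domain is exercised like this:
#     create domain dom06_01 as smallint default 1111;
#     alter domain dom06_01 drop default;
#     alter domain dom06_01 set default 2222;
#     alter domain dom06_01 set default 9999 drop default;
#     alter domain dom06_01 drop default set default 3333;
# The expected output then shows 'default 3333' as the final default of dom06_01.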
#
# Currently the following datatypes are NOT checked:
# blob sub_type text not null;
# blob sub_type binary not null; // but the test *does* check BLOB without a sub_type specified
# long float not null;
# nchar(20) not null;
# binary(20) not null;
# varbinary(20) not null;
#
# Checked on 4.0.0.1926.
#
# tracker_id:
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 4.0
# resources: None
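# Note: the substitutions below collapse runs of spaces/tabs and suppress the values of the
# DM_FVALID_BLOB_ID / DM_FDEFAULT_BLOB_ID lines, because these are blob IDs that differ between runs
# (see the header comment about rdb$validation_source / rdb$default_source).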
substitutions_1 = [('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), ('DM_FVALID_BLOB_ID.*', '')]
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
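# The test database starts empty (init_script_1 is empty): all DDL is executed from test_script_1 itself.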
test_script_1 = """
set bail on;
set list on;
create view v_test as
select
ff.rdb$field_name as dm_name
,ff.rdb$field_type as dm_type
,ff.rdb$field_sub_type as dm_subtype
,ff.rdb$field_length as dm_flen
,ff.rdb$field_scale as dm_fscale
,ff.rdb$field_precision as dm_fprec
,ff.rdb$character_set_id as dm_fcset
,ff.rdb$collation_id as dm_fcoll
,ff.rdb$character_length dm_fchrlen
,ff.rdb$null_flag as dm_fnull
,ff.rdb$validation_source as dm_fvalid_blob_id
,ff.rdb$default_source as dm_fdefault_blob_id
from rdb$fields ff
where
ff.rdb$system_flag is distinct from 1
and ff.rdb$field_name starting with upper( 'dom0' )
;
commit;
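-- Each dom06_NN block below follows the same sequence: create the domain with a default,
-- drop the default, set a new default, and then combine set+drop / drop+set in a single ALTER DOMAIN statement.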
create domain dom06_01 as smallint default 1111;
alter domain dom06_01 drop default;
alter domain dom06_01 set default 2222;
alter domain dom06_01 set default 9999 drop default;
alter domain dom06_01 drop default set default 3333;
------------------------------------------------------------------------------------------------
create domain dom06_02 as int default 11111;
alter domain dom06_02 drop default;
alter domain dom06_02 set default 22222;
alter domain dom06_02 set default 99999 drop default;
alter domain dom06_02 drop default set default 33333;
------------------------------------------------------------------------------------------------
create domain dom06_03 as bigint default 111111;
alter domain dom06_03 drop default;
alter domain dom06_03 set default 222222;
alter domain dom06_03 set default 999999 drop default;
alter domain dom06_03 drop default set default 333333;
------------------------------------------------------------------------------------------------
create domain dom06_04 as date default current_date;
alter domain dom06_04 drop default;
alter domain dom06_04 set default 'TODAY';
alter domain dom06_04 set default 'TOMORROW' drop default;
alter domain dom06_04 drop default set default 'YESTERDAY';
------------------------------------------------------------------------------------------------
create domain dom06_05 as time default current_time;
alter domain dom06_05 drop default;
alter domain dom06_05 set default current_time;
alter domain dom06_05 set default current_time drop default;
alter domain dom06_05 drop default set default current_time;
------------------------------------------------------------------------------------------------
create domain dom06_06 as time with time zone default '11:11:11.111 Indian/Cocos';
alter domain dom06_06 drop default;
alter domain dom06_06 set default '12:31:42.543 Pacific/Fiji';
alter domain dom06_06 set default '23:34:45.678 Pacific/Galapagos' drop default;
alter domain dom06_06 drop default set default '01:02:03.456 Antarctica/South_Pole';
------------------------------------------------------------------------------------------------
create domain dom06_07 as timestamp not null;
alter domain dom06_07 drop default;
alter domain dom06_07 set default 'now';
alter domain dom06_07 set default current_timestamp drop default;
alter domain dom06_07 drop default set default current_timestamp;
------------------------------------------------------------------------------------------------
create domain dom06_08 as timestamp with time zone default '21.12.2013 11:11:11.111 Indian/Cocos';
alter domain dom06_08 drop default;
alter domain dom06_08 set default '23.01.2014 12:31:42.543 Pacific/Fiji';
alter domain dom06_08 set default '27.03.2015 23:34:45.678 Pacific/Galapagos' drop default;
alter domain dom06_08 drop default set default '29.05.2017 01:02:03.456 Antarctica/South_Pole';
------------------------------------------------------------------------------------------------
create domain dom06_09 as char(1) character set utf8 default '€';
alter domain dom06_09 drop default;
alter domain dom06_09 set default '£';
alter domain dom06_09 set default '¢' drop default;
alter domain dom06_09 drop default set default '¥';
------------------------------------------------------------------------------------------------
create domain dom06_10 as varchar(1) character set utf8 default '€';
alter domain dom06_10 drop default;
alter domain dom06_10 set default '£';
alter domain dom06_10 set default '¢' drop default;
alter domain dom06_10 drop default set default '¥';
------------------------------------------------------------------------------------------------
create domain dom06_11 as nchar(1) default 'Ž'; -- ISO8859_1
alter domain dom06_11 drop default;
alter domain dom06_11 set default 'š';
alter domain dom06_11 set default 'Ÿ' drop default;
alter domain dom06_11 drop default set default '¡';
------------------------------------------------------------------------------------------------
create domain dom06_12 as numeric(2,2) default -327.68;
alter domain dom06_12 drop default;
alter domain dom06_12 set default 327.67;
alter domain dom06_12 set default -327.68 drop default;
alter domain dom06_12 drop default set default 327.67;
------------------------------------------------------------------------------------------------
-- create domain dom06_13 as decimal(20,2) default 170141183460469231731687303715884105727; -- 0x7FFFFFFFFFFFFFFF;
create domain dom06_13 as decimal(20,2) default -999999999999999999;
alter domain dom06_13 drop default;
alter domain dom06_13 set default 99999999999999999999999999999999;
alter domain dom06_13 set default -999999999999999999 drop default;
alter domain dom06_13 drop default set default 99999999999999999999999999999999;
------------------------------------------------------------------------------------------------
-- https://en.wikipedia.org/wiki/Single-precision_floating-point_format, power(2,-149):
-- https://www.wolframalpha.com
-- (largest normal number): (2-power(2,-23)) * power(2,127)
create domain dom06_14 as float default 340282346638528859811704183484516925440;
alter domain dom06_14 drop default;
-- (smallest positive subnormal number): power(2, -149)
alter domain dom06_14 set default 1.40129846432481707092372958328991613128026194187651577175706828388979108268586060148663818836212158203125e-45;
-- (largest number less than one): 1 - power(2,-24)
alter domain dom06_14 set default 0.999999940395355224609375 drop default;
-- (smallest number larger than one): 1 + power(2,-23)
alter domain dom06_14 drop default set default 1.00000011920928955078125;
------------------------------------------------------------------------------------------------
create domain dom06_15 as real default 340282346638528859811704183484516925440; -- = FLOAT
alter domain dom06_15 drop default;
alter domain dom06_15 set default 1.40129846432481707092372958328991613128026194187651577175706828388979108268586060148663818836212158203125e-45;
alter domain dom06_15 set default 0.999999940395355224609375 drop default;
alter domain dom06_15 drop default set default 1.00000011920928955078125;
------------------------------------------------------------------------------------------------
-- https://en.wikipedia.org/wiki/Double-precision_floating-point_format
-- create domain dom06_16 as double precision default 0xF0000000; -- 0x7fefffffffffffff;
--create domain dm_testd as double precision default 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368;
--create table test(x dom06_16);
--insert into test default values returning x; -- Statement failed, SQLSTATE = 22003 / Floating-point overflow. The exponent of a floating-point operation is greater than the magnitude allowed.
-- 1.79769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458... × 10^308
--create domain dom06_16 as double precision default 1.797693134862315708145e308;
-- (max double): power(2,1023) * (1+(1-power(2,-52))
create domain dom06_16 as double precision default 1.797693134862315708e308; -- 1.797693134862315708e3081 => SQLSTATE = 22003 / Floating-point overflow
alter domain dom06_16 drop default;
-- (Min. subnormal positive double) power(2,-1074)
-- 4.940656458412465441765687928682213723650598026143247... × 10^-324
-- (Max. subnormal double) power(2,-1022) * (1 - power(2,-52))
-- 2.225073858507200889024586876085859887650423112240959... × 10^-308
-- alter domain dom06_16 set default 2.225073858507200889024586876085859887650423112240959e-308; -- 0.00000000
alter domain dom06_16 set default 2e-308;
-- 1 + power(2,-52) = 1.0000000000000002, the smallest number > 1
-- 1.0000000000000002220446049250313080847263336181640625
alter domain dom06_16 set default 1.0000000000000002220446049250313080847263336181640625 drop default;
alter domain dom06_16 drop default set default 1.0000000000000006;
-----------------------------------------------------------------------------------------------
create domain dom06_17 as blob default
'
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
012345678901234567
'
;
alter domain dom06_17 drop default;
alter domain dom06_17 set default
'
'; -- several empty lines here
alter domain dom06_17 set default null drop default;
alter domain dom06_17 drop default set default
'
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
012345678901234567
'
;
----------------------------------------------------------------------------------------------------
create domain dom06_18 as boolean default false;
alter domain dom06_18 drop default;
alter domain dom06_18 set default true;
alter domain dom06_18 set default null drop default;
alter domain dom06_18 drop default set default false;
----------------------------------------------------------------------------------------------------
create domain dom06_19 as decfloat default -9.999999999999999999999999999999999E6144;
alter domain dom06_19 drop default;
alter domain dom06_19 set default 9.999999999999999999999999999999999E6144;
alter domain dom06_19 set default null drop default;
alter domain dom06_19 drop default set default -1.0E-6143;
----------------------------------------------------------------------------------------------------
commit;
set count on;
select * from v_test;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
DM_NAME DOM06_01
DM_TYPE 7
DM_SUBTYPE 0
DM_FLEN 2
DM_FSCALE 0
DM_FPREC 0
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1e8
default 3333
DM_NAME DOM06_02
DM_TYPE 8
DM_SUBTYPE 0
DM_FLEN 4
DM_FSCALE 0
DM_FPREC 0
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1ec
default 33333
DM_NAME DOM06_03
DM_TYPE 16
DM_SUBTYPE 0
DM_FLEN 8
DM_FSCALE 0
DM_FPREC 0
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1f0
default 333333
DM_NAME DOM06_04
DM_TYPE 12
DM_SUBTYPE <null>
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1f4
default 'YESTERDAY'
DM_NAME DOM06_05
DM_TYPE 13
DM_SUBTYPE <null>
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1f8
default current_time
DM_NAME DOM06_06
DM_TYPE 28
DM_SUBTYPE <null>
DM_FLEN 8
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:1fc
default '01:02:03.456 Antarctica/South_Pole'
DM_NAME DOM06_07
DM_TYPE 35
DM_SUBTYPE <null>
DM_FLEN 8
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL 1
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:200
default current_timestamp
DM_NAME DOM06_08
DM_TYPE 29
DM_SUBTYPE <null>
DM_FLEN 12
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:204
default '29.05.2017 01:02:03.456 Antarctica/South_Pole'
DM_NAME DOM06_09
DM_TYPE 14
DM_SUBTYPE 0
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET 4
DM_FCOLL 0
DM_FCHRLEN 1
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:208
default '¥'
DM_NAME DOM06_10
DM_TYPE 37
DM_SUBTYPE 0
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET 4
DM_FCOLL 0
DM_FCHRLEN 1
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:20c
default '¥'
DM_NAME DOM06_11
DM_TYPE 14
DM_SUBTYPE 0
DM_FLEN 1
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET 21
DM_FCOLL 0
DM_FCHRLEN 1
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:210
default '¡'
DM_NAME DOM06_12
DM_TYPE 7
DM_SUBTYPE 1
DM_FLEN 2
DM_FSCALE -2
DM_FPREC 2
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:214
default 327.67
DM_NAME DOM06_13
DM_TYPE 26
DM_SUBTYPE 2
DM_FLEN 16
DM_FSCALE -2
DM_FPREC 20
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:218
default 99999999999999999999999999999999
DM_NAME DOM06_14
DM_TYPE 10
DM_SUBTYPE <null>
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:21c
default 1.00000011920928955078125
DM_NAME DOM06_15
DM_TYPE 10
DM_SUBTYPE <null>
DM_FLEN 4
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:220
default 1.00000011920928955078125
DM_NAME DOM06_16
DM_TYPE 27
DM_SUBTYPE <null>
DM_FLEN 8
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:224
default 1.0000000000000006
DM_NAME DOM06_17
DM_TYPE 261
DM_SUBTYPE 0
DM_FLEN 8
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:228
default
'
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
012345678901234567
'
DM_NAME DOM06_18
DM_TYPE 23
DM_SUBTYPE <null>
DM_FLEN 1
DM_FSCALE 0
DM_FPREC <null>
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:22c
default false
DM_NAME DOM06_19
DM_TYPE 25
DM_SUBTYPE <null>
DM_FLEN 16
DM_FSCALE 0
DM_FPREC 34
DM_FCSET <null>
DM_FCOLL <null>
DM_FCHRLEN <null>
DM_FNULL <null>
DM_FVALID_BLOB_ID <null>
DM_FDEFAULT_BLOB_ID 2:230
default -1.0E-6143
Records affected: 19
"""
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout | en | 0.153244 | #coding:utf-8 # # id: functional.gtcs.dsql_domain_06 # title: GTCS/tests/DSQL_DOMAIN_06. Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and CHECK constraint clause. # decription: # Original test see in: # https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_06.script # # NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. # We display info about domains using common VIEW based on RDB$FIELDS table. # Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have to skip from showing their blob ID - see substitution. # # ::: NOTE ::: # Added domains with datatype that did appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. # # For each base datatype we: # * create domain and set default value; # * alter domain in order to drop default; # * alter domain in order to set new default; # * alter domain with doing TWO changes in ONE statement: set new default + drop default; # * alter domain with doing TWO changes in ONE statement: drop default + set new default. # # For some datatypes (float, double precision) we also verify ability to use boundary values for datatype itself. # For character datatypes we use non-asci characters (currency signs: euro, cent, pound, yena). # # Currently following datatypes are NOT checked: # blob sub_type text not null; # blob sub_type binary not null; // byt test *does* check BLOB without sub_type specified # long float not null; # nchar(20) not null; # binary(20) not null; # varbinary(20) not null; # # Checked on 4.0.0.1926. # # tracker_id: # min_versions: ['4.0'] # versions: 4.0 # qmid: None # version: 4.0 # resources: None set bail on; set list on; create view v_test as select ff.rdb$field_name as dm_name ,ff.rdb$field_type as dm_type ,ff.rdb$field_sub_type as dm_subtype ,ff.rdb$field_length as dm_flen ,ff.rdb$field_scale as dm_fscale ,ff.rdb$field_precision as dm_fprec ,ff.rdb$character_set_id as dm_fcset ,ff.rdb$collation_id as dm_fcoll ,ff.rdb$character_length dm_fchrlen ,ff.rdb$null_flag as dm_fnull ,ff.rdb$validation_source as dm_fvalid_blob_id ,ff.rdb$default_source as dm_fdefault_blob_id from rdb$fields ff where ff.rdb$system_flag is distinct from 1 and ff.rdb$field_name starting with upper( 'dom0' ) ; commit; create domain dom06_01 as smallint default 1111; alter domain dom06_01 drop default; alter domain dom06_01 set default 2222; alter domain dom06_01 set default 9999 drop default; alter domain dom06_01 drop default set default 3333; ------------------------------------------------------------------------------------------------ create domain dom06_02 as int default 11111; alter domain dom06_02 drop default; alter domain dom06_02 set default 22222; alter domain dom06_02 set default 99999 drop default; alter domain dom06_02 drop default set default 33333; ------------------------------------------------------------------------------------------------ create domain dom06_03 as bigint default 111111; alter domain dom06_03 drop default; alter domain dom06_03 set default 222222; alter domain dom06_03 set default 999999 drop default; alter domain dom06_03 drop default set default 333333; ------------------------------------------------------------------------------------------------ create domain dom06_04 as date default current_date; alter domain dom06_04 drop default; alter domain dom06_04 set default 'TODAY'; alter domain dom06_04 set 
default 'TOMORROW' drop default; alter domain dom06_04 drop default set default 'YESTERDAY'; ------------------------------------------------------------------------------------------------ create domain dom06_05 as time default current_time; alter domain dom06_05 drop default; alter domain dom06_05 set default current_time; alter domain dom06_05 set default current_time drop default; alter domain dom06_05 drop default set default current_time; ------------------------------------------------------------------------------------------------ create domain dom06_06 as time with time zone default '11:11:11.111 Indian/Cocos'; alter domain dom06_06 drop default; alter domain dom06_06 set default '12:31:42.543 Pacific/Fiji'; alter domain dom06_06 set default '23:34:45.678 Pacific/Galapagos' drop default; alter domain dom06_06 drop default set default '01:02:03.456 Antarctica/South_Pole'; ------------------------------------------------------------------------------------------------ create domain dom06_07 as timestamp not null; alter domain dom06_07 drop default; alter domain dom06_07 set default 'now'; alter domain dom06_07 set default current_timestamp drop default; alter domain dom06_07 drop default set default current_timestamp; ------------------------------------------------------------------------------------------------ create domain dom06_08 as timestamp with time zone default '21.12.2013 11:11:11.111 Indian/Cocos'; alter domain dom06_08 drop default; alter domain dom06_08 set default '23.01.2014 12:31:42.543 Pacific/Fiji'; alter domain dom06_08 set default '27.03.2015 23:34:45.678 Pacific/Galapagos' drop default; alter domain dom06_08 drop default set default '29.05.2017 01:02:03.456 Antarctica/South_Pole'; ------------------------------------------------------------------------------------------------ create domain dom06_09 as char(1) character set utf8 default '€'; alter domain dom06_09 drop default; alter domain dom06_09 set default '£'; alter domain dom06_09 set default '¢' drop default; alter domain dom06_09 drop default set default '¥'; ------------------------------------------------------------------------------------------------ create domain dom06_10 as varchar(1) character set utf8 default '€'; alter domain dom06_10 drop default; alter domain dom06_10 set default '£'; alter domain dom06_10 set default '¢' drop default; alter domain dom06_10 drop default set default '¥'; ------------------------------------------------------------------------------------------------ create domain dom06_11 as nchar(1) default 'Ž'; -- ISO8859_1 alter domain dom06_11 drop default; alter domain dom06_11 set default 'š'; alter domain dom06_11 set default 'Ÿ' drop default; alter domain dom06_11 drop default set default '¡'; ------------------------------------------------------------------------------------------------ create domain dom06_12 as numeric(2,2) default -327.68; alter domain dom06_12 drop default; alter domain dom06_12 set default 327.67; alter domain dom06_12 set default -327.68 drop default; alter domain dom06_12 drop default set default 327.67; ------------------------------------------------------------------------------------------------ -- create domain dom06_13 as decimal(20,2) default 170141183460469231731687303715884105727; -- 0x7FFFFFFFFFFFFFFF; create domain dom06_13 as decimal(20,2) default -999999999999999999; alter domain dom06_13 drop default; alter domain dom06_13 set default 99999999999999999999999999999999; alter domain dom06_13 set default -999999999999999999 drop 
default; alter domain dom06_13 drop default set default 99999999999999999999999999999999; ------------------------------------------------------------------------------------------------ -- https://en.wikipedia.org/wiki/Single-precision_floating-point_format, power(2,-149): -- https://www.wolframalpha.com -- (largest normal number): (2-power(2,-23)) * power(2,127) create domain dom06_14 as float default 340282346638528859811704183484516925440; alter domain dom06_14 drop default; -- (smallest positive subnormal number): power(2, -149) alter domain dom06_14 set default 1.40129846432481707092372958328991613128026194187651577175706828388979108268586060148663818836212158203125e-45; -- (largest number less than one): 1 - power(2,-24) alter domain dom06_14 set default 0.999999940395355224609375 drop default; -- (smallest number larger than one): 1 + power(2,-23) alter domain dom06_14 drop default set default 1.00000011920928955078125; ------------------------------------------------------------------------------------------------ create domain dom06_15 as real default 340282346638528859811704183484516925440; -- = FLOAT alter domain dom06_15 drop default; alter domain dom06_15 set default 1.40129846432481707092372958328991613128026194187651577175706828388979108268586060148663818836212158203125e-45; alter domain dom06_15 set default 0.999999940395355224609375 drop default; alter domain dom06_15 drop default set default 1.00000011920928955078125; ------------------------------------------------------------------------------------------------ -- https://en.wikipedia.org/wiki/Double-precision_floating-point_format -- create domain dom06_16 as double precision default 0xF0000000; -- 0x7fefffffffffffff; --create domain dm_testd as double precision default 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368; --create table test(x dom06_16); --insert into test default values returning x; -- Statement failed, SQLSTATE = 22003 / Floating-point overflow. The exponent of a floating-point operation is greater than the magnitude allowed. -- 1.79769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458... × 10^308 --create domain dom06_16 as double precision default 1.797693134862315708145e308; -- (max double): power(2,1023) * (1+(1-power(2,-52)) create domain dom06_16 as double precision default 1.797693134862315708e308; -- 1.797693134862315708e3081 => SQLSTATE = 22003 / Floating-point overflow alter domain dom06_16 drop default; -- (Min. subnormal positive double) power(2,-1074) -- 4.940656458412465441765687928682213723650598026143247... × 10^-324 -- (Max. subnormal double) power(2,-1022) * (1 - power(2,-52)) -- 2.225073858507200889024586876085859887650423112240959... 
× 10^-308 -- alter domain dom06_16 set default 2.225073858507200889024586876085859887650423112240959e-308; -- 0.00000000 alter domain dom06_16 set default 2e-308; -- 1 + power(2,-52) = 1.0000000000000002, the smallest number > 1 -- 1.0000000000000002220446049250313080847263336181640625 alter domain dom06_16 set default 1.0000000000000002220446049250313080847263336181640625 drop default; alter domain dom06_16 drop default set default 1.0000000000000006; ----------------------------------------------------------------------------------------------- create domain dom06_17 as blob default ' 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 
[long default value made of repeated '0123456789' blocks elided] ' ; alter domain dom06_17 drop default; alter domain dom06_17 set default ' '; -- several empty lines here alter domain dom06_17 set default null drop default; alter domain dom06_17 drop default set default ' [long default value made of repeated '0123456789' blocks elided]
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 012345678901234567 ' ; ---------------------------------------------------------------------------------------------------- create domain dom06_18 as boolean default false; alter domain dom06_18 drop default; alter domain dom06_18 set default true; alter domain dom06_18 set default null drop default; alter domain dom06_18 drop default set default false; ---------------------------------------------------------------------------------------------------- create domain dom06_19 as decfloat default -9.999999999999999999999999999999999E6144; alter domain dom06_19 drop default; alter domain dom06_19 set default 9.999999999999999999999999999999999E6144; alter domain dom06_19 set default null drop default; alter domain dom06_19 drop default set default -1.0E-6143; ---------------------------------------------------------------------------------------------------- commit; set count on; select * from v_test; DM_NAME DOM06_01 DM_TYPE 7 DM_SUBTYPE 0 DM_FLEN 2 DM_FSCALE 0 DM_FPREC 0 DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:1e8 default 3333 DM_NAME DOM06_02 DM_TYPE 8 DM_SUBTYPE 0 DM_FLEN 4 DM_FSCALE 0 DM_FPREC 0 DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:1ec default 33333 DM_NAME DOM06_03 DM_TYPE 16 DM_SUBTYPE 0 DM_FLEN 8 DM_FSCALE 0 DM_FPREC 0 DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:1f0 default 333333 DM_NAME DOM06_04 DM_TYPE 12 DM_SUBTYPE <null> DM_FLEN 4 DM_FSCALE 0 DM_FPREC <null> DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:1f4 default 
'YESTERDAY' DM_NAME DOM06_05 DM_TYPE 13 DM_SUBTYPE <null> DM_FLEN 4 DM_FSCALE 0 DM_FPREC <null> DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:1f8 default current_time DM_NAME DOM06_06 DM_TYPE 28 DM_SUBTYPE <null> DM_FLEN 8 DM_FSCALE 0 DM_FPREC <null> DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:1fc default '01:02:03.456 Antarctica/South_Pole' DM_NAME DOM06_07 DM_TYPE 35 DM_SUBTYPE <null> DM_FLEN 8 DM_FSCALE 0 DM_FPREC <null> DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL 1 DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:200 default current_timestamp DM_NAME DOM06_08 DM_TYPE 29 DM_SUBTYPE <null> DM_FLEN 12 DM_FSCALE 0 DM_FPREC <null> DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:204 default '29.05.2017 01:02:03.456 Antarctica/South_Pole' DM_NAME DOM06_09 DM_TYPE 14 DM_SUBTYPE 0 DM_FLEN 4 DM_FSCALE 0 DM_FPREC <null> DM_FCSET 4 DM_FCOLL 0 DM_FCHRLEN 1 DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:208 default '¥' DM_NAME DOM06_10 DM_TYPE 37 DM_SUBTYPE 0 DM_FLEN 4 DM_FSCALE 0 DM_FPREC <null> DM_FCSET 4 DM_FCOLL 0 DM_FCHRLEN 1 DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:20c default '¥' DM_NAME DOM06_11 DM_TYPE 14 DM_SUBTYPE 0 DM_FLEN 1 DM_FSCALE 0 DM_FPREC <null> DM_FCSET 21 DM_FCOLL 0 DM_FCHRLEN 1 DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:210 default '¡' DM_NAME DOM06_12 DM_TYPE 7 DM_SUBTYPE 1 DM_FLEN 2 DM_FSCALE -2 DM_FPREC 2 DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:214 default 327.67 DM_NAME DOM06_13 DM_TYPE 26 DM_SUBTYPE 2 DM_FLEN 16 DM_FSCALE -2 DM_FPREC 20 DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:218 default 99999999999999999999999999999999 DM_NAME DOM06_14 DM_TYPE 10 DM_SUBTYPE <null> DM_FLEN 4 DM_FSCALE 0 DM_FPREC <null> DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:21c default 1.00000011920928955078125 DM_NAME DOM06_15 DM_TYPE 10 DM_SUBTYPE <null> DM_FLEN 4 DM_FSCALE 0 DM_FPREC <null> DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:220 default 1.00000011920928955078125 DM_NAME DOM06_16 DM_TYPE 27 DM_SUBTYPE <null> DM_FLEN 8 DM_FSCALE 0 DM_FPREC <null> DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:224 default 1.0000000000000006 DM_NAME DOM06_17 DM_TYPE 261 DM_SUBTYPE 0 DM_FLEN 8 DM_FSCALE 0 DM_FPREC <null> DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:228 default ' 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 
[long default value made of repeated '0123456789' blocks elided] ' DM_NAME DOM06_18 DM_TYPE 23 DM_SUBTYPE <null> DM_FLEN 1 DM_FSCALE 0 DM_FPREC <null> DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:22c default false DM_NAME DOM06_19 DM_TYPE 25 DM_SUBTYPE <null> DM_FLEN 16 DM_FSCALE 0 DM_FPREC 34 DM_FCSET <null> DM_FCOLL <null> DM_FCHRLEN <null> DM_FNULL <null> DM_FVALID_BLOB_ID <null> DM_FDEFAULT_BLOB_ID 2:230 default -1.0E-6143 Records affected: 19 | 1.812957 | 2
closed/NVIDIA/code/main.py | EldritchJS/inference_results_v0.5 | 0 | 6633163 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import os, sys
sys.path.insert(0, os.getcwd())
from code.common.scopedMPS import ScopedMPS, turn_off_mps
from code.common import logging
from code.common import args_to_string, find_config_files, load_configs, run_command
import code.common.arguments as common_args
from importlib import import_module
import multiprocessing as mp
from multiprocessing import Process
def get_benchmark(benchmark_name, conf):
# Do not use a map. We want to import benchmarks as we need them, because some take
# time to load due to plugins.
if benchmark_name == "resnet":
ResNet50 = import_module("code.resnet.tensorrt.ResNet50").ResNet50
return ResNet50(conf)
elif benchmark_name == "mobilenet":
MobileNet = import_module("code.mobilenet.tensorrt.MobileNet").MobileNet
return MobileNet(conf)
elif benchmark_name == "ssd-small":
SSDMobileNet = import_module("code.ssd-small.tensorrt.SSDMobileNet").SSDMobileNet
return SSDMobileNet(conf)
elif benchmark_name == "ssd-large":
SSDResNet34 = import_module("code.ssd-large.tensorrt.SSDResNet34").SSDResNet34
return SSDResNet34(conf)
elif benchmark_name == "gnmt":
GNMTBuilder = import_module("code.gnmt.tensorrt.GNMT").GNMTBuilder
return GNMTBuilder(conf)
else:
raise ValueError("Unknown benchmark: {:}".format(benchmark_name))
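# Usage sketch (illustrative only): a hypothetical call such as
#   get_benchmark("resnet", {"batch_size": 64, "scenario": "Offline", ...})
# lazily imports code.resnet.tensorrt.ResNet50 and returns a builder object whose
# build_engines() and calibrate() methods are used by handle_generate_engine() and
# handle_calibrate() further down in this file.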
def apply_overrides(config, keys):
# Make a copy so we don't modify original dict
config = dict(config)
override_args = common_args.parse_args(keys)
for key in override_args:
# Unset values (None) and unset store_true values (False) are both false-y
if override_args[key]:
config[key] = override_args[key]
return config
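# Behaviour sketch of apply_overrides (the parse_args return value below is an assumed example):
# if common_args.parse_args(["gpu_batch_size", "verbose"]) returned
#   {"gpu_batch_size": 64, "verbose": False}
# then apply_overrides({"gpu_batch_size": 32, "map_path": "x"}, ["gpu_batch_size", "verbose"])
# would yield {"gpu_batch_size": 64, "map_path": "x"}: None/False overrides are skipped, so the
# original config values survive.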
def launch_handle_generate_engine(benchmark_name, config, gpu, dla):
retries = 3
timeout = 7200
success = False
for i in range(retries):
# Build engines in another process to make sure we exit with clean cuda context so that MPS can be turned off.
from code.main import handle_generate_engine
p = Process(target=handle_generate_engine, args=(benchmark_name, config, gpu, dla))
p.start()
try:
p.join(timeout)
except KeyboardInterrupt:
p.terminate()
p.join(timeout)
raise KeyboardInterrupt
if p.exitcode == 0:
success = True
break
if not success:
raise RuntimeError("Building engines failed!")
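# Minimal sketch of the retry pattern above (assuming the "spawn" start method set in __main__):
#   for attempt in range(3):
#       p = Process(target=handle_generate_engine, args=(...)); p.start(); p.join(7200)
#       if p.exitcode == 0:
#           break
# Each attempt runs in a fresh child process, so the parent never holds a CUDA context while
# MPS is being turned on or off.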
def handle_generate_engine(benchmark_name, config, gpu=True, dla=True):
logging.info("Building engines for {:} benchmark in {:} scenario...".format(benchmark_name, config["scenario"]))
if benchmark_name == "gnmt":
arglist = common_args.GNMT_ENGINE_ARGS
else:
arglist = common_args.GENERATE_ENGINE_ARGS
config = apply_overrides(config, arglist)
if dla and "dla_batch_size" in config:
config["batch_size"] = config["dla_batch_size"]
logging.info("Building DLA engine for {:}_{:}_{:}".format(config["system_id"], benchmark_name, config["scenario"]))
b = get_benchmark(benchmark_name, config)
b.build_engines()
if gpu and "gpu_batch_size" in config:
config["batch_size"] = config["gpu_batch_size"]
config["dla_core"] = None
logging.info("Building GPU engine for {:}_{:}_{:}".format(config["system_id"], benchmark_name, config["scenario"]))
b = get_benchmark(benchmark_name, config)
b.build_engines()
if gpu and config["scenario"] == "Server" and benchmark_name == "gnmt":
b = get_benchmark(benchmark_name, config)
b.build_engines()
logging.info("Finished building engines for {:} benchmark in {:} scenario.".format(benchmark_name, config["scenario"]))
def handle_run_harness(benchmark_name, config, gpu=True, dla=True):
logging.info("Running harness for {:} benchmark in {:} scenario...".format(benchmark_name, config["scenario"]))
if config["scenario"] == "SingleStream":
arglist = common_args.SINGLE_STREAM_HARNESS_ARGS
elif config["scenario"] == "Offline":
arglist = common_args.OFFLINE_HARNESS_ARGS
elif config["scenario"] == "MultiStream":
arglist = common_args.MULTI_STREAM_HARNESS_ARGS
elif config["scenario"] == "Server":
arglist = common_args.SERVER_HARNESS_ARGS
if benchmark_name == "gnmt":
arglist = common_args.GNMT_HARNESS_ARGS
config = apply_overrides(config, arglist)
# Validate arguments
if not dla:
config["dla_batch_size"] = None
if not gpu:
config["gpu_batch_size"] = None
if benchmark_name == "gnmt":
from code.common.harness import GNMTHarness
harness = GNMTHarness(config, name=benchmark_name)
else:
from code.common.harness import BenchmarkHarness
harness = BenchmarkHarness(config, name=benchmark_name)
result = harness.run_harness()
logging.info("Result: {:}".format(result))
# Append result to perf result summary log.
log_dir = config["log_dir"]
summary_file = os.path.join(log_dir, "perf_harness_summary.json")
results = {}
if os.path.exists(summary_file):
with open(summary_file) as f:
results = json.load(f)
config_name = "{:}-{:}".format(config["system_id"], config["scenario"])
if config_name not in results:
results[config_name] = {}
results[config_name][benchmark_name] = result
with open(summary_file, "w") as f:
json.dump(results, f)
# Check accuracy from loadgen logs.
accuracy = check_accuracy(os.path.join(log_dir, config["system_id"], benchmark_name, config["scenario"], "mlperf_log_accuracy.json"),
benchmark_name, config)
summary_file = os.path.join(log_dir, "accuracy_summary.json")
results = {}
if os.path.exists(summary_file):
with open(summary_file) as f:
results = json.load(f)
config_name = "{:}-{:}".format(config["system_id"], config["scenario"])
if config_name not in results:
results[config_name] = {}
results[config_name][benchmark_name] = accuracy
with open(summary_file, "w") as f:
json.dump(results, f)
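# Shape of the two summary files written above (keys and values are illustrative):
#   perf_harness_summary.json: {"<system_id>-<scenario>": {"<benchmark>": "<harness result string>", ...}, ...}
#   accuracy_summary.json:     {"<system_id>-<scenario>": {"<benchmark>": "Accuracy = ..., Threshold = ...", ...}, ...}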
def check_accuracy(log_file, benchmark_name, config):
accuracy_targets = {
"resnet": 76.46,
"mobilenet": 71.68,
"ssd-large": 20.0,
"ssd-small": 22.0,
"gnmt": 23.9
}
threshold_ratios = {
"resnet": 0.99,
"mobilenet": 0.98,
"ssd-large": 0.99,
"ssd-small": 0.99,
"gnmt": 0.99
}
if not os.path.exists(log_file):
return "Cannot find accuracy JSON file."
with open(log_file, "r") as f:
loadgen_dump = json.load(f)
if len(loadgen_dump) == 0:
return "No accuracy results in PerformanceOnly mode."
threshold = accuracy_targets[benchmark_name] * threshold_ratios[benchmark_name]
if benchmark_name in ["resnet", "mobilenet"]:
cmd = "python3 build/inference/v0.5/classification_and_detection/tools/accuracy-imagenet.py --mlperf-accuracy-file {:} \
--imagenet-val-file data_maps/imagenet/val_map.txt --dtype int32 ".format(log_file)
regex = r"accuracy=([0-9\.]+)%, good=[0-9]+, total=[0-9]+"
elif benchmark_name == "ssd-small":
cmd = "python3 build/inference/v0.5/classification_and_detection/tools/accuracy-coco.py --mlperf-accuracy-file {:} \
--coco-dir {:} --output-file build/ssd-small-results.json".format(
log_file, os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "coco"))
regex = r"mAP=([0-9\.]+)%"
elif benchmark_name == "ssd-large":
cmd = "python3 build/inference/v0.5/classification_and_detection/tools/accuracy-coco.py --mlperf-accuracy-file {:} \
--coco-dir {:} --output-file build/ssd-large-results.json --use-inv-map".format(
log_file, os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "coco"))
regex = r"mAP=([0-9\.]+)%"
elif benchmark_name == "gnmt":
cmd = "python3 build/inference/v0.5/translation/gnmt/tensorflow/process_accuracy.py --accuracy_log {:} \
--reference build/preprocessed_data/nmt/GNMT/newstest2014.tok.bpe.32000.de".format(log_file)
regex = r"BLEU: ([0-9\.]+)"
else:
raise ValueError("Unknown benchmark: {:}".format(benchmark_name))
output = run_command(cmd, get_output=True)
result_regex = re.compile(regex)
accuracy = None
with open(os.path.join(os.path.dirname(log_file), "accuracy.txt"), "w") as f:
for line in output:
print(line, file=f)
for line in output:
result_match = result_regex.match(line)
if not result_match is None:
accuracy = float(result_match.group(1))
break
accuracy_result = "PASSED" if accuracy is not None and accuracy >= threshold else "FAILED"
if accuracy_result == "FAILED":
raise RuntimeError("Accuracy = {:.3f}, Threshold = {:.3f}. Accuracy test {:}!".format(accuracy, threshold, accuracy_result))
return "Accuracy = {:.3f}, Threshold = {:.3f}. Accuracy test {:}.".format(accuracy, threshold, accuracy_result)
def handle_calibrate(benchmark_name, config):
logging.info("Generating calibration cache for Benchmark \"{:}\"".format(benchmark_name))
config = apply_overrides(config, common_args.CALIBRATION_ARGS)
config["dla_core"] = None
b = get_benchmark(benchmark_name, config)
b.calibrate()
def main():
# Turn off MPS in case it's turned on.
turn_off_mps()
main_args = common_args.parse_args(common_args.MAIN_ARGS)
benchmarks = ["mobilenet", "resnet", "ssd-small", "ssd-large", "gnmt"]
benchmarks_legacy_map = {
"ResNet50": "resnet",
"MobileNet": "mobilenet",
"SSDMobileNet": "ssd-small",
"SSDResNet34": "ssd-large",
"GNMT": "gnmt"
}
if main_args["benchmarks"] is not None:
benchmarks = main_args["benchmarks"].split(",")
for i, benchmark in enumerate(benchmarks):
if benchmark in benchmarks_legacy_map:
benchmarks[i] = benchmarks_legacy_map[benchmark]
scenarios = ["SingleStream", "MultiStream", "Offline", "Server"]
scenarios_legacy_map = {
"single_stream": "SingleStream",
"multi_stream": "MultiStream",
"offline": "Offline",
"server": "Server"
}
if main_args["scenarios"] is not None:
scenarios = main_args["scenarios"].split(",")
for i, scenario in enumerate(scenarios):
if scenario in scenarios_legacy_map:
scenarios[i] = scenarios_legacy_map[scenario]
# Automatically detect architecture and scenarios and load configs
config_files = main_args["configs"]
if config_files == "":
config_files = find_config_files(benchmarks, scenarios)
if config_files == "":
logging.warn("Cannot find any valid configs for the specified benchmarks scenarios.")
return
logging.info("Using config files: {:}".format(str(config_files)))
configs = load_configs(config_files)
for config in configs:
logging.info("Processing config \"{:}\"".format(config["config_name"]))
benchmark_name = config["benchmark"]
benchmark_conf = config[benchmark_name]
# Passthrough for top level values
benchmark_conf["system_id"] = config["system_id"]
benchmark_conf["scenario"] = config["scenario"]
benchmark_conf["benchmark"] = config["benchmark"]
benchmark_conf["config_name"] = config["config_name"]
need_gpu = not main_args["no_gpu"]
need_dla = not main_args["gpu_only"]
if main_args["action"] == "generate_engines":
# Turn on MPS if server scenario and if active_sms is specified.
benchmark_conf = apply_overrides(benchmark_conf, ["active_sms"])
active_sms = benchmark_conf.get("active_sms", None)
if config["scenario"] == "Server" and active_sms is not None and active_sms < 100:
with ScopedMPS(active_sms):
launch_handle_generate_engine(benchmark_name, benchmark_conf, need_gpu, need_dla)
else:
launch_handle_generate_engine(benchmark_name, benchmark_conf, need_gpu, need_dla)
elif main_args["action"] == "run_harness":
handle_run_harness(benchmark_name, benchmark_conf, need_gpu, need_dla)
elif main_args["action"] == "calibrate":
# To generate calibration cache, we only need to run each benchmark once. Use offline config.
if benchmark_conf["scenario"] == "Offline":
handle_calibrate(benchmark_name, benchmark_conf)
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
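# Hypothetical invocation sketch (flag spellings are assumed from the argument keys used above,
# e.g. "action", "benchmarks", "scenarios" parsed via common_args.MAIN_ARGS):
#   python3 code/main.py --action generate_engines --benchmarks resnet --scenarios Offline
#   python3 code/main.py --action run_harness --benchmarks resnet,ssd-large --scenarios Offline,Server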
arglist = common_args.OFFLINE_HARNESS_ARGS
elif config["scenario"] == "MultiStream":
arglist = common_args.MULTI_STREAM_HARNESS_ARGS
elif config["scenario"] == "Server":
arglist = common_args.SERVER_HARNESS_ARGS
if benchmark_name == "gnmt":
arglist = common_args.GNMT_HARNESS_ARGS
config = apply_overrides(config, arglist)
# Validate arguments
if not dla:
config["dla_batch_size"] = None
if not gpu:
config["gpu_batch_size"] = None
if benchmark_name == "gnmt":
from code.common.harness import GNMTHarness
harness = GNMTHarness(config, name=benchmark_name)
else:
from code.common.harness import BenchmarkHarness
harness = BenchmarkHarness(config, name=benchmark_name)
result = harness.run_harness()
logging.info("Result: {:}".format(result))
# Append result to perf result summary log.
log_dir = config["log_dir"]
summary_file = os.path.join(log_dir, "perf_harness_summary.json")
results = {}
if os.path.exists(summary_file):
with open(summary_file) as f:
results = json.load(f)
config_name = "{:}-{:}".format(config["system_id"], config["scenario"])
if config_name not in results:
results[config_name] = {}
results[config_name][benchmark_name] = result
with open(summary_file, "w") as f:
json.dump(results, f)
# Check accuracy from loadgen logs.
accuracy = check_accuracy(os.path.join(log_dir, config["system_id"], benchmark_name, config["scenario"], "mlperf_log_accuracy.json"),
benchmark_name, config)
summary_file = os.path.join(log_dir, "accuracy_summary.json")
results = {}
if os.path.exists(summary_file):
with open(summary_file) as f:
results = json.load(f)
config_name = "{:}-{:}".format(config["system_id"], config["scenario"])
if config_name not in results:
results[config_name] = {}
results[config_name][benchmark_name] = accuracy
with open(summary_file, "w") as f:
json.dump(results, f)
def check_accuracy(log_file, benchmark_name, config):
accuracy_targets = {
"resnet": 76.46,
"mobilenet": 71.68,
"ssd-large": 20.0,
"ssd-small": 22.0,
"gnmt": 23.9
}
threshold_ratios = {
"resnet": 0.99,
"mobilenet": 0.98,
"ssd-large": 0.99,
"ssd-small": 0.99,
"gnmt": 0.99
}
if not os.path.exists(log_file):
return "Cannot find accuracy JSON file."
with open(log_file, "r") as f:
loadgen_dump = json.load(f)
if len(loadgen_dump) == 0:
return "No accuracy results in PerformanceOnly mode."
threshold = accuracy_targets[benchmark_name] * threshold_ratios[benchmark_name]
if benchmark_name in ["resnet", "mobilenet"]:
cmd = "python3 build/inference/v0.5/classification_and_detection/tools/accuracy-imagenet.py --mlperf-accuracy-file {:} \
--imagenet-val-file data_maps/imagenet/val_map.txt --dtype int32 ".format(log_file)
regex = r"accuracy=([0-9\.]+)%, good=[0-9]+, total=[0-9]+"
elif benchmark_name == "ssd-small":
cmd = "python3 build/inference/v0.5/classification_and_detection/tools/accuracy-coco.py --mlperf-accuracy-file {:} \
--coco-dir {:} --output-file build/ssd-small-results.json".format(
log_file, os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "coco"))
regex = r"mAP=([0-9\.]+)%"
elif benchmark_name == "ssd-large":
cmd = "python3 build/inference/v0.5/classification_and_detection/tools/accuracy-coco.py --mlperf-accuracy-file {:} \
--coco-dir {:} --output-file build/ssd-large-results.json --use-inv-map".format(
log_file, os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "coco"))
regex = r"mAP=([0-9\.]+)%"
elif benchmark_name == "gnmt":
cmd = "python3 build/inference/v0.5/translation/gnmt/tensorflow/process_accuracy.py --accuracy_log {:} \
--reference build/preprocessed_data/nmt/GNMT/newstest2014.tok.bpe.32000.de".format(log_file)
regex = r"BLEU: ([0-9\.]+)"
else:
raise ValueError("Unknown benchmark: {:}".format(benchmark_name))
output = run_command(cmd, get_output=True)
result_regex = re.compile(regex)
accuracy = None
with open(os.path.join(os.path.dirname(log_file), "accuracy.txt"), "w") as f:
for line in output:
print(line, file=f)
for line in output:
result_match = result_regex.match(line)
if not result_match is None:
accuracy = float(result_match.group(1))
break
accuracy_result = "PASSED" if accuracy is not None and accuracy >= threshold else "FAILED"
if accuracy_result == "FAILED":
raise RuntimeError("Accuracy = {:.3f}, Threshold = {:.3f}. Accuracy test {:}!".format(accuracy, threshold, accuracy_result))
return "Accuracy = {:.3f}, Threshold = {:.3f}. Accuracy test {:}.".format(accuracy, threshold, accuracy_result)
def handle_calibrate(benchmark_name, config):
logging.info("Generating calibration cache for Benchmark \"{:}\"".format(benchmark_name))
config = apply_overrides(config, common_args.CALIBRATION_ARGS)
config["dla_core"] = None
b = get_benchmark(benchmark_name, config)
b.calibrate()
def main():
# Turn off MPS in case it's turned on.
turn_off_mps()
main_args = common_args.parse_args(common_args.MAIN_ARGS)
benchmarks = ["mobilenet", "resnet", "ssd-small", "ssd-large", "gnmt"]
benchmarks_legacy_map = {
"ResNet50": "resnet",
"MobileNet": "mobilenet",
"SSDMobileNet": "ssd-small",
"SSDResNet34": "ssd-large",
"GNMT": "gnmt"
}
if main_args["benchmarks"] is not None:
benchmarks = main_args["benchmarks"].split(",")
for i, benchmark in enumerate(benchmarks):
if benchmark in benchmarks_legacy_map:
benchmarks[i] = benchmarks_legacy_map[benchmark]
scenarios = ["SingleStream", "MultiStream", "Offline", "Server"]
scenarios_legacy_map = {
"single_stream": "SingleStream",
"multi_stream": "MultiStream",
"offline": "Offline",
"server": "Server"
}
if main_args["scenarios"] is not None:
scenarios = main_args["scenarios"].split(",")
for i, scenario in enumerate(scenarios):
if scenario in scenarios_legacy_map:
scenarios[i] = scenarios_legacy_map[scenario]
# Automatically detect architecture and scenarios and load configs
config_files = main_args["configs"]
if config_files == "":
config_files = find_config_files(benchmarks, scenarios)
if config_files == "":
logging.warn("Cannot find any valid configs for the specified benchmarks scenarios.")
return
logging.info("Using config files: {:}".format(str(config_files)))
configs = load_configs(config_files)
for config in configs:
logging.info("Processing config \"{:}\"".format(config["config_name"]))
benchmark_name = config["benchmark"]
benchmark_conf = config[benchmark_name]
# Passthrough for top level values
benchmark_conf["system_id"] = config["system_id"]
benchmark_conf["scenario"] = config["scenario"]
benchmark_conf["benchmark"] = config["benchmark"]
benchmark_conf["config_name"] = config["config_name"]
need_gpu = not main_args["no_gpu"]
need_dla = not main_args["gpu_only"]
if main_args["action"] == "generate_engines":
# Turn on MPS if server scenario and if active_sms is specified.
benchmark_conf = apply_overrides(benchmark_conf, ["active_sms"])
active_sms = benchmark_conf.get("active_sms", None)
if config["scenario"] == "Server" and active_sms is not None and active_sms < 100:
with ScopedMPS(active_sms):
launch_handle_generate_engine(benchmark_name, benchmark_conf, need_gpu, need_dla)
else:
launch_handle_generate_engine(benchmark_name, benchmark_conf, need_gpu, need_dla)
elif main_args["action"] == "run_harness":
handle_run_harness(benchmark_name, benchmark_conf, need_gpu, need_dla)
elif main_args["action"] == "calibrate":
# To generate calibration cache, we only need to run each benchmark once. Use offline config.
if benchmark_conf["scenario"] == "Offline":
handle_calibrate(benchmark_name, benchmark_conf)
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| en | 0.846398 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Do not use a map. We want to import benchmarks as we need them, because some take # time to load due to plugins. # Make a copy so we don't modify original dict # Unset values (None) and unset store_true values (False) are both false-y # Build engines in another process to make sure we exit with clean cuda context so that MPS can be turned off. # Validate arguments # Append result to perf result summary log. # Check accuracy from loadgen logs. # Turn off MPS in case it's turned on. # Automatically detect architecture and scenarios and load configs # Passthrough for top level values # Turn on MPS if server scenario and if active_sms is specified. # To generate calibration cache, we only need to run each benchmark once. Use offline config. | 1.807481 | 2 |
utils/place.py | rwaldron/pcbmode | 0 | 6633164 | #!/usr/bin/python
from lxml import etree as et
import config
import messages as msg
# pcbmode modules
import utils
import svg
from point import Point
def placeShape(shape, svg_layer, invert=False, original=False):
"""
Places a shape or type 'Shape' onto SVG layer 'svg_layer'.
'invert' : placed path should be mirrored
'original': use the original path, not the transformed one
"""
sig_dig = config.cfg['significant-digits']
style_string = shape.getStyleString()
style_type = shape.getStyleType()
gerber_lp = shape.getGerberLP()
location = shape.getLocation()
if original == False:
translate = 'translate(%s,%s)' % (round((((1,-1)[invert])*location.x), sig_dig),
round(location.y*config.cfg['invert-y'], sig_dig))
transform = translate
else:
transform = None
if invert == True:
path = shape.getTransformedPath(True)
else:
if original == True:
path = shape.getOriginalPath()
else:
path = shape.getTransformedPath()
element = et.SubElement(svg_layer,
'path',
d=path)
# Set style string
element.set('style', style_string)
    # Set style type in pcbmode namespace. This is later used to easily
# identify the type when the path is converted to Gerber format
element.set('{'+config.cfg['ns']['pcbmode']+'}style', style_type)
if transform != None:
element.set('transform', transform)
if gerber_lp != None:
element.set('{'+config.cfg['ns']['pcbmode']+'}gerber-lp', gerber_lp)
if shape.getType() == 'text':
element.set('{'+config.cfg['ns']['pcbmode']+'}text', shape.getText())
return element
def placeDrill(drill,
layer,
location,
scale,
soldermask_layers={},
mask_groups={}):
"""
Places the drilling point
"""
diameter = drill.get('diameter')
offset = utils.to_Point(drill.get('offset') or [0, 0])
path = svg.drill_diameter_to_path(diameter)
mask_path = svg.circle_diameter_to_path(diameter)
sig_dig = config.cfg['significant-digits']
transform = 'translate(%s %s)' % (round((location.x + offset.x)*scale, sig_dig),
round((-location.y - offset.y)*scale, sig_dig))
drill_element = et.SubElement(layer, 'path',
transform=transform,
d=path,
id='pad_drill',
diameter=str(diameter))
pour_buffer = 1.0
try:
pour_buffer = board_cfg['distances']['buffer_from_pour_to'].get('drill') or 1.0
except:
pass
# add a mask buffer between pour and board outline
if mask_groups != {}:
for pcb_layer in surface_layers:
mask_group = et.SubElement(mask_groups[pcb_layer], 'g',
id="drill_masks")
pour_mask = et.SubElement(mask_group, 'path',
transform=transform,
style=MASK_STYLE % str(pour_buffer*2),
gerber_lp="c",
d=mask_path)
    # place the size of the drill; if the drill element has a
# "show_diameter": "no", then this can be suppressed
# default to 'yes'
show_diameter = drill.get('show_diameter') or 'yes'
if show_diameter.lower() != 'no':
text = "%s mm" % (str(diameter))
text_style = config.stl['layout']['drills'].get('text') or None
if text_style is not None:
text_style['font-size'] = str(diameter/10.0)+'px'
text_style = utils.dict_to_style(text_style)
t = et.SubElement(layer, 'text',
x=str(location.x),
# TODO: get rid of this hack
y=str(-location.y-(diameter/4)),
style=text_style)
t.text = text
# place soldermask unless specified otherwise
# default is 'yes'
add_soldermask = drill.get('add_soldermask') or 'yes'
style = utils.dict_to_style(config.stl['layout']['soldermask'].get('fill'))
possible_answers = ['yes', 'top', 'top only', 'bottom', 'bottom only', 'top and bottom']
if (add_soldermask.lower() in possible_answers) and (soldermask_layers != {}):
# TODO: get this into a configuration parameter
drill_soldermask_scale_factors = drill.get('soldermask_scale_factors') or {'top':1.2, 'bottom':1.2}
path_top = svg.circle_diameter_to_path(diameter * drill_soldermask_scale_factors['top'])
path_bottom = svg.circle_diameter_to_path(diameter * drill_soldermask_scale_factors['bottom'])
if add_soldermask.lower() == 'yes' or add_soldermask.lower() == 'top and bottom':
drill_element = et.SubElement(soldermask_layers['top'],
'path',
transform=transform,
style=style,
d=path_top)
drill_element = et.SubElement(soldermask_layers['bottom'],
'path',
transform=transform,
style=style,
d=path_bottom)
elif add_soldermask.lower() == 'top only' or add_soldermask.lower() == 'top':
drill_element = et.SubElement(soldermask_layers['top'],
'path',
transform=transform,
style=style,
d=path_top)
elif add_soldermask.lower() == 'bottom only' or add_soldermask.lower() == 'bottom':
drill_element = et.SubElement(soldermask_layers['bottom'],
'path',
transform=transform,
style=style,
d=path_bottom)
else:
print "ERROR: unrecognised drills soldermask option"
return
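# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original pcbmode source). It assumes
# config.cfg/config.stl have already been populated by pcbmode and that
# 'shape' is a Shape instance created elsewhere; all names are illustrative:
#
#   from lxml import etree as et
#   svg_layer = et.Element('g', id='copper-top')
#   el = placeShape(shape, svg_layer)                  # normal placement
#   el_mirrored = placeShape(shape, svg_layer, invert=True)
# ---------------------------------------------------------------------------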
| #!/usr/bin/python
from lxml import etree as et
import config
import messages as msg
# pcbmode modules
import utils
import svg
from point import Point
def placeShape(shape, svg_layer, invert=False, original=False):
"""
Places a shape or type 'Shape' onto SVG layer 'svg_layer'.
'invert' : placed path should be mirrored
'original': use the original path, not the transformed one
"""
sig_dig = config.cfg['significant-digits']
style_string = shape.getStyleString()
style_type = shape.getStyleType()
gerber_lp = shape.getGerberLP()
location = shape.getLocation()
if original == False:
translate = 'translate(%s,%s)' % (round((((1,-1)[invert])*location.x), sig_dig),
round(location.y*config.cfg['invert-y'], sig_dig))
transform = translate
else:
transform = None
if invert == True:
path = shape.getTransformedPath(True)
else:
if original == True:
path = shape.getOriginalPath()
else:
path = shape.getTransformedPath()
element = et.SubElement(svg_layer,
'path',
d=path)
# Set style string
element.set('style', style_string)
    # Set style type in pcbmode namespace. This is later used to easily
# identify the type when the path is converted to Gerber format
element.set('{'+config.cfg['ns']['pcbmode']+'}style', style_type)
if transform != None:
element.set('transform', transform)
if gerber_lp != None:
element.set('{'+config.cfg['ns']['pcbmode']+'}gerber-lp', gerber_lp)
if shape.getType() == 'text':
element.set('{'+config.cfg['ns']['pcbmode']+'}text', shape.getText())
return element
def placeDrill(drill,
layer,
location,
scale,
soldermask_layers={},
mask_groups={}):
"""
Places the drilling point
"""
diameter = drill.get('diameter')
offset = utils.to_Point(drill.get('offset') or [0, 0])
path = svg.drill_diameter_to_path(diameter)
mask_path = svg.circle_diameter_to_path(diameter)
sig_dig = config.cfg['significant-digits']
transform = 'translate(%s %s)' % (round((location.x + offset.x)*scale, sig_dig),
round((-location.y - offset.y)*scale, sig_dig))
drill_element = et.SubElement(layer, 'path',
transform=transform,
d=path,
id='pad_drill',
diameter=str(diameter))
pour_buffer = 1.0
try:
pour_buffer = board_cfg['distances']['buffer_from_pour_to'].get('drill') or 1.0
except:
pass
# add a mask buffer between pour and board outline
if mask_groups != {}:
for pcb_layer in surface_layers:
mask_group = et.SubElement(mask_groups[pcb_layer], 'g',
id="drill_masks")
pour_mask = et.SubElement(mask_group, 'path',
transform=transform,
style=MASK_STYLE % str(pour_buffer*2),
gerber_lp="c",
d=mask_path)
    # place the size of the drill; if the drill element has a
# "show_diameter": "no", then this can be suppressed
# default to 'yes'
show_diameter = drill.get('show_diameter') or 'yes'
if show_diameter.lower() != 'no':
text = "%s mm" % (str(diameter))
text_style = config.stl['layout']['drills'].get('text') or None
if text_style is not None:
text_style['font-size'] = str(diameter/10.0)+'px'
text_style = utils.dict_to_style(text_style)
t = et.SubElement(layer, 'text',
x=str(location.x),
# TODO: get rid of this hack
y=str(-location.y-(diameter/4)),
style=text_style)
t.text = text
# place soldermask unless specified otherwise
# default is 'yes'
add_soldermask = drill.get('add_soldermask') or 'yes'
style = utils.dict_to_style(config.stl['layout']['soldermask'].get('fill'))
possible_answers = ['yes', 'top', 'top only', 'bottom', 'bottom only', 'top and bottom']
if (add_soldermask.lower() in possible_answers) and (soldermask_layers != {}):
# TODO: get this into a configuration parameter
drill_soldermask_scale_factors = drill.get('soldermask_scale_factors') or {'top':1.2, 'bottom':1.2}
path_top = svg.circle_diameter_to_path(diameter * drill_soldermask_scale_factors['top'])
path_bottom = svg.circle_diameter_to_path(diameter * drill_soldermask_scale_factors['bottom'])
if add_soldermask.lower() == 'yes' or add_soldermask.lower() == 'top and bottom':
drill_element = et.SubElement(soldermask_layers['top'],
'path',
transform=transform,
style=style,
d=path_top)
drill_element = et.SubElement(soldermask_layers['bottom'],
'path',
transform=transform,
style=style,
d=path_bottom)
elif add_soldermask.lower() == 'top only' or add_soldermask.lower() == 'top':
drill_element = et.SubElement(soldermask_layers['top'],
'path',
transform=transform,
style=style,
d=path_top)
elif add_soldermask.lower() == 'bottom only' or add_soldermask.lower() == 'bottom':
drill_element = et.SubElement(soldermask_layers['bottom'],
'path',
transform=transform,
style=style,
d=path_bottom)
else:
print "ERROR: unrecognised drills soldermask option"
return
| en | 0.583149 | #!/usr/bin/python # pcbmode modules Places a shape or type 'Shape' onto SVG layer 'svg_layer'. 'invert' : placed path should be mirrored 'original': use the original path, not the transformed one # Set style string # Set style type in pcbmode namespace. This is later used to easliy # identify the type when the path is converted to Gerber format Places the drilling point # add a mask buffer between pour and board outline # place the size of the drill; id the drill element has a # "show_diameter": "no", then this can be suppressed # default to 'yes' # TODO: get rid of this hack # place soldermask unless specified otherwise # default is 'yes' # TODO: get this into a configuration parameter | 2.889535 | 3 |
Homework3/json_parser.py | emilyblack95/CS-5513 | 1 | 6633165 | <filename>Homework3/json_parser.py
import json
counter = 0
page = open('movieData.json', 'r')
parsed = json.loads(page.read())
f = open('dataInsertion.js', 'w+')
f.write('db.createCollection("movies");\n')
f.write('db.movies.ensureIndex("movie_id");\n')
for item in parsed['my_movies']:
if counter<5:
print(json.dumps(item))
print('\n')
counter+=1
f.write('db.movies.insert([' + json.dumps(item) + ']);\n')
f.close() | <filename>Homework3/json_parser.py
import json
counter = 0
page = open('movieData.json', 'r')
parsed = json.loads(page.read())
f = open('dataInsertion.js', 'w+')
f.write('db.createCollection("movies");\n')
f.write('db.movies.ensureIndex("movie_id");\n')
for item in parsed['my_movies']:
if counter<5:
print(json.dumps(item))
print('\n')
counter+=1
f.write('db.movies.insert([' + json.dumps(item) + ']);\n')
f.close() | none | 1 | 3.143598 | 3 |
|
graphOfStretchingResistanceConstant.py | Peeks1/AffineCarpetProject | 0 | 6633166 | <gh_stars>0
import matplotlib.pyplot as plt
import os.path as p
import os
# INPUT HERE
# what level affine carpet would you like rhos for:
precarpet_level = 6
# how large would you like the small squares to be:
sideOfSmallSquares = 1 / 4
# would you like a cross or X-graph (input "+" or "x"):
kindOfGraph = "x"
# what stretches would you like to compute
stretchFactors = [1/8, 1/4, 1/2, 1, 2, 4, 8]
# other important variable calculated from above variables
sideOfCenterHole = 1 - sideOfSmallSquares * 2
stretchFactors.sort()
# file naming variables
kogString = ''
typeOfCarpet = str(sideOfSmallSquares.__round__(3)) + "affineCarpetSRRatioData"
level = 'level' + str(precarpet_level)
if kindOfGraph == '+':
kogString = 'crossGraphData'
elif kindOfGraph == 'x':
kogString = 'xGraphData'
else:
exit()
saveFileAs = kogString + '/' + typeOfCarpet + '/' + level + '.pdf'
if not p.isdir(kogString + '/' + typeOfCarpet):
os.makedirs(kogString + '/' + typeOfCarpet)
# extract base resistance
baseFolder = str(sideOfSmallSquares.__round__(3)) + 'affineCarpet1x' + str(1)
prevLevel = 'level' + str(precarpet_level - 1)
prevFile = kogString + "/" + baseFolder + "/" + prevLevel + 'resistance.txt'
# check that the unstretched level-(n-1) resistance file exists before reading it;
# note this must use p.isfile (prevFile is a file path, not a directory), which is
# why the earlier p.isdir version of this check never worked
if not p.isfile(prevFile):
    print('You need to calculate the resistance of the ' + prevLevel + ' ' + '1x1 carpet using resistanceSaver.py')
    exit()
baseFile = open(prevFile, 'r')
baseFileData = baseFile.readlines()
baseResistance = float(baseFileData[1][14:])
# list of the S values (stretched resistance of n)
stretechedResistances = []
for i in stretchFactors:
resistanceFolder = str(sideOfSmallSquares.__round__(3)) + 'affineCarpet1x' + str(i.__round__(3))
level = 'level' + str(precarpet_level)
filePath = kogString + "/" + resistanceFolder + "/" + level + 'resistance.txt'
if not p.isfile(filePath):
print('You need to calculate the resistance of the ' + level + ' ' + '1x' + str(i.__round__(3)) +
' carpet using resistanceSaver.py')
exit()
file = open(filePath, 'r')
fileData = file.readlines()
stretechedResistances.append(float(fileData[1][14:]))
# calculate rho
rhos = []
for f in stretechedResistances:
rhos.append(f/baseResistance)
# plot
plt.scatter(stretchFactors, rhos)
plt.xticks(stretchFactors)
plt.yticks(range(0, 3))
plt.xlabel("Amount of Stretch")
plt.ylabel("Rho of Graph")
for j in range(len(rhos)):
plt.text(stretchFactors[j], rhos[j] + .05, rhos[j].__round__(3))
# title
levelTitle = "level " + str(precarpet_level) + " "
smallSquareStr = str(sideOfSmallSquares.__round__(3))
plt.title("Resistance of the Stretched " + levelTitle + smallSquareStr + "Affine Carpet Divided by the Previous Level's "
"Unstretched Resistance")
# save
stretchesStr = ''
for stretch in stretchFactors:
stretchesStr += str(stretch.__round__(2)) + ","
if p.isfile(saveFileAs):
print('You already have rho data for this level. Press y if you would like to overwrite this data.')
keypress = input()
if keypress == 'y':
plt.savefig(saveFileAs)
else:
plt.savefig(saveFileAs)
plt.show()
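# ---------------------------------------------------------------------------
# Summary of the quantity plotted above (restating the code, for clarity):
#   rho(s) = R_stretched(level n, stretch s) / R_unstretched(level n-1)
# e.g. if the level-(n-1) 1x1 carpet has resistance 2.0 and the level-n carpet
# stretched by s has resistance 3.0, the point plotted at s is rho = 1.5.
# ---------------------------------------------------------------------------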
| import matplotlib.pyplot as plt
import os.path as p
import os
# INPUT HERE
# what level affine carpet would you like rhos for:
precarpet_level = 6
# how large would you like the small squares to be:
sideOfSmallSquares = 1 / 4
# would you like a cross or X-graph (input "+" or "x"):
kindOfGraph = "x"
# what stretches would you like to compute
stretchFactors = [1/8, 1/4, 1/2, 1, 2, 4, 8]
# other important variable calculated from above variables
sideOfCenterHole = 1 - sideOfSmallSquares * 2
stretchFactors.sort()
# file naming variables
kogString = ''
typeOfCarpet = str(sideOfSmallSquares.__round__(3)) + "affineCarpetSRRatioData"
level = 'level' + str(precarpet_level)
if kindOfGraph == '+':
kogString = 'crossGraphData'
elif kindOfGraph == 'x':
kogString = 'xGraphData'
else:
exit()
saveFileAs = kogString + '/' + typeOfCarpet + '/' + level + '.pdf'
if not p.isdir(kogString + '/' + typeOfCarpet):
os.makedirs(kogString + '/' + typeOfCarpet)
# extract base resistance
baseFolder = str(sideOfSmallSquares.__round__(3)) + 'affineCarpet1x' + str(1)
prevLevel = 'level' + str(precarpet_level - 1)
prevFile = kogString + "/" + baseFolder + "/" + prevLevel + 'resistance.txt'
# check that the unstretched level-(n-1) resistance file exists before reading it;
# note this must use p.isfile (prevFile is a file path, not a directory), which is
# why the earlier p.isdir version of this check never worked
if not p.isfile(prevFile):
    print('You need to calculate the resistance of the ' + prevLevel + ' ' + '1x1 carpet using resistanceSaver.py')
    exit()
baseFile = open(prevFile, 'r')
baseFileData = baseFile.readlines()
baseResistance = float(baseFileData[1][14:])
# list of the S values (stretched resistance of n)
stretechedResistances = []
for i in stretchFactors:
resistanceFolder = str(sideOfSmallSquares.__round__(3)) + 'affineCarpet1x' + str(i.__round__(3))
level = 'level' + str(precarpet_level)
filePath = kogString + "/" + resistanceFolder + "/" + level + 'resistance.txt'
if not p.isfile(filePath):
print('You need to calculate the resistance of the ' + level + ' ' + '1x' + str(i.__round__(3)) +
' carpet using resistanceSaver.py')
exit()
file = open(filePath, 'r')
fileData = file.readlines()
stretechedResistances.append(float(fileData[1][14:]))
# calculate rho
rhos = []
for f in stretechedResistances:
rhos.append(f/baseResistance)
# plot
plt.scatter(stretchFactors, rhos)
plt.xticks(stretchFactors)
plt.yticks(range(0, 3))
plt.xlabel("Amount of Stretch")
plt.ylabel("Rho of Graph")
for j in range(len(rhos)):
plt.text(stretchFactors[j], rhos[j] + .05, rhos[j].__round__(3))
# title
levelTitle = "level " + str(precarpet_level) + " "
smallSquareStr = str(sideOfSmallSquares.__round__(3))
plt.title("Resistance of the Stretched " + levelTitle + smallSquareStr + "Affine Carpet Divided by the Previous Level's "
"Unstretched Resistance")
# save
stretchesStr = ''
for stretch in stretchFactors:
stretchesStr += str(stretch.__round__(2)) + ","
if p.isfile(saveFileAs):
print('You already have rho data for this level. Press y if you would like to overwrite this data.')
keypress = input()
if keypress == 'y':
plt.savefig(saveFileAs)
else:
plt.savefig(saveFileAs)
plt.show() | en | 0.885056 | # INPUT HERE # what level affine carpet would you like rhos for: # how large would you like the small squares to be: # would you like a cross or X-graph (input "+" or "x"): # what stretches would you like to compute # other important variable calculated from above variables # file naming variables # extract base resistance # for some reason the below code just doesn't work if not p.isdir(prevFile): print('You need to calculate the resistance of the ' + prevLevel + ' ' + '1x1 carpet using resistanceSaver.py') exit() # list of the S values (stretched resistance of n) # calculate rho # plot # title # save | 2.773698 | 3 |
server/lib/python/cartodb_services/test/credentials.py | CartoDB/dataservices-api | 22 | 6633167 | import os
def mapbox_api_key():
"""Returns Mapbox API key. Requires setting MAPBOX_API_KEY environment variable."""
return os.environ['MAPBOX_API_KEY']
def tomtom_api_key():
"""Returns TomTom API key. Requires setting TOMTOM_API_KEY environment variable."""
return os.environ['TOMTOM_API_KEY']
def geocodio_api_key():
"""Returns Geocodio API key. Requires setting GEOCODIO_API_KEY environment variable."""
return os.environ['GEOCODIO_API_KEY']
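# ---------------------------------------------------------------------------
# Usage sketch (not part of the original test helpers). The helpers simply read
# the corresponding environment variables, so a test run is expected to export
# them first; the key values below are placeholders:
#
#   export MAPBOX_API_KEY=pk.example          # shell, before running the tests
#   mapbox_api_key()   # -> 'pk.example'
#   tomtom_api_key()   # raises KeyError if TOMTOM_API_KEY is not set
# ---------------------------------------------------------------------------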
| import os
def mapbox_api_key():
"""Returns Mapbox API key. Requires setting MAPBOX_API_KEY environment variable."""
return os.environ['MAPBOX_API_KEY']
def tomtom_api_key():
"""Returns TomTom API key. Requires setting TOMTOM_API_KEY environment variable."""
return os.environ['TOMTOM_API_KEY']
def geocodio_api_key():
"""Returns Geocodio API key. Requires setting GEOCODIO_API_KEY environment variable."""
return os.environ['GEOCODIO_API_KEY']
| en | 0.49495 | Returns Mapbox API key. Requires setting MAPBOX_API_KEY environment variable. Returns TomTom API key. Requires setting TOMTOM_API_KEY environment variable. Returns Geocodio API key. Requires setting GEOCODIO_API_KEY environment variable. | 1.836155 | 2 |
dc09_spt/msg/dc05_msg.py | panos-stavrianos/dc09_spt | 19 | 6633168 | <filename>dc09_spt/msg/dc05_msg.py
# ----------------------------
# Class to implement the SIA DC05 message
# (c 2018 van Ovost Automatisering b.v.
# Author : <NAME>
# ----------------------------
from dc09_spt.param import *
"""
Copyright (c) 2018 van Ovost Automatisering b.v.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
you may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class dc05_codes:
"""
Some special codes
"""
@staticmethod
def dc05_is_user(code):
"""
Codes that have the user number following the code.
Note that there is no way to transfer a zone in the message
"""
codes_with_user = {"121", "313", "400", "401", "402", "403", "404", "405",
"406", "407", "408", "409", "441", "442", "450", "451", "452", "453",
"454", "455", "456", "457", "458", "459", "462", "463", "464", "466",
"411", "412", "413", "414", "415", "421", "422", "424", "425", "429",
"430", "431", "574", "604", "607", "625", "642", "652", "653"}
return code in codes_with_user
class dc05_msg:
@staticmethod
def dc05event(spt_account, params={}):
"""
Construct a DC05 message, also called Ademco Contact ID
Parameters
spt_account
the account of the alarm transceiver.
in most situations this will be used in the alarm message too, but for situations like
a cloud based receiver, the account in the map will be different.
params
a map with key-value pairs.
at this moment only the more commonly used fields are used.
the currently handled keys are:
account
the account number.
most receivers expect 4 to 8 numeric digits
area
the area number in which the event happened
(area is a part of an installation that can arm and disarm independently)
zone
the alarm zone.
code
the event code in 3 numbers according to the DC05 standard.
q
the qualifier defining the state of the alarm.
1 means new alarm
3 means new restore
6 means old alarm
"""
account = param.strpar(params, 'account', spt_account)
zone = param.numpar(params, 'zone', '000')
user = param.numpar(params, 'user', None)
msg = ''
if account is None:
msg += '#0000|'
else:
msg += '#' + account + '|'
code = param.numpar(params, 'code', '602')
if len(code) != 3:
raise Exception('Code should be 3 positions')
q = param.numpar(params, 'q', '1')
        if q != '1' and q != '3' and q != '6':
raise Exception('Qualifier q should be 1 or 3 or 6')
area = param.numpar(params, 'area', '00')
if len(area) != 2:
area = ('00' + area)[-2:]
if dc05_codes.dc05_is_user(code) and user is not None:
if len(user) != 3:
user = ('000' + user)[-3:]
msg += q + code + ' ' + area + ' ' + user + ']'
else:
if len(zone) != 3:
zone = ('000' + zone)[-3:]
msg += q + code + ' ' + area + ' ' + zone + ']'
return msg
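# ---------------------------------------------------------------------------
# Illustrative example (not part of the original module). Parameter names follow
# the docstring above; the exact formatting of account/area/user comes from
# param.strpar/param.numpar:
#
#   msg = dc05_msg.dc05event('1234', {'code': '401', 'q': '3',
#                                     'area': '1', 'user': '5'})
#   # -> '#1234|3401 01 005]'   (code 401 takes the user branch, so the user
#   #                            number, not the zone, is appended)
# ---------------------------------------------------------------------------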
| <filename>dc09_spt/msg/dc05_msg.py
# ----------------------------
# Class to implement the SIA DC05 message
# (c 2018 van Ovost Automatisering b.v.
# Author : <NAME>
# ----------------------------
from dc09_spt.param import *
"""
Copyright (c) 2018 van Ovost Automatisering b.v.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
you may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class dc05_codes:
"""
Some special codes
"""
@staticmethod
def dc05_is_user(code):
"""
Codes that have the user number following the code.
Note that there is no way to transfer a zone in the message
"""
codes_with_user = {"121", "313", "400", "401", "402", "403", "404", "405",
"406", "407", "408", "409", "441", "442", "450", "451", "452", "453",
"454", "455", "456", "457", "458", "459", "462", "463", "464", "466",
"411", "412", "413", "414", "415", "421", "422", "424", "425", "429",
"430", "431", "574", "604", "607", "625", "642", "652", "653"}
return code in codes_with_user
class dc05_msg:
@staticmethod
def dc05event(spt_account, params={}):
"""
Construct a DC05 message, also called Ademco Contact ID
Parameters
spt_account
the account of the alarm transceiver.
in most situations this will be used in the alarm message too, but for situations like
a cloud based receiver, the account in the map will be different.
params
a map with key-value pairs.
at this moment only the more commonly used fields are used.
the currently handled keys are:
account
the account number.
most receivers expect 4 to 8 numeric digits
area
the area number in which the event happened
(area is a part of an installation that can arm and disarm independently)
zone
the alarm zone.
code
the event code in 3 numbers according to the DC05 standard.
q
the qualifier defining the state of the alarm.
1 means new alarm
3 means new restore
6 means old alarm
"""
account = param.strpar(params, 'account', spt_account)
zone = param.numpar(params, 'zone', '000')
user = param.numpar(params, 'user', None)
msg = ''
if account is None:
msg += '#0000|'
else:
msg += '#' + account + '|'
code = param.numpar(params, 'code', '602')
if len(code) != 3:
raise Exception('Code should be 3 positions')
q = param.numpar(params, 'q', '1')
        if q != '1' and q != '3' and q != '6':
raise Exception('Qualifier q should be 1 or 3 or 6')
area = param.numpar(params, 'area', '00')
if len(area) != 2:
area = ('00' + area)[-2:]
if dc05_codes.dc05_is_user(code) and user is not None:
if len(user) != 3:
user = ('000' + user)[-3:]
msg += q + code + ' ' + area + ' ' + user + ']'
else:
if len(zone) != 3:
zone = ('000' + zone)[-3:]
msg += q + code + ' ' + area + ' ' + zone + ']'
return msg
| en | 0.853544 | # ---------------------------- # Class to implement the SIA DC05 message # (c 2018 van Ovost Automatisering b.v. # Author : <NAME> # ---------------------------- Copyright (c) 2018 van Ovost Automatisering b.v. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. you may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Some special codes Codes that have the user number following the code. Note that there is no way to transfer a zone in the message Construct a DC05 message, also called Ademco Contact ID Parameters spt_account the account of the alarm transceiver. in most situations this will be used in the alarm message too, but for situations like a cloud based receiver, the account in the map will be different. params a map with key-value pairs. at this moment only the more commonly used fields are used. the currently handled keys are: account the account number. most receivers expect 4 to 8 numeric digits area the area number in which the event happened (area is a part of an installation that can arm and disarm independently) zone the alarm zone. code the event code in 3 numbers according to the DC05 standard. q the qualifier defining the state of the alarm. 1 means new alarm 3 means new restore 6 means old alarm | 2.06406 | 2 |
5_kicom_ml/kicomav-master/Engine/plugins/extract.py | Kimchangheon/malware_ML | 4 | 6633169 | <reponame>Kimchangheon/malware_ML<filename>5_kicom_ml/kicomav-master/Engine/plugins/extract.py
# pip install pefile
# pip install yara
# find / -name libyara.so
# cp LIB_YARA_PATH /home/stud/anaconda2/envs/ml_sec_2.7/lib/
import csv,os,pefile
import yara
import math
import hashlib
class PE_features():
IMAGE_DOS_HEADER = [
"e_cblp",\
"e_cp", \
"e_cparhdr",\
"e_maxalloc",\
"e_sp",\
"e_lfanew"]
FILE_HEADER= ["NumberOfSections","CreationYear"] + [ "FH_char" + str(i) for i in range(15)]
OPTIONAL_HEADER1 = [
"MajorLinkerVersion",\
"MinorLinkerVersion",\
"SizeOfCode",\
"SizeOfInitializedData",\
"SizeOfUninitializedData",\
"AddressOfEntryPoint",\
"BaseOfCode",\
"BaseOfData",\
"ImageBase",\
"SectionAlignment",\
"FileAlignment",\
"MajorOperatingSystemVersion",\
"MinorOperatingSystemVersion",\
"MajorImageVersion",\
"MinorImageVersion",\
"MajorSubsystemVersion",\
"MinorSubsystemVersion",\
"SizeOfImage",\
"SizeOfHeaders",\
"CheckSum",\
"Subsystem"]
OPTIONAL_HEADER_DLL_char = [ "OH_DLLchar" + str(i) for i in range(11)]
OPTIONAL_HEADER2 = [
"SizeOfStackReserve",\
"SizeOfStackCommit",\
"SizeOfHeapReserve",\
"SizeOfHeapCommit",\
"LoaderFlags"] # boolean check for zero or not
OPTIONAL_HEADER = OPTIONAL_HEADER1 + OPTIONAL_HEADER_DLL_char + OPTIONAL_HEADER2
Derived_header = ["sus_sections","non_sus_sections", "packer","packer_type","E_text","E_data","filesize","E_file","fileinfo"]
def __init__(self,source):
self.source = source
self.rules= yara.compile(filepath='./peid.yara')
def file_creation_year(self,seconds):
tmp = 1970 + ((int(seconds) / 86400) / 365)
return int(tmp in range (1980,2016))
def FILE_HEADER_Char_boolean_set(self,pe):
tmp = [pe.FILE_HEADER.IMAGE_FILE_RELOCS_STRIPPED,\
pe.FILE_HEADER.IMAGE_FILE_EXECUTABLE_IMAGE,\
pe.FILE_HEADER.IMAGE_FILE_LINE_NUMS_STRIPPED,\
pe.FILE_HEADER.IMAGE_FILE_LOCAL_SYMS_STRIPPED,\
pe.FILE_HEADER.IMAGE_FILE_AGGRESIVE_WS_TRIM,\
pe.FILE_HEADER.IMAGE_FILE_LARGE_ADDRESS_AWARE,\
pe.FILE_HEADER.IMAGE_FILE_BYTES_REVERSED_LO,\
pe.FILE_HEADER.IMAGE_FILE_32BIT_MACHINE,\
pe.FILE_HEADER.IMAGE_FILE_DEBUG_STRIPPED,\
pe.FILE_HEADER.IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP,\
pe.FILE_HEADER.IMAGE_FILE_NET_RUN_FROM_SWAP,\
pe.FILE_HEADER.IMAGE_FILE_SYSTEM,\
pe.FILE_HEADER.IMAGE_FILE_DLL,\
pe.FILE_HEADER.IMAGE_FILE_UP_SYSTEM_ONLY,\
pe.FILE_HEADER.IMAGE_FILE_BYTES_REVERSED_HI
]
return [int(s) for s in tmp]
def OPTIONAL_HEADER_DLLChar(self,pe):
tmp = [
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_NX_COMPAT ,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_NO_ISOLATION,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_NO_SEH,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_NO_BIND,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_WDM_DRIVER,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_APPCONTAINER,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_GUARD_CF
]
return [int(s) for s in tmp]
def Optional_header_ImageBase(self,ImageBase):
result= 0
if ImageBase % (64 * 1024) == 0 and ImageBase in [268435456,65536,4194304]:
result = 1
return result
def Optional_header_SectionAlignment(self,SectionAlignment,FileAlignment):
"""This is boolean function and will return 0 or 1 based on condidtions
that it SectionAlignment must be greater than or equal to FileAlignment
"""
return int(SectionAlignment >= FileAlignment)
def Optional_header_FileAlignment(self,SectionAlignment,FileAlignment):
result =0
if SectionAlignment >= 512:
if FileAlignment % 2 == 0 and FileAlignment in range(512,65537):
result =1
else:
if FileAlignment == SectionAlignment:
result = 1
return result
def Optional_header_SizeOfImage(self,SizeOfImage,SectionAlignment):
return int(SizeOfImage % SectionAlignment == 0)
def Optional_header_SizeOfHeaders(self,SizeOfHeaders,FileAlignment):
return int(SizeOfHeaders % FileAlignment == 0 )
def extract_dos_header(self,pe):
IMAGE_DOS_HEADER_data = [ 0 for i in range(6)]
try:
IMAGE_DOS_HEADER_data = [
pe.DOS_HEADER.e_cblp,\
pe.DOS_HEADER.e_cp, \
pe.DOS_HEADER.e_cparhdr,\
pe.DOS_HEADER.e_maxalloc,\
pe.DOS_HEADER.e_sp,\
pe.DOS_HEADER.e_lfanew]
except Exception, e:
print e
return IMAGE_DOS_HEADER_data
def extract_file_header(self,pe):
FILE_HEADER_data = [ 0 for i in range(3)]
FILE_HEADER_char = []
try:
FILE_HEADER_data = [
pe.FILE_HEADER.NumberOfSections, \
self.file_creation_year(pe.FILE_HEADER.TimeDateStamp)]
FILE_HEADER_char = self.FILE_HEADER_Char_boolean_set(pe)
except Exception, e:
print e
return FILE_HEADER_data + FILE_HEADER_char
def extract_optional_header(self,pe):
OPTIONAL_HEADER_data = [ 0 for i in range(21)]
DLL_char =[]
OPTIONAL_HEADER_data2 = [ 0 for i in range(6)]
try:
OPTIONAL_HEADER_data = [
pe.OPTIONAL_HEADER.MajorLinkerVersion,\
pe.OPTIONAL_HEADER.MinorLinkerVersion,\
pe.OPTIONAL_HEADER.SizeOfCode,\
pe.OPTIONAL_HEADER.SizeOfInitializedData,\
pe.OPTIONAL_HEADER.SizeOfUninitializedData,\
pe.OPTIONAL_HEADER.AddressOfEntryPoint,\
pe.OPTIONAL_HEADER.BaseOfCode,\
pe.OPTIONAL_HEADER.BaseOfData,\
#Check the ImageBase for the condition
self.Optional_header_ImageBase(pe.OPTIONAL_HEADER.ImageBase),\
# Checking for SectionAlignment condition
self.Optional_header_SectionAlignment(pe.OPTIONAL_HEADER.SectionAlignment,pe.OPTIONAL_HEADER.FileAlignment),\
#Checking for FileAlignment condition
self.Optional_header_FileAlignment(pe.OPTIONAL_HEADER.SectionAlignment,pe.OPTIONAL_HEADER.FileAlignment),\
pe.OPTIONAL_HEADER.MajorOperatingSystemVersion,\
pe.OPTIONAL_HEADER.MinorOperatingSystemVersion,\
pe.OPTIONAL_HEADER.MajorImageVersion,\
pe.OPTIONAL_HEADER.MinorImageVersion,\
pe.OPTIONAL_HEADER.MajorSubsystemVersion,\
pe.OPTIONAL_HEADER.MinorSubsystemVersion,\
#Checking size of Image
self.Optional_header_SizeOfImage(pe.OPTIONAL_HEADER.SizeOfImage,pe.OPTIONAL_HEADER.SectionAlignment),\
#Checking for size of headers
self.Optional_header_SizeOfHeaders(pe.OPTIONAL_HEADER.SizeOfHeaders,pe.OPTIONAL_HEADER.FileAlignment),\
pe.OPTIONAL_HEADER.CheckSum,\
pe.OPTIONAL_HEADER.Subsystem]
DLL_char = self.OPTIONAL_HEADER_DLLChar(pe)
OPTIONAL_HEADER_data2= [
pe.OPTIONAL_HEADER.SizeOfStackReserve,\
pe.OPTIONAL_HEADER.SizeOfStackCommit,\
pe.OPTIONAL_HEADER.SizeOfHeapReserve,\
pe.OPTIONAL_HEADER.SizeOfHeapCommit,\
int(pe.OPTIONAL_HEADER.LoaderFlags == 0) ]
except Exception, e:
print e
return OPTIONAL_HEADER_data + DLL_char + OPTIONAL_HEADER_data2
def get_count_suspicious_sections(self,pe):
result=[]
tmp =[]
benign_sections = set(['.text','.data','.rdata','.idata','.edata','.rsrc','.bss','.crt','.tls'])
for section in pe.sections:
tmp.append(section.Name.split('\x00')[0])
non_sus_sections = len(set(tmp).intersection(benign_sections))
result=[len(tmp) - non_sus_sections, non_sus_sections]
return result
def check_packer(self,filepath):
result=[]
matches = self.rules.match(filepath)
try:
if matches == [] or matches == {}:
result.append([0,"NoPacker"])
else:
result.append([1,matches['main'][0]['rule']])
except:
result.append([1,matches[0]])
return result
def get_text_data_entropy(self,pe):
result=[0.0,0.0]
for section in pe.sections:
s_name = section.Name.split('\x00')[0]
if s_name == ".text":
result[0]= section.get_entropy()
elif s_name == ".data":
result[1]= section.get_entropy()
else:
pass
return result
def get_file_bytes_size(self,filepath):
f = open(filepath, "rb")
byteArr = map(ord, f.read())
f.close()
fileSize = len(byteArr)
return byteArr,fileSize
def cal_byteFrequency(self,byteArr,fileSize):
freqList = []
for b in range(256):
ctr = 0
for byte in byteArr:
if byte == b:
ctr += 1
freqList.append(float(ctr) / fileSize)
return freqList
def get_file_entropy(self,filepath):
byteArr, fileSize = self.get_file_bytes_size(filepath)
freqList = self.cal_byteFrequency(byteArr,fileSize)
# Shannon entropy
ent = 0.0
for freq in freqList:
if freq > 0:
ent += - freq * math.log(freq, 2)
#ent = -ent
return [fileSize,ent]
def get_fileinfo(self,pe):
result=[]
try:
FileVersion = pe.FileInfo[0].StringTable[0].entries['FileVersion']
ProductVersion = pe.FileInfo[0].StringTable[0].entries['ProductVersion']
ProductName = pe.FileInfo[0].StringTable[0].entries['ProductName']
CompanyName = pe.FileInfo[0].StringTable[0].entries['CompanyName']
#getting Lower and
FileVersionLS = pe.VS_FIXEDFILEINFO.FileVersionLS
FileVersionMS = pe.VS_FIXEDFILEINFO.FileVersionMS
ProductVersionLS = pe.VS_FIXEDFILEINFO.ProductVersionLS
ProductVersionMS = pe.VS_FIXEDFILEINFO.ProductVersionMS
except Exception, e:
result=["error"]
#print "{} while opening {}".format(e,filepath)
else:
#shifting byte
FileVersion = (FileVersionMS >> 16, FileVersionMS & 0xFFFF, FileVersionLS >> 16, FileVersionLS & 0xFFFF)
ProductVersion = (ProductVersionMS >> 16, ProductVersionMS & 0xFFFF, ProductVersionLS >> 16, ProductVersionLS & 0xFFFF)
result = [FileVersion,ProductVersion,ProductName,CompanyName]
return int ( result[0] != 'error')
def extract_all(self):
data =[]
filepath = self.source
try:
pe = pefile.PE(filepath)
except Exception, e:
print "{} while opening {}".format(e,filepath)
else:
data += self.extract_dos_header(pe)
data += self.extract_file_header(pe)
data += self.extract_optional_header(pe)
num_ss_nss = self.get_count_suspicious_sections(pe)
data += num_ss_nss
packer = self.check_packer(filepath)
data += packer[0]
entropy_sections = self.get_text_data_entropy(pe)
data += entropy_sections
f_size_entropy = self.get_file_entropy(filepath)
data += f_size_entropy
fileinfo = self.get_fileinfo(pe)
data.append(fileinfo)
magic = pe.OPTIONAL_HEADER.Magic
return data, magic
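# ---------------------------------------------------------------------------
# Usage sketch (not part of the original plugin). It assumes pefile and
# yara-python are installed and that peid.yara is present in the working
# directory, as required by PE_features.__init__:
#
#   features = PE_features('sample.exe')        # path is illustrative
#   data, magic = features.extract_all()
#   # 'data' is the flat feature vector ordered as IMAGE_DOS_HEADER +
#   # FILE_HEADER + OPTIONAL_HEADER + Derived_header; 'magic' is the
#   # PE optional-header magic (0x10b for PE32, 0x20b for PE32+).
# ---------------------------------------------------------------------------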
| # pip install pefile
# pip install yara
# find / -name libyara.so
# cp LIB_YARA_PATH /home/stud/anaconda2/envs/ml_sec_2.7/lib/
import csv,os,pefile
import yara
import math
import hashlib
class PE_features():
IMAGE_DOS_HEADER = [
"e_cblp",\
"e_cp", \
"e_cparhdr",\
"e_maxalloc",\
"e_sp",\
"e_lfanew"]
FILE_HEADER= ["NumberOfSections","CreationYear"] + [ "FH_char" + str(i) for i in range(15)]
OPTIONAL_HEADER1 = [
"MajorLinkerVersion",\
"MinorLinkerVersion",\
"SizeOfCode",\
"SizeOfInitializedData",\
"SizeOfUninitializedData",\
"AddressOfEntryPoint",\
"BaseOfCode",\
"BaseOfData",\
"ImageBase",\
"SectionAlignment",\
"FileAlignment",\
"MajorOperatingSystemVersion",\
"MinorOperatingSystemVersion",\
"MajorImageVersion",\
"MinorImageVersion",\
"MajorSubsystemVersion",\
"MinorSubsystemVersion",\
"SizeOfImage",\
"SizeOfHeaders",\
"CheckSum",\
"Subsystem"]
OPTIONAL_HEADER_DLL_char = [ "OH_DLLchar" + str(i) for i in range(11)]
OPTIONAL_HEADER2 = [
"SizeOfStackReserve",\
"SizeOfStackCommit",\
"SizeOfHeapReserve",\
"SizeOfHeapCommit",\
"LoaderFlags"] # boolean check for zero or not
OPTIONAL_HEADER = OPTIONAL_HEADER1 + OPTIONAL_HEADER_DLL_char + OPTIONAL_HEADER2
Derived_header = ["sus_sections","non_sus_sections", "packer","packer_type","E_text","E_data","filesize","E_file","fileinfo"]
def __init__(self,source):
self.source = source
self.rules= yara.compile(filepath='./peid.yara')
def file_creation_year(self,seconds):
tmp = 1970 + ((int(seconds) / 86400) / 365)
return int(tmp in range (1980,2016))
def FILE_HEADER_Char_boolean_set(self,pe):
tmp = [pe.FILE_HEADER.IMAGE_FILE_RELOCS_STRIPPED,\
pe.FILE_HEADER.IMAGE_FILE_EXECUTABLE_IMAGE,\
pe.FILE_HEADER.IMAGE_FILE_LINE_NUMS_STRIPPED,\
pe.FILE_HEADER.IMAGE_FILE_LOCAL_SYMS_STRIPPED,\
pe.FILE_HEADER.IMAGE_FILE_AGGRESIVE_WS_TRIM,\
pe.FILE_HEADER.IMAGE_FILE_LARGE_ADDRESS_AWARE,\
pe.FILE_HEADER.IMAGE_FILE_BYTES_REVERSED_LO,\
pe.FILE_HEADER.IMAGE_FILE_32BIT_MACHINE,\
pe.FILE_HEADER.IMAGE_FILE_DEBUG_STRIPPED,\
pe.FILE_HEADER.IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP,\
pe.FILE_HEADER.IMAGE_FILE_NET_RUN_FROM_SWAP,\
pe.FILE_HEADER.IMAGE_FILE_SYSTEM,\
pe.FILE_HEADER.IMAGE_FILE_DLL,\
pe.FILE_HEADER.IMAGE_FILE_UP_SYSTEM_ONLY,\
pe.FILE_HEADER.IMAGE_FILE_BYTES_REVERSED_HI
]
return [int(s) for s in tmp]
def OPTIONAL_HEADER_DLLChar(self,pe):
tmp = [
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_NX_COMPAT ,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_NO_ISOLATION,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_NO_SEH,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_NO_BIND,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_WDM_DRIVER,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_APPCONTAINER,\
pe.OPTIONAL_HEADER.IMAGE_DLLCHARACTERISTICS_GUARD_CF
]
return [int(s) for s in tmp]
def Optional_header_ImageBase(self,ImageBase):
result= 0
if ImageBase % (64 * 1024) == 0 and ImageBase in [268435456,65536,4194304]:
result = 1
return result
def Optional_header_SectionAlignment(self,SectionAlignment,FileAlignment):
"""This is boolean function and will return 0 or 1 based on condidtions
that it SectionAlignment must be greater than or equal to FileAlignment
"""
return int(SectionAlignment >= FileAlignment)
def Optional_header_FileAlignment(self,SectionAlignment,FileAlignment):
result =0
if SectionAlignment >= 512:
if FileAlignment % 2 == 0 and FileAlignment in range(512,65537):
result =1
else:
if FileAlignment == SectionAlignment:
result = 1
return result
def Optional_header_SizeOfImage(self,SizeOfImage,SectionAlignment):
return int(SizeOfImage % SectionAlignment == 0)
def Optional_header_SizeOfHeaders(self,SizeOfHeaders,FileAlignment):
return int(SizeOfHeaders % FileAlignment == 0 )
def extract_dos_header(self,pe):
IMAGE_DOS_HEADER_data = [ 0 for i in range(6)]
try:
IMAGE_DOS_HEADER_data = [
pe.DOS_HEADER.e_cblp,\
pe.DOS_HEADER.e_cp, \
pe.DOS_HEADER.e_cparhdr,\
pe.DOS_HEADER.e_maxalloc,\
pe.DOS_HEADER.e_sp,\
pe.DOS_HEADER.e_lfanew]
except Exception, e:
print e
return IMAGE_DOS_HEADER_data
def extract_file_header(self,pe):
FILE_HEADER_data = [ 0 for i in range(3)]
FILE_HEADER_char = []
try:
FILE_HEADER_data = [
pe.FILE_HEADER.NumberOfSections, \
self.file_creation_year(pe.FILE_HEADER.TimeDateStamp)]
FILE_HEADER_char = self.FILE_HEADER_Char_boolean_set(pe)
except Exception, e:
print e
return FILE_HEADER_data + FILE_HEADER_char
def extract_optional_header(self,pe):
OPTIONAL_HEADER_data = [ 0 for i in range(21)]
DLL_char =[]
OPTIONAL_HEADER_data2 = [ 0 for i in range(6)]
try:
OPTIONAL_HEADER_data = [
pe.OPTIONAL_HEADER.MajorLinkerVersion,\
pe.OPTIONAL_HEADER.MinorLinkerVersion,\
pe.OPTIONAL_HEADER.SizeOfCode,\
pe.OPTIONAL_HEADER.SizeOfInitializedData,\
pe.OPTIONAL_HEADER.SizeOfUninitializedData,\
pe.OPTIONAL_HEADER.AddressOfEntryPoint,\
pe.OPTIONAL_HEADER.BaseOfCode,\
pe.OPTIONAL_HEADER.BaseOfData,\
#Check the ImageBase for the condition
self.Optional_header_ImageBase(pe.OPTIONAL_HEADER.ImageBase),\
# Checking for SectionAlignment condition
self.Optional_header_SectionAlignment(pe.OPTIONAL_HEADER.SectionAlignment,pe.OPTIONAL_HEADER.FileAlignment),\
#Checking for FileAlignment condition
self.Optional_header_FileAlignment(pe.OPTIONAL_HEADER.SectionAlignment,pe.OPTIONAL_HEADER.FileAlignment),\
pe.OPTIONAL_HEADER.MajorOperatingSystemVersion,\
pe.OPTIONAL_HEADER.MinorOperatingSystemVersion,\
pe.OPTIONAL_HEADER.MajorImageVersion,\
pe.OPTIONAL_HEADER.MinorImageVersion,\
pe.OPTIONAL_HEADER.MajorSubsystemVersion,\
pe.OPTIONAL_HEADER.MinorSubsystemVersion,\
#Checking size of Image
self.Optional_header_SizeOfImage(pe.OPTIONAL_HEADER.SizeOfImage,pe.OPTIONAL_HEADER.SectionAlignment),\
#Checking for size of headers
self.Optional_header_SizeOfHeaders(pe.OPTIONAL_HEADER.SizeOfHeaders,pe.OPTIONAL_HEADER.FileAlignment),\
pe.OPTIONAL_HEADER.CheckSum,\
pe.OPTIONAL_HEADER.Subsystem]
DLL_char = self.OPTIONAL_HEADER_DLLChar(pe)
OPTIONAL_HEADER_data2= [
pe.OPTIONAL_HEADER.SizeOfStackReserve,\
pe.OPTIONAL_HEADER.SizeOfStackCommit,\
pe.OPTIONAL_HEADER.SizeOfHeapReserve,\
pe.OPTIONAL_HEADER.SizeOfHeapCommit,\
int(pe.OPTIONAL_HEADER.LoaderFlags == 0) ]
except Exception, e:
print e
return OPTIONAL_HEADER_data + DLL_char + OPTIONAL_HEADER_data2
def get_count_suspicious_sections(self,pe):
result=[]
tmp =[]
benign_sections = set(['.text','.data','.rdata','.idata','.edata','.rsrc','.bss','.crt','.tls'])
for section in pe.sections:
tmp.append(section.Name.split('\x00')[0])
non_sus_sections = len(set(tmp).intersection(benign_sections))
result=[len(tmp) - non_sus_sections, non_sus_sections]
return result
def check_packer(self,filepath):
result=[]
matches = self.rules.match(filepath)
try:
if matches == [] or matches == {}:
result.append([0,"NoPacker"])
else:
result.append([1,matches['main'][0]['rule']])
except:
result.append([1,matches[0]])
return result
def get_text_data_entropy(self,pe):
result=[0.0,0.0]
for section in pe.sections:
s_name = section.Name.split('\x00')[0]
if s_name == ".text":
result[0]= section.get_entropy()
elif s_name == ".data":
result[1]= section.get_entropy()
else:
pass
return result
def get_file_bytes_size(self,filepath):
f = open(filepath, "rb")
byteArr = map(ord, f.read())
f.close()
fileSize = len(byteArr)
return byteArr,fileSize
def cal_byteFrequency(self,byteArr,fileSize):
freqList = []
for b in range(256):
ctr = 0
for byte in byteArr:
if byte == b:
ctr += 1
freqList.append(float(ctr) / fileSize)
return freqList
def get_file_entropy(self,filepath):
byteArr, fileSize = self.get_file_bytes_size(filepath)
freqList = self.cal_byteFrequency(byteArr,fileSize)
# Shannon entropy
ent = 0.0
for freq in freqList:
if freq > 0:
ent += - freq * math.log(freq, 2)
#ent = -ent
return [fileSize,ent]
def get_fileinfo(self,pe):
result=[]
try:
FileVersion = pe.FileInfo[0].StringTable[0].entries['FileVersion']
ProductVersion = pe.FileInfo[0].StringTable[0].entries['ProductVersion']
ProductName = pe.FileInfo[0].StringTable[0].entries['ProductName']
CompanyName = pe.FileInfo[0].StringTable[0].entries['CompanyName']
            #getting the lower (LS) and upper (MS) 16-bit words of the file and product version numbers
FileVersionLS = pe.VS_FIXEDFILEINFO.FileVersionLS
FileVersionMS = pe.VS_FIXEDFILEINFO.FileVersionMS
ProductVersionLS = pe.VS_FIXEDFILEINFO.ProductVersionLS
ProductVersionMS = pe.VS_FIXEDFILEINFO.ProductVersionMS
except Exception, e:
result=["error"]
#print "{} while opening {}".format(e,filepath)
else:
#shifting byte
FileVersion = (FileVersionMS >> 16, FileVersionMS & 0xFFFF, FileVersionLS >> 16, FileVersionLS & 0xFFFF)
ProductVersion = (ProductVersionMS >> 16, ProductVersionMS & 0xFFFF, ProductVersionLS >> 16, ProductVersionLS & 0xFFFF)
result = [FileVersion,ProductVersion,ProductName,CompanyName]
return int ( result[0] != 'error')
def extract_all(self):
data =[]
filepath = self.source
try:
pe = pefile.PE(filepath)
except Exception, e:
print "{} while opening {}".format(e,filepath)
else:
data += self.extract_dos_header(pe)
data += self.extract_file_header(pe)
data += self.extract_optional_header(pe)
num_ss_nss = self.get_count_suspicious_sections(pe)
data += num_ss_nss
packer = self.check_packer(filepath)
data += packer[0]
entropy_sections = self.get_text_data_entropy(pe)
data += entropy_sections
f_size_entropy = self.get_file_entropy(filepath)
data += f_size_entropy
fileinfo = self.get_fileinfo(pe)
data.append(fileinfo)
magic = pe.OPTIONAL_HEADER.Magic
return data, magic | en | 0.701808 | # pip install pefile # pip install yara # find / -name libyara.so # cp LIB_YARA_PATH /home/stud/anaconda2/envs/ml_sec_2.7/lib/ # boolean check for zero or not This is boolean function and will return 0 or 1 based on condidtions that it SectionAlignment must be greater than or equal to FileAlignment #Check the ImageBase for the condition # Checking for SectionAlignment condition #Checking for FileAlignment condition #Checking size of Image #Checking for size of headers # Shannon entropy #ent = -ent #getting Lower and #print "{} while opening {}".format(e,filepath) #shifting byte | 2.114234 | 2 |
tests/ast/nodes/test_evaluate_binop_decimal.py | ryan-rozario/vyper | 0 | 6633170 | <reponame>ryan-rozario/vyper
from decimal import (
Decimal,
)
from hypothesis import (
example,
given,
settings,
strategies as st,
)
import pytest
from vyper import (
ast as vy_ast,
)
from vyper.exceptions import (
TypeMismatch,
ZeroDivisionException,
)
st_decimals = st.decimals(
min_value=-2 ** 32,
max_value=2 ** 32,
allow_nan=False,
allow_infinity=False,
places=10,
)
@pytest.mark.fuzzing
@settings(max_examples=50, deadline=1000)
@given(left=st_decimals, right=st_decimals)
@example(left=Decimal("0.9999999999"), right=Decimal("0.0000000001"))
@example(left=Decimal("0.0000000001"), right=Decimal("0.9999999999"))
@example(left=Decimal("0.9999999999"), right=Decimal("0.9999999999"))
@example(left=Decimal("0.0000000001"), right=Decimal("0.0000000001"))
@pytest.mark.parametrize("op", "+-*/%")
def test_binop_decimal(get_contract, assert_tx_failed, op, left, right):
source = f"""
@public
def foo(a: decimal, b: decimal) -> decimal:
return a {op} b
"""
contract = get_contract(source)
vyper_ast = vy_ast.parse_to_ast(f"{left} {op} {right}")
old_node = vyper_ast.body[0].value
try:
new_node = old_node.evaluate()
is_valid = True
except ZeroDivisionException:
is_valid = False
if is_valid:
assert contract.foo(left, right) == new_node.value
else:
assert_tx_failed(lambda: contract.foo(left, right))
def test_binop_pow():
# raises because Vyper does not support decimal exponentiation
    vyper_ast = vy_ast.parse_to_ast("3.1337 ** 4.2")
old_node = vyper_ast.body[0].value
with pytest.raises(TypeMismatch):
old_node.evaluate()
@pytest.mark.fuzzing
@settings(max_examples=50, deadline=1000)
@given(
values=st.lists(st_decimals, min_size=2, max_size=10),
ops=st.lists(st.sampled_from("+-*/%"), min_size=11, max_size=11),
)
def test_nested(get_contract, assert_tx_failed, values, ops):
variables = "abcdefghij"
input_value = ",".join(f"{i}: decimal" for i in variables[: len(values)])
return_value = " ".join(f"{a} {b}" for a, b in zip(variables[: len(values)], ops))
return_value = return_value.rsplit(maxsplit=1)[0]
source = f"""
@public
def foo({input_value}) -> decimal:
return {return_value}
"""
contract = get_contract(source)
literal_op = " ".join(f"{a} {b}" for a, b in zip(values, ops))
literal_op = literal_op.rsplit(maxsplit=1)[0]
vyper_ast = vy_ast.parse_to_ast(literal_op)
try:
vy_ast.folding.replace_literal_ops(vyper_ast)
expected = vyper_ast.body[0].value.value
is_valid = True
except ZeroDivisionException:
# for division/modulus by 0, expect the contract call to revert
is_valid = False
if is_valid:
assert contract.foo(*values) == expected
else:
assert_tx_failed(lambda: contract.foo(*values))
| from decimal import (
Decimal,
)
from hypothesis import (
example,
given,
settings,
strategies as st,
)
import pytest
from vyper import (
ast as vy_ast,
)
from vyper.exceptions import (
TypeMismatch,
ZeroDivisionException,
)
st_decimals = st.decimals(
min_value=-2 ** 32,
max_value=2 ** 32,
allow_nan=False,
allow_infinity=False,
places=10,
)
@pytest.mark.fuzzing
@settings(max_examples=50, deadline=1000)
@given(left=st_decimals, right=st_decimals)
@example(left=Decimal("0.9999999999"), right=Decimal("0.0000000001"))
@example(left=Decimal("0.0000000001"), right=Decimal("0.9999999999"))
@example(left=Decimal("0.9999999999"), right=Decimal("0.9999999999"))
@example(left=Decimal("0.0000000001"), right=Decimal("0.0000000001"))
@pytest.mark.parametrize("op", "+-*/%")
def test_binop_decimal(get_contract, assert_tx_failed, op, left, right):
source = f"""
@public
def foo(a: decimal, b: decimal) -> decimal:
return a {op} b
"""
contract = get_contract(source)
vyper_ast = vy_ast.parse_to_ast(f"{left} {op} {right}")
old_node = vyper_ast.body[0].value
try:
new_node = old_node.evaluate()
is_valid = True
except ZeroDivisionException:
is_valid = False
if is_valid:
assert contract.foo(left, right) == new_node.value
else:
assert_tx_failed(lambda: contract.foo(left, right))
def test_binop_pow():
# raises because Vyper does not support decimal exponentiation
    vyper_ast = vy_ast.parse_to_ast("3.1337 ** 4.2")
old_node = vyper_ast.body[0].value
with pytest.raises(TypeMismatch):
old_node.evaluate()
@pytest.mark.fuzzing
@settings(max_examples=50, deadline=1000)
@given(
values=st.lists(st_decimals, min_size=2, max_size=10),
ops=st.lists(st.sampled_from("+-*/%"), min_size=11, max_size=11),
)
def test_nested(get_contract, assert_tx_failed, values, ops):
variables = "abcdefghij"
input_value = ",".join(f"{i}: decimal" for i in variables[: len(values)])
return_value = " ".join(f"{a} {b}" for a, b in zip(variables[: len(values)], ops))
return_value = return_value.rsplit(maxsplit=1)[0]
source = f"""
@public
def foo({input_value}) -> decimal:
return {return_value}
"""
contract = get_contract(source)
literal_op = " ".join(f"{a} {b}" for a, b in zip(values, ops))
literal_op = literal_op.rsplit(maxsplit=1)[0]
vyper_ast = vy_ast.parse_to_ast(literal_op)
try:
vy_ast.folding.replace_literal_ops(vyper_ast)
expected = vyper_ast.body[0].value.value
is_valid = True
except ZeroDivisionException:
# for division/modulus by 0, expect the contract call to revert
is_valid = False
if is_valid:
assert contract.foo(*values) == expected
else:
assert_tx_failed(lambda: contract.foo(*values)) | en | 0.581701 | @public def foo(a: decimal, b: decimal) -> decimal: return a {op} b # raises because Vyper does not support decimal exponentiation @public def foo({input_value}) -> decimal: return {return_value} # for division/modulus by 0, expect the contract call to revert | 2.480257 | 2 |
Chapter03/Activities/Activity_09.py | talendteams/Data-Science-with-Python | 28 | 6633171 | <filename>Chapter03/Activities/Activity_09.py<gh_stars>10-100
# Activity 5: Generating predictions and evaluating performance of decision tree classifier model
# continuing from Exercise 11:
# generate predicted probabilities of rain
predicted_prob = model.predict_proba(X_test_scaled)[:,1]
# generate predicted classes
predicted_class = model.predict(X_test_scaled)
# evaluate performance with confusion matrix
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
cm = pd.DataFrame(confusion_matrix(y_test, predicted_class))
cm['Total'] = np.sum(cm, axis=1)
cm = cm.append(np.sum(cm, axis=0), ignore_index=True)
cm.columns = ['Predicted No', 'Predicted Yes', 'Total']
cm = cm.set_index([['Actual No', 'Actual Yes', 'Total']])
print(cm)
# generate a classification report
from sklearn.metrics import classification_report
print(classification_report(y_test, predicted_class))
| <filename>Chapter03/Activities/Activity_09.py<gh_stars>10-100
# Activity 5: Generating predictions and evaluating performance of decision tree classifier model
# continuing from Exercise 11:
# generate predicted probabilities of rain
predicted_prob = model.predict_proba(X_test_scaled)[:,1]
# generate predicted classes
predicted_class = model.predict(X_test_scaled)
# evaluate performance with confusion matrix
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
cm = pd.DataFrame(confusion_matrix(y_test, predicted_class))
cm['Total'] = np.sum(cm, axis=1)
cm = cm.append(np.sum(cm, axis=0), ignore_index=True)
cm.columns = ['Predicted No', 'Predicted Yes', 'Total']
cm = cm.set_index([['Actual No', 'Actual Yes', 'Total']])
print(cm)
# generate a classification report
from sklearn.metrics import classification_report
print(classification_report(y_test, predicted_class))
| en | 0.77916 | # Activity 5: Generating predictions and evaluating performance of decision tree classifier model # continuing from Exercise 11: # generate predicted probabilities of rain # generate predicted classes # evaluate performance with confusion matrix # generate a classification report | 3.439009 | 3 |
MelodyHouse/upload_app/urls.py | Koushik-Sarker-Seemanto/Project-350 | 6 | 6633172 | <reponame>Koushik-Sarker-Seemanto/Project-350
from django.conf.urls import url
from django.urls import path, include
from . import views
app_name = 'upload_app'
urlpatterns = [
# url(r'^signup/$', views.signupView, name='signup'),
path('upload/', views.addAlbum, name='add-album'),
]
| from django.conf.urls import url
from django.urls import path, include
from . import views
app_name = 'upload_app'
urlpatterns = [
# url(r'^signup/$', views.signupView, name='signup'),
path('upload/', views.addAlbum, name='add-album'),
] | en | 0.359628 | # url(r'^signup/$', views.signupView, name='signup'), | 1.656607 | 2 |
mlonmcu/models/options.py | tum-ei-eda/mlonmcu | 3 | 6633173 | <gh_stars>1-10
#
# Copyright (c) 2022 TUM Department of Electrical and Computer Engineering.
#
# This file is part of MLonMCU.
# See https://github.com/tum-ei-eda/mlonmcu.git for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class BackendModelOptions:
def __init__(self, backend, supported=True, options={}):
self.backend = backend
self.supported = supported
self.options = options
class TFLMIModelOptions(BackendModelOptions):
def __init__(
self,
backend,
supported=True,
arena_size=None,
builtin_ops=None,
custom_ops=None,
):
super().__init__(backend, supported=supported)
self.arena_size = arena_size
self.builtin_ops = builtin_ops
self.custom_ops = custom_ops
class TVMRTModelOptions(BackendModelOptions):
def __init__(self, backend, supported=True, arena_size=None):
super().__init__(backend, supported=supported)
self.arena_size = arena_size
def parse_model_options_for_backend(backend, options):
backend_types = {
"tflmi": TFLMIModelOptions,
"tvmrt": TVMRTModelOptions,
}
if backend in backend_types:
backend_type = backend_types[backend]
else:
backend_type = BackendModelOptions
backend_options = backend_type(backend)
for key, value in options.items():
setattr(backend_options, key, value)
return backend_options
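# Illustrative (hypothetical) call, not part of the module itself:
# parse_model_options_for_backend("tflmi", {"arena_size": 65536}) returns a
# TFLMIModelOptions with backend="tflmi", supported=True and arena_size set via
# setattr; an unknown backend name falls back to the generic BackendModelOptions.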
| #
# Copyright (c) 2022 TUM Department of Electrical and Computer Engineering.
#
# This file is part of MLonMCU.
# See https://github.com/tum-ei-eda/mlonmcu.git for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class BackendModelOptions:
def __init__(self, backend, supported=True, options={}):
self.backend = backend
self.supported = supported
self.options = options
class TFLMIModelOptions(BackendModelOptions):
def __init__(
self,
backend,
supported=True,
arena_size=None,
builtin_ops=None,
custom_ops=None,
):
super().__init__(backend, supported=supported)
self.arena_size = arena_size
self.builtin_ops = builtin_ops
self.custom_ops = custom_ops
class TVMRTModelOptions(BackendModelOptions):
def __init__(self, backend, supported=True, arena_size=None):
super().__init__(backend, supported=supported)
self.arena_size = arena_size
def parse_model_options_for_backend(backend, options):
backend_types = {
"tflmi": TFLMIModelOptions,
"tvmrt": TVMRTModelOptions,
}
if backend in backend_types:
backend_type = backend_types[backend]
else:
backend_type = BackendModelOptions
backend_options = backend_type(backend)
for key, value in options.items():
setattr(backend_options, key, value)
return backend_options | en | 0.861011 | # # Copyright (c) 2022 TUM Department of Electrical and Computer Engineering. # # This file is part of MLonMCU. # See https://github.com/tum-ei-eda/mlonmcu.git for further info. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # | 1.943375 | 2 |
userprofile/admin.py | FiniteElementries/barebone_server | 0 | 6633174 | <reponame>FiniteElementries/barebone_server
from django.contrib import admin
from userprofile.models import UserProfile
# Register your models here.
admin.site.register(UserProfile) | from django.contrib import admin
from userprofile.models import UserProfile
# Register your models here.
admin.site.register(UserProfile) | en | 0.968259 | # Register your models here. | 1.381042 | 1 |
jetbot/__init__.py | sibnick/jetbot | 29 | 6633175 | <gh_stars>10-100
from .camera import Camera
from .heartbeat import Heartbeat
from .motor import Motor
from .robot import Robot
from .image import bgr8_to_jpeg
from .object_detection import ObjectDetector
from .ads1115 import ADS1115
from .ina219 import INA219 | from .camera import Camera
from .heartbeat import Heartbeat
from .motor import Motor
from .robot import Robot
from .image import bgr8_to_jpeg
from .object_detection import ObjectDetector
from .ads1115 import ADS1115
from .ina219 import INA219 | none | 1 | 0.95916 | 1 |
|
Codes/Mathematical/large_number_gcd.py | datta-agni/Python-Codes | 0 | 6633176 | <reponame>datta-agni/Python-Codes
# program to find GCD of two numbers such that the second number can be very large
# function to find gcd of two integer numbers
def gcd(a, b):
if a == 0:
return b
return gcd(b % a, a)
# Here 'a' is integer and 'b' is string. The idea is to make the second number (represented as b) less than and equal to first number by calculating its mod with first integer number using basic mathematics.
def reduceB(a, b):
# Initialize result
mod = 0
# Calculating mod of b with a to make b like 0 <= b < a
for i in range(0, len(b)):
        mod = (mod * 10 + int(b[i])) % a  # use the digit value, not the ASCII code of the character
# return modulo
return mod
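# Illustrative trace of the digit-by-digit reduction above (assuming digit
# values are used): for a = 4, b = "12345" the running remainder is
# 1 -> 12%4=0 -> 3 -> 34%4=2 -> 25%4=1, so reduceB(4, "12345") == 1, which is
# 12345 % 4 without ever converting the whole string to an integer.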
# This function returns GCD of 'a' and 'b' where b can be very large and is represented as a character array or string
def gcdLarge(a, b):
# Reduce 'b' (second number) after modulo with a
num = reduceB(a, b)
# gcd of two numbers
return gcd(a, num)
def input_number():
return int(input("Enter the number: "))
def main():
# First number which is integer
a = input_number()
# Second number is represented as string because it can not be handled by integer data type
b = str(input_number())
if a == 0:
print(b)
else:
print(gcdLarge(a, b))
if __name__ == "__main__":
main()
| # program to find GCD of two numbers such that the second number can be very large
# function to find gcd of two integer numbers
def gcd(a, b):
if a == 0:
return b
return gcd(b % a, a)
# Here 'a' is integer and 'b' is string. The idea is to make the second number (represented as b) less than and equal to first number by calculating its mod with first integer number using basic mathematics.
def reduceB(a, b):
# Initialize result
mod = 0
# Calculating mod of b with a to make b like 0 <= b < a
for i in range(0, len(b)):
        mod = (mod * 10 + int(b[i])) % a  # use the digit value, not the ASCII code of the character
# return modulo
return mod
# This function returns GCD of 'a' and 'b' where b can be very large and is represented as a character array or string
def gcdLarge(a, b):
# Reduce 'b' (second number) after modulo with a
num = reduceB(a, b)
# gcd of two numbers
return gcd(a, num)
def input_number():
return int(input("Enter the number: "))
def main():
# First number which is integer
a = input_number()
# Second number is represented as string because it can not be handled by integer data type
b = str(input_number())
if a == 0:
print(b)
else:
print(gcdLarge(a, b))
if __name__ == "__main__":
main() | en | 0.938807 | # program to find GCD of two numbers such that the second number can be very large # function to find gcd of two integer numbers # Here 'a' is integer and 'b' is string. The idea is to make the second number (represented as b) less than and equal to first number by calculating its mod with first integer number using basic mathematics. # Initialize result # Calculating mod of b with a to make b like 0 <= b < a # return modulo # This function returns GCD of 'a' and 'b' where b can be very large and is represented as a character array or string # Reduce 'b' (second number) after modulo with a # gcd of two numbers # First number which is integer # Second number is represented as string because it can not be handled by integer data type | 4.163556 | 4 |
wealthbot/chat/views.py | jliev/wealthbot_chatterbot | 1 | 6633177 | # chat/views.py
from django.shortcuts import render
from django.utils.safestring import mark_safe
import json
from datetime import datetime
from django.http import HttpResponse, Http404
from django.contrib.auth.decorators import login_required
from client.forms import PortfolioForm
from client.models import ClientAccount, AccountGroup
from client.managers.clientPortfolioManager import ClientPortfolioManager
from client.managers.portfolioInformationManager import PortfolioInformationManager
def index(request):
return render(request, 'chat/index.html', {})
def room(request, room_name):
return render(request, 'chat/room.html', {
'room_name_json': mark_safe(json.dumps(room_name))
})
def portfolio(request):
clientPortfolioManager = ClientPortfolioManager()
# Get the user object
client = request.user
print('------index-----', client)
ria = client.profile.ria_user
# Get client's portfolio
clientPortfolio = clientPortfolioManager.getCurrentPortfolio(client=client)
if clientPortfolio is None:
clientPortfolio = clientPortfolioManager.getActivePortfolio(client=client)
if clientPortfolio is None:
raise Http404()
companyInformation = ria.riacompanyinformation
portfolio = clientPortfolio.portfolio
isQualified = manageQualified(
session=request.session,
companyInformation=companyInformation,
isQualified=True,
)
isFinal = False
# If client has final portfolio
if clientPortfolio.isAdvisorApproved():
isFinal = True
if client.profile.registration_step < 4:
profile = client.profile
profile.registration_step = 4
profile.save()
elif clientPortfolio.isProposed():
existWorkflow = None # Skip implementing workflow at this moment
portfolioInformationManager = PortfolioInformationManager()
clientAccounts = ClientAccount.objects.filter(client=client)
retirementAccounts = ClientAccount.objects.filter(client=client,
groupType__group__name=AccountGroup.GROUP_EMPLOYER_RETIREMENT)
form = PortfolioForm()
# Skip document at this moment
documents = {
'ria_investment_management_agreement': '#',
}
portfolio_information = portfolioInformationManager.getPortfolioInformation(user=client, model=portfolio,
isQualified=isQualified)
client.appointedBillingSpec.calcFeeTier()
data = {
'is_final': isFinal,
'client': client,
'client_accounts': clientAccounts,
'total': ClientAccount.getTotalScoreByClient(client=client),
'ria_company_information': companyInformation,
'has_retirement_account': True if retirementAccounts.exists() else False,
'portfolio_information': portfolio_information,
'show_sas_cash': containsSasCash(clientAccounts),
'is_use_qualified_models': companyInformation.is_use_qualified_models,
'form': form,
'signing_date': datetime.now(),
'documents': documents,
'action': 'client_portfolio',
}
return render(request, 'chat/portfolio_index.html', data)
def containsSasCash(accounts=None):
if accounts is not None:
for account in accounts:
if account.sas_cash is not None:
if account.sas_cash > 0:
return True
return False
def manageQualified(session, companyInformation, isQualified):
isUseQualified = companyInformation.is_use_qualified_models
if isUseQualified:
if isQualified != '':
setIsQualifiedModel(session=session, value=isQualified)
isQualified = getIsQualifiedModel(session=session)
else:
isQualified = False
return isQualified | # chat/views.py
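# Note: setIsQualifiedModel() and getIsQualifiedModel() used above are neither
# defined nor imported in this module; they are presumably session helpers
# provided elsewhere in the project (assumption).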
from django.shortcuts import render
from django.utils.safestring import mark_safe
import json
from datetime import datetime
from django.http import HttpResponse, Http404
from django.contrib.auth.decorators import login_required
from client.forms import PortfolioForm
from client.models import ClientAccount, AccountGroup
from client.managers.clientPortfolioManager import ClientPortfolioManager
from client.managers.portfolioInformationManager import PortfolioInformationManager
def index(request):
return render(request, 'chat/index.html', {})
def room(request, room_name):
return render(request, 'chat/room.html', {
'room_name_json': mark_safe(json.dumps(room_name))
})
def portfolio(request):
clientPortfolioManager = ClientPortfolioManager()
# Get the user object
client = request.user
print('------index-----', client)
ria = client.profile.ria_user
# Get client's portfolio
clientPortfolio = clientPortfolioManager.getCurrentPortfolio(client=client)
if clientPortfolio is None:
clientPortfolio = clientPortfolioManager.getActivePortfolio(client=client)
if clientPortfolio is None:
raise Http404()
companyInformation = ria.riacompanyinformation
portfolio = clientPortfolio.portfolio
isQualified = manageQualified(
session=request.session,
companyInformation=companyInformation,
isQualified=True,
)
isFinal = False
# If client has final portfolio
if clientPortfolio.isAdvisorApproved():
isFinal = True
if client.profile.registration_step < 4:
profile = client.profile
profile.registration_step = 4
profile.save()
elif clientPortfolio.isProposed():
existWorkflow = None # Skip implementing workflow at this moment
portfolioInformationManager = PortfolioInformationManager()
clientAccounts = ClientAccount.objects.filter(client=client)
retirementAccounts = ClientAccount.objects.filter(client=client,
groupType__group__name=AccountGroup.GROUP_EMPLOYER_RETIREMENT)
form = PortfolioForm()
# Skip document at this moment
documents = {
'ria_investment_management_agreement': '#',
}
portfolio_information = portfolioInformationManager.getPortfolioInformation(user=client, model=portfolio,
isQualified=isQualified)
client.appointedBillingSpec.calcFeeTier()
data = {
'is_final': isFinal,
'client': client,
'client_accounts': clientAccounts,
'total': ClientAccount.getTotalScoreByClient(client=client),
'ria_company_information': companyInformation,
'has_retirement_account': True if retirementAccounts.exists() else False,
'portfolio_information': portfolio_information,
'show_sas_cash': containsSasCash(clientAccounts),
'is_use_qualified_models': companyInformation.is_use_qualified_models,
'form': form,
'signing_date': datetime.now(),
'documents': documents,
'action': 'client_portfolio',
}
return render(request, 'chat/portfolio_index.html', data)
def containsSasCash(accounts=None):
if accounts is not None:
for account in accounts:
if account.sas_cash is not None:
if account.sas_cash > 0:
return True
return False
def manageQualified(session, companyInformation, isQualified):
isUseQualified = companyInformation.is_use_qualified_models
if isUseQualified:
if isQualified != '':
setIsQualifiedModel(session=session, value=isQualified)
isQualified = getIsQualifiedModel(session=session)
else:
isQualified = False
return isQualified | en | 0.727203 | # chat/views.py # Get the user object # Get client's portfolio # If client has final portfolio # Skip implementing workflow at this moment # Skip document at this moment | 1.853194 | 2 |
src/crystal_analysis/detection.py | malramsay64/Crystal_Melting | 0 | 6633178 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""Utilities for handling the trimer molecule."""
import logging
from itertools import product
from pathlib import Path
from typing import List, NamedTuple, Optional
import gsd.hoomd
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.cluster
from bokeh.plotting import gridplot
from scipy.sparse import coo_matrix
from sdanalysis import Frame, HoomdFrame, util
from sdanalysis.figures import plot_frame
from sdanalysis.order import compute_neighbours, create_ml_ordering
logger = logging.getLogger(__name__)
def read_file(
index: int = 0,
pressure: float = 1.00,
temperature: float = 0.40,
crystal: str = "p2",
directory: Optional[Path] = None,
) -> HoomdFrame:
if directory is None:
directory = Path("../data/simulations/interface/output")
fname = f"dump-Trimer-P{pressure:.2f}-T{temperature:.2f}-{crystal}.gsd"
with gsd.hoomd.open(str(directory / fname), "rb") as trj:
return HoomdFrame(trj[index])
class SnapshotData(NamedTuple):
snapshot: HoomdFrame
temperature: str
pressure: str
crystal: str
iteration_id: str
@classmethod
def from_variables(
cls, snapshot: HoomdFrame, variables: util.Variables
) -> "SnapshotData":
return cls(
snapshot=snapshot,
temperature=variables.temperature,
pressure=variables.pressure,
crystal=variables.crystal,
iteration_id=variables.iteration_id,
)
def read_all_files(
directory: Path, index: int = 0, glob: str = "dump-*"
) -> List[SnapshotData]:
directory = Path(directory)
snapshots = []
for file in directory.glob(glob):
with gsd.hoomd.open(str(file), "rb") as trj:
try:
snap = HoomdFrame(trj[index])
except IndexError:
logger.warning(
"Index %d in input file %s doesn't exist, continuing...",
index,
file.name,
)
snapshots.append(
SnapshotData.from_variables(snap, util.get_filename_vars(file))
)
return snapshots
def plot_grid(frames):
for frame in frames:
frame.plot_height = frame.plot_height // 3
frame.plot_width = frame.plot_width // 3
return gridplot(frames, ncols=3)
def plot_clustering(algorithm, X, snapshots, fit=True):
if fit:
clusters = algorithm.fit_predict(X)
else:
clusters = algorithm.predict(X)
cluster_assignment = np.split(clusters, len(snapshots))
fig = plot_grid(
[
plot_frame(snap, order_list=cluster, categorical_colour=True)
for snap, cluster in zip(snapshots, cluster_assignment)
]
)
return fig
def plot_snapshots(snapshots):
return plot_grid([plot_frame(snap) for snap in snapshots])
def classify_mols(snapshot, crystal, boundary_buffer=3.5, is_2d: bool = True):
"""Classify molecules as crystalline, amorphous or boundary."""
mapping = {"liq": 0, "p2": 1, "p2gg": 2, "pg": 3, "None": 4}
position = snapshot.position
# This gets the details of the box from the simulation
box = snapshot.box[:3]
# All axes have to be True, True == 1, use product for logical and operation
position_mat = np.abs(position) < box[:3] / 3
if is_2d:
is_crystal = np.product(position_mat[:, :2], axis=1).astype(bool)
else:
is_crystal = np.product(position_mat, axis=1).astype(bool)
boundary = np.logical_and(
np.product(np.abs(position) < box[:3] / 3 + boundary_buffer, axis=1),
np.product(np.abs(position) > box[:3] / 3 - boundary_buffer, axis=1),
)
# Create classification array
classification = np.zeros(len(snapshot))
classification[is_crystal] = mapping[crystal]
classification[boundary] = 4
return classification
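# Note on classify_mols: molecules whose coordinates fall inside the central
# third of the box get the crystal's label (p2=1, p2gg=2, pg=3; only x/y are
# checked when is_2d), molecules within +/- boundary_buffer of that interface
# are overwritten with 4 ("None"/boundary), and everything else keeps the
# default 0 ("liq").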
def neighbour_connectivity(snapshot, max_neighbours=6, max_radius=5):
neighbours = compute_neighbours(
snapshot.box, snapshot.position, max_neighbours, max_radius
)
sparse_values = np.ones(neighbours.shape[0] * neighbours.shape[1])
sparse_coordinates = (
np.repeat(np.arange(neighbours.shape[0]), neighbours.shape[1]),
neighbours.flatten(),
)
connectivity = coo_matrix((sparse_values, sparse_coordinates))
return connectivity.toarray()
def spatial_clustering(snapshot: Frame, classification: np.ndarray = None):
if classification is None:
knn_model = create_ml_ordering("models/knn-trimer.pkl")
classification = knn_model(snapshot)
connectivity = neighbour_connectivity(snapshot)
agg_cluster = sklearn.cluster.AgglomerativeClustering(
n_clusters=2, connectivity=connectivity
)
return agg_cluster.fit_predict((classification > 0).reshape(-1, 1))
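# Minimal usage sketch (assumes the default interface dump and the
# "models/knn-trimer.pkl" classifier referenced above are available):
#
#   snap = read_file(index=0, pressure=1.00, temperature=0.40, crystal="p2")
#   labels = spatial_clustering(snap)  # one 0/1 cluster id per molecule
#
# spatial_clustering builds the neighbour connectivity matrix and runs a
# two-cluster agglomerative clustering on the crystal/non-crystal
# classification, which is intended to separate the crystal slab from the melt.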
def plot_confusion_matrix(
cm, classes, normalize=True, title="Confusion matrix", cmap=plt.cm.Blues
):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print("Confusion matrix, without normalization")
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = ".2f" if normalize else "d"
thresh = cm.max() / 2.0
for i, j in product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel("Predicted label")
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""Utilities for handling the trimer molecule."""
import logging
from itertools import product
from pathlib import Path
from typing import List, NamedTuple, Optional
import gsd.hoomd
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.cluster
from bokeh.plotting import gridplot
from scipy.sparse import coo_matrix
from sdanalysis import Frame, HoomdFrame, util
from sdanalysis.figures import plot_frame
from sdanalysis.order import compute_neighbours, create_ml_ordering
logger = logging.getLogger(__name__)
def read_file(
index: int = 0,
pressure: float = 1.00,
temperature: float = 0.40,
crystal: str = "p2",
directory: Optional[Path] = None,
) -> HoomdFrame:
if directory is None:
directory = Path("../data/simulations/interface/output")
fname = f"dump-Trimer-P{pressure:.2f}-T{temperature:.2f}-{crystal}.gsd"
with gsd.hoomd.open(str(directory / fname), "rb") as trj:
return HoomdFrame(trj[index])
class SnapshotData(NamedTuple):
snapshot: HoomdFrame
temperature: str
pressure: str
crystal: str
iteration_id: str
@classmethod
def from_variables(
cls, snapshot: HoomdFrame, variables: util.Variables
) -> "SnapshotData":
return cls(
snapshot=snapshot,
temperature=variables.temperature,
pressure=variables.pressure,
crystal=variables.crystal,
iteration_id=variables.iteration_id,
)
def read_all_files(
directory: Path, index: int = 0, glob: str = "dump-*"
) -> List[SnapshotData]:
directory = Path(directory)
snapshots = []
for file in directory.glob(glob):
with gsd.hoomd.open(str(file), "rb") as trj:
try:
snap = HoomdFrame(trj[index])
except IndexError:
logger.warning(
"Index %d in input file %s doesn't exist, continuing...",
index,
file.name,
)
snapshots.append(
SnapshotData.from_variables(snap, util.get_filename_vars(file))
)
return snapshots
def plot_grid(frames):
for frame in frames:
frame.plot_height = frame.plot_height // 3
frame.plot_width = frame.plot_width // 3
return gridplot(frames, ncols=3)
def plot_clustering(algorithm, X, snapshots, fit=True):
if fit:
clusters = algorithm.fit_predict(X)
else:
clusters = algorithm.predict(X)
cluster_assignment = np.split(clusters, len(snapshots))
fig = plot_grid(
[
plot_frame(snap, order_list=cluster, categorical_colour=True)
for snap, cluster in zip(snapshots, cluster_assignment)
]
)
return fig
def plot_snapshots(snapshots):
return plot_grid([plot_frame(snap) for snap in snapshots])
def classify_mols(snapshot, crystal, boundary_buffer=3.5, is_2d: bool = True):
"""Classify molecules as crystalline, amorphous or boundary."""
mapping = {"liq": 0, "p2": 1, "p2gg": 2, "pg": 3, "None": 4}
position = snapshot.position
# This gets the details of the box from the simulation
box = snapshot.box[:3]
# All axes have to be True, True == 1, use product for logical and operation
position_mat = np.abs(position) < box[:3] / 3
if is_2d:
is_crystal = np.product(position_mat[:, :2], axis=1).astype(bool)
else:
is_crystal = np.product(position_mat, axis=1).astype(bool)
boundary = np.logical_and(
np.product(np.abs(position) < box[:3] / 3 + boundary_buffer, axis=1),
np.product(np.abs(position) > box[:3] / 3 - boundary_buffer, axis=1),
)
# Create classification array
classification = np.zeros(len(snapshot))
classification[is_crystal] = mapping[crystal]
classification[boundary] = 4
return classification
def neighbour_connectivity(snapshot, max_neighbours=6, max_radius=5):
neighbours = compute_neighbours(
snapshot.box, snapshot.position, max_neighbours, max_radius
)
sparse_values = np.ones(neighbours.shape[0] * neighbours.shape[1])
sparse_coordinates = (
np.repeat(np.arange(neighbours.shape[0]), neighbours.shape[1]),
neighbours.flatten(),
)
connectivity = coo_matrix((sparse_values, sparse_coordinates))
return connectivity.toarray()
def spatial_clustering(snapshot: Frame, classification: np.ndarray = None):
if classification is None:
knn_model = create_ml_ordering("models/knn-trimer.pkl")
classification = knn_model(snapshot)
connectivity = neighbour_connectivity(snapshot)
agg_cluster = sklearn.cluster.AgglomerativeClustering(
n_clusters=2, connectivity=connectivity
)
return agg_cluster.fit_predict((classification > 0).reshape(-1, 1))
def plot_confusion_matrix(
cm, classes, normalize=True, title="Confusion matrix", cmap=plt.cm.Blues
):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print("Confusion matrix, without normalization")
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = ".2f" if normalize else "d"
thresh = cm.max() / 2.0
for i, j in product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel("Predicted label")
| en | 0.763053 | #! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright © 2018 <NAME> <<EMAIL>> # # Distributed under terms of the MIT license. Utilities for handling the trimer molecule. Classify molecules as crystalline, amorphous or boundary. # This gets the details of the box from the simulation # All axes have to be True, True == 1, use product for logical and operation # Create classification array This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. | 2.208899 | 2 |
src/accounts/signals.py | m3h-D/Myinfoblog | 0 | 6633179 | from django.dispatch import receiver, Signal
from django.db.models.signals import post_save
from django.contrib.auth import get_user_model
from .models import Profile
User = get_user_model()
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, *args, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
| from django.dispatch import receiver, Signal
from django.db.models.signals import post_save
from django.contrib.auth import get_user_model
from .models import Profile
User = get_user_model()
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, *args, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
| none | 1 | 2.245994 | 2 |
|
serminer/src/gen_aging_stressmark_riscv.py | karthiksv/eraser | 0 | 6633180 | <reponame>karthiksv/eraser
# Copyright 2020 IBM Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import csv
import numpy as np
import string
import random
import os
import os.path
import subprocess as sp
from collections import defaultdict
import pdb
from numpy import genfromtxt
#np.set_printoptions(threshold=np.nan)
VERBOSE=0
INST_SCALE_FACTOR = 1
WTED_SW_THRESHOLD = 1e-4 #Stop adding instructions when remaining switching is less than this value
def get_cov_dict_info(infile, ind_array, ninsts):
cov_dict = defaultdict(list)
pruned_dict = defaultdict(list)
selected_dict = defaultdict(list)
macro_list = np.zeros(ninsts)
with open(infile) as cf:
header = cf.readline() #Skip 1st line
for line in cf:
dict_arr = line.split()
cov_dict[dict_arr[0]] = np.array(dict_arr[1:len(dict_arr)]).astype(int)
pruned_dict[dict_arr[0]] = cov_dict[dict_arr[0]]
selected_dict[dict_arr[0]] = np.zeros(ninsts)
selected_dict[dict_arr[0]][ind_array.astype(int)] = np.array(dict_arr)[1+ind_array.astype(int)].astype(float)
selected_sum = np.sum(selected_dict[dict_arr[0]])
macro_list = macro_list + selected_dict[dict_arr[0]]
if (selected_sum==0):
del pruned_dict[dict_arr[0]]
#else:
#print(str(dict_arr[0]) + " : " +str(cov_sum))
return pruned_dict, macro_list
def get_wm_info(wm_file):
wm_dict = defaultdict(list)
with open(wm_file) as wm:
for line in wm:
dict_arr = line.split()
wm_dict[dict_arr[0]] = np.array(dict_arr[1]).astype(float)
return wm_dict
def get_sw_dict_info(infile, ind_array, m_wt, ninsts):
sw_dict = defaultdict(list)
wted_sw_dict = defaultdict(list)
pruned_dict = defaultdict(list)
pruned_wted_dict = defaultdict(list)
selected_dict = defaultdict(list)
selected_wted_dict = defaultdict(list)
macro_list = np.zeros(ninsts)
macro_wted_list = np.zeros(ninsts)
norm_macro_wted_list = np.zeros(ninsts)
with open(infile) as sf:
header = sf.readline() #Skip 1st line
for line in sf:
dict_arr = line.split()
sw_dict[dict_arr[0]] = np.array(dict_arr[1:len(dict_arr)]).astype(float)
selected_dict[dict_arr[0]] = np.zeros(ninsts)
selected_dict[dict_arr[0]][ind_array.astype(int)] = np.array(dict_arr)[1+ind_array.astype(int)].astype(float)
pruned_dict[dict_arr[0]] = selected_dict[dict_arr[0]]
wted_sw_dict[dict_arr[0]] = m_wt[dict_arr[0]]*selected_dict[dict_arr[0]]
pruned_wted_dict[dict_arr[0]] = wted_sw_dict[dict_arr[0]]
sw_sum = np.sum(sw_dict[dict_arr[0]])
selected_sum = np.sum(selected_dict[dict_arr[0]])
macro_list = macro_list + selected_dict[dict_arr[0]]
macro_wted_list = macro_wted_list + m_wt[dict_arr[0]]*selected_dict[dict_arr[0]]
max_selected_val = float(np.max(np.array(selected_dict[dict_arr[0]])))
if (max_selected_val > 0):
norm_macro_wted_list = norm_macro_wted_list + (m_wt[dict_arr[0]]/max_selected_val)*selected_dict[dict_arr[0]]
max_wted_val = np.max(np.array(norm_macro_wted_list))
if (selected_sum==0):
if (VERBOSE==1):
print("Deleting "+str(dict_arr[0]))
print ("Max weighted switching: "+str(max_wted_val))
del pruned_dict[dict_arr[0]]
del pruned_wted_dict[dict_arr[0]]
return pruned_dict, macro_list, macro_wted_list
def get_res_dict_info(res_file, ninsts):
res_dict = defaultdict(list)
pruned_dict = defaultdict(list)
macro_list = np.zeros(ninsts)
with open(res_file) as rf:
header = rf.readline() #Skip 1st line
for line in rf:
dict_arr = line.split()
#res_dict[dict_arr[0]] = (np.array(dict_arr)[1+selected_indices.astype(int)]).astype(float)
res_dict[dict_arr[0]] = np.array(dict_arr[1:len(dict_arr)]).astype(float)
pruned_dict[dict_arr[0]] = res_dict[dict_arr[0]]
res_sum= np.sum(res_dict[dict_arr[0]])
macro_list = macro_list + res_dict[dict_arr[0]]
if (res_sum==0):
del pruned_dict[dict_arr[0]]
#else:
#print(str(dict_arr[0]) + " : " +str(res_sum))
return pruned_dict, macro_list
def get_targeted_dicts(cov_dict, sw_dict, macro_array):
t_cov_dict = defaultdict(list)
t_sw_dict = defaultdict(list)
for m in macro_array:
t_cov_dict[m] = cov_dict[m]
t_sw_dict[m] = sw_dict[m]
return t_cov_dict, t_sw_dict
def gen_random_inst_list(inst_array, frac):
#ifile=open(inst_file);
#all_insts = [x.strip() for x in ifile.readlines()]
num_lines = len(inst_array)
selected_inst_indices = np.array(np.sort(random.sample(range(1,num_lines),int(frac*num_lines))))
selected_inst_array = np.array(inst_array)[selected_inst_indices.astype(int)]
#print(num_lines,frac,selected_inst_array)
return selected_inst_array, selected_inst_indices
def get_stressmark_inst_res(cov_list, res_list, inst_index, tot_cov_list, tot_res_list, return_list):
#pdb.set_trace()
#find max residency value
max_res = max(tot_res_list.values())
#if VERBOSE:
# print(tot_sw_list.values())
#Find list of instructions with max residency
max_res_list = [inst for inst,val in tot_res_list.items() if val==max_res]
    #Check which insts with max residency have highest coverage - use 1st inst in case of tie
max_cov = max([tot_cov_list[inst] for inst in max_res_list])
tmp_list=dict(zip(max_res_list,[tot_cov_list[inst] for inst in max_res_list]))
#max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov]
max_cov_list = [inst for inst,val in tmp_list.items() if val==max_cov]
#Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index
random_cov_index=random.randint(0,len(max_cov_list)-1)
if VERBOSE:
print(max_res, max_res_list)
print("Coverage of max insts: ")
print(max_cov,max_cov_list[random_cov_index],inst_index[max_cov_list[random_cov_index]])
print("Selected index = "+str(random_cov_index)+ " length: " +str(len(max_cov_list)))
todel_list=[]
deleted_cov_list=[]
deleted_res_list=[]
deleted_list_count = 0
for macro in res_list:
if (res_list[macro][inst_index[max_cov_list[random_cov_index]]] > 0):
todel_list.append(macro)
if VERBOSE:
print("macros to delete")
print(todel_list, len(todel_list))
#delete macros corresponding to max inst
if len(res_list.keys()) >0 and len(todel_list)>0:
for m in todel_list:
deleted_res_list.append(res_list[m])
deleted_cov_list.append(cov_list[m])
deleted_list_count = deleted_list_count + 1
del cov_list[m]
del res_list[m]
if VERBOSE:
print("remaining macros: " +str(len(res_list.keys())))
print("append inst: " +str(max_cov_list[random_cov_index]))
#append instruction to stressmark list
return_list.append(max_cov_list[random_cov_index])
print(return_list)
else:
if VERBOSE:
print("no macros selected by instruction "+str(max_cov_list[random_cov_index]))
for i in tot_res_list:
for l in range(0,deleted_list_count):
if tot_res_list[i]:
tot_res_list[i] = tot_res_list[i] - deleted_res_list[l][inst_index[i]]
tot_cov_list[i] = tot_cov_list[i] - deleted_cov_list[l][inst_index[i]]
#delete instruction
#for i in max_res_list:
inst=max_cov_list[random_cov_index]
del inst_index[inst]
del tot_cov_list[inst]
del tot_res_list[inst]
if (len(res_list.keys()) >0):
get_stressmark_inst_res(cov_list, res_list, inst_index, tot_cov_list, tot_res_list, return_list)
else:
print(return_list)
#return return_list
def get_stressmark_inst_sw(cov_list, sw_list, inst_index, tot_cov_list, tot_sw_list, return_list):
#pdb.set_trace()
#find max switching value
max_sw = max(tot_sw_list.values())
#Find list of instructions with max switching
max_sw_list = [inst for inst,val in tot_sw_list.items() if val==max_sw]
    #Check which insts with max switching have highest coverage - use 1st inst in case of tie
max_cov = max([tot_cov_list[inst] for inst in max_sw_list])
tmp_list = dict(zip(max_sw_list,[tot_cov_list[inst] for inst in max_sw_list]))
#max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov]
max_cov_list = [inst for inst,val in tmp_list.items() if val==max_cov]
#Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index
random_cov_index=random.randint(0,len(max_cov_list)-1)
if VERBOSE:
print(max_sw, max_sw_list)
print("Coverage of max insts: ")
print(max_cov,max_cov_list[random_cov_index],inst_index[max_cov_list[random_cov_index]])
print("random index = "+str(random_cov_index)+ " length: " +str(len(max_cov_list)))
todel_list=[]
#deleted_cov_list=[]
#deleted_sw_list=[]
deleted_cov_list=defaultdict(list)
deleted_sw_list=defaultdict(list)
deleted_list_count = 0
for macro in sw_list:
if (sw_list[macro][inst_index[max_cov_list[random_cov_index]]] > 0):
todel_list.append(macro)
if VERBOSE:
print("macros to delete")
print(todel_list, len(todel_list))
#delete macros corresponding to max inst
#if len(sw_list.keys()) >0 and len(todel_list)>0:
# for m in todel_list:
# deleted_sw_list.append(sw_list[m])
# deleted_cov_list.append(cov_list[m])
# deleted_list_count = deleted_list_count + 1
# del cov_list[m]
# del sw_list[m]
# if VERBOSE:
# print("remaining macros: " +str(len(sw_list.keys())))
# print("append inst: " +str(max_cov_list[random_cov_index]))
# #append instruction to stressmark list
# return_list.append(max_cov_list[random_cov_index])
# print(return_list)
#else:
# if VERBOSE:
# print("no macros selected by instruction "+str(max_cov_list[random_cov_index]))
if len(sw_list.keys()) >0 and len(todel_list)>0:
for m in todel_list:
deleted_sw_list[m] = sw_list[m]
deleted_cov_list[m] = cov_list[m]
deleted_list_count = deleted_list_count + 1
del cov_list[m]
del sw_list[m]
if VERBOSE:
print("remaining macros: " +str(len(sw_list.keys())))
print(sw_list.keys())
print("append inst: " +str(max_cov_list[random_cov_index]))
#append instruction to stressmark list
if(len(todel_list)>0):
return_list.append(max_cov_list[random_cov_index])
else:
print("No new macros selected")
print(return_list)
else:
if VERBOSE:
print("no macros selected by instruction "+str(max_cov_list[random_cov_index]))
if VERBOSE:
print("Deleted KEYS and VALS::::")
print ("COV:")
for k,v in deleted_cov_list.items():
print (k)
print ("SW:")
for k,v in deleted_sw_list.items():
print (k)
for i in tot_sw_list:
#for l in range(0,deleted_list_count):
for l in todel_list:
if tot_cov_list[i]:
#print("Sw: ",tot_sw_list[i], deleted_sw_list[l], l, i, inst_index[i])
tot_sw_list[i] = tot_sw_list[i] - deleted_sw_list[l][inst_index[i]]
tot_cov_list[i] = tot_cov_list[i] - deleted_cov_list[l][inst_index[i]]
#delete instruction
#for i in max_sw_list:
inst=max_cov_list[random_cov_index]
del inst_index[inst]
del tot_cov_list[inst]
del tot_sw_list[inst]
#print(sw_list)
if len(sw_list.keys()) >0 or len(cov_list.keys())>0:
get_stressmark_inst_sw(cov_list, sw_list, inst_index, tot_cov_list, tot_sw_list, return_list)
else:
print(return_list)
#return return_list
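# The get_stressmark_inst_* routines above and below share the same greedy
# covering heuristic: pick the instruction with the highest remaining total
# (residency, switching or weighted switching), break ties by coverage (and
# randomly on a further tie), append it to return_list, delete every macro
# that the chosen instruction already exercises, subtract those macros'
# contributions from the per-instruction totals, and recurse until no macros
# remain (or, in the weighted variants, the remaining weighted switching drops
# below WTED_SW_THRESHOLD).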
def get_stressmark_inst_macro_virus(cov_list, sw_list, targeted_cov_list, targeted_sw_list, inst_index, tot_cov_list, tot_sw_list, wt_list, return_list):
#pdb.set_trace()
#find max switching value
max_sw = max(tot_sw_list.values())
#Find list of instructions with max switching
max_sw_list = [inst for inst,val in tot_sw_list.items() if val==max_sw]
    #Check which insts with max switching have highest coverage - use 1st inst in case of tie
max_cov = max([tot_cov_list[inst] for inst in max_sw_list])
tmp_list = dict(zip(max_sw_list,[tot_cov_list[inst] for inst in max_sw_list]))
#max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov]
max_cov_list = [inst for inst,val in tmp_list.items() if val==max_cov]
#Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index
random_cov_index=random.randint(0,len(max_cov_list)-1)
if VERBOSE:
print(max_sw, max_sw_list)
print("Coverage of max insts: ")
print(max_cov,max_cov_list[random_cov_index],inst_index[max_cov_list[random_cov_index]])
print("random index = "+str(random_cov_index)+ " length: " +str(len(max_cov_list)))
todel_list=[]
#deleted_cov_list=[]
#deleted_sw_list=[]
deleted_cov_list=defaultdict(list)
deleted_sw_list=defaultdict(list)
deleted_list_count = 0
for macro in sw_list:
if (sw_list[macro][inst_index[max_cov_list[random_cov_index]]] > 0):
todel_list.append(macro)
if VERBOSE:
print("macros to delete")
print(todel_list, len(todel_list))
#print("Weighted list")
#for macro in todel_list:
# print(str(macro), " ", wt_list[macro])
if len(sw_list.keys()) >0 and len(todel_list)>0:
for m in todel_list:
deleted_sw_list[m] = sw_list[m]
deleted_cov_list[m] = cov_list[m]
deleted_list_count = deleted_list_count + 1
del cov_list[m]
del sw_list[m]
if(m in targeted_cov_list.keys()):
del targeted_cov_list[m]
if(m in targeted_sw_list.keys()):
del targeted_sw_list[m]
if VERBOSE:
print("remaining macros fro targeted list: " +str(len(targeted_sw_list.keys())))
print(targeted_sw_list.keys())
print("append inst: " +str(max_cov_list[random_cov_index]))
#append instruction to stressmark list
if(len(todel_list)>0):
return_list.append(max_cov_list[random_cov_index])
else:
print("No new macros selected")
print(return_list)
else:
if VERBOSE:
print("no macros selected by instruction "+str(max_cov_list[random_cov_index]))
if VERBOSE:
print("Deleted KEYS and VALS::::")
print ("COV:")
for k,v in deleted_cov_list.items():
print (k)
print ("SW:")
for k,v in deleted_sw_list.items():
print (k)
for i in tot_sw_list:
#for l in range(0,deleted_list_count):
for l in todel_list:
if tot_cov_list[i]:
#print("Sw: ",tot_sw_list[i], deleted_sw_list[l], l, i, inst_index[i])
tot_sw_list[i] = tot_sw_list[i] - wt_list[l]*deleted_sw_list[l][inst_index[i]]
tot_cov_list[i] = tot_cov_list[i] - deleted_cov_list[l][inst_index[i]]
#delete instruction
#for i in max_sw_list:
inst=max_cov_list[random_cov_index]
del inst_index[inst]
del tot_cov_list[inst]
del tot_sw_list[inst]
#print(sw_list)
tot_sw_val = sum([tot_sw_list[i] for i in tot_sw_list])
if VERBOSE:
print("Tot sw: ",tot_sw_val)
if (len(targeted_sw_list.keys()) >0 or len(targeted_cov_list.keys())>0) and tot_sw_val>WTED_SW_THRESHOLD:
get_stressmark_inst_macro_virus(cov_list, sw_list, targeted_cov_list, targeted_sw_list, inst_index, tot_cov_list, tot_sw_list, wt_list, return_list)
else:
print(return_list)
#return return_list
def get_stressmark_inst_wted_sw(cov_list, sw_list, inst_index, tot_cov_list, tot_sw_list, wt_list, return_list):
#pdb.set_trace()
    #find max weighted switching value
max_sw = max(tot_sw_list.values())
#Find list of instructions with max switching
max_sw_list = [inst for inst,val in tot_sw_list.items() if val==max_sw]
    #Check which insts with max weighted switching have highest coverage - use 1st inst in case of tie
max_cov = max([tot_cov_list[inst] for inst in max_sw_list])
tmp_list = dict(zip(max_sw_list,[tot_cov_list[inst] for inst in max_sw_list]))
#max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov]
max_cov_list = [inst for inst,val in tmp_list.items() if val==max_cov]
#Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index
random_cov_index=random.randint(0,len(max_cov_list)-1)
if VERBOSE:
print(max_sw, max_sw_list)
print("Coverage of max insts: ")
print(max_cov,max_cov_list[random_cov_index],inst_index[max_cov_list[random_cov_index]])
print("random index = "+str(random_cov_index)+ " length: " +str(len(max_cov_list)))
todel_list=[]
#deleted_cov_list=[]
#deleted_sw_list=[]
deleted_cov_list=defaultdict(list)
deleted_sw_list=defaultdict(list)
deleted_list_count = 0
for macro in sw_list:
if (sw_list[macro][inst_index[max_cov_list[random_cov_index]]] > 0):
todel_list.append(macro)
if VERBOSE:
print("macros to delete")
print(todel_list, len(todel_list))
if len(sw_list.keys()) >0 and len(todel_list)>0:
for m in todel_list:
deleted_sw_list[m] = sw_list[m]
deleted_cov_list[m] = cov_list[m]
deleted_list_count = deleted_list_count + 1
del cov_list[m]
del sw_list[m]
if VERBOSE:
print("remaining macros: " +str(len(sw_list.keys())))
print(sw_list.keys())
print("append inst: " +str(max_cov_list[random_cov_index]))
#append instruction to stressmark list
if(len(todel_list)>0):
return_list.append(max_cov_list[random_cov_index])
else:
print("No new macros selected")
print(return_list)
else:
if VERBOSE:
print("no macros selected by instruction "+str(max_cov_list[random_cov_index]))
if VERBOSE:
print("Deleted KEYS and VALS::::")
print ("COV:")
for k,v in deleted_cov_list.items():
print (k)
print ("SW:")
for k,v in deleted_sw_list.items():
print (k)
for i in tot_sw_list:
#for l in range(0,deleted_list_count):
for l in todel_list:
if tot_cov_list[i]:
#print("Sw: ",tot_sw_list[i], deleted_sw_list[l], l, i, inst_index[i])
tot_sw_list[i] = tot_sw_list[i] - wt_list[l]*deleted_sw_list[l][inst_index[i]]
tot_cov_list[i] = tot_cov_list[i] - deleted_cov_list[l][inst_index[i]]
#delete instruction
#for i in max_sw_list:
inst=max_cov_list[random_cov_index]
del inst_index[inst]
del tot_cov_list[inst]
del tot_sw_list[inst]
#print(sw_list)
tot_sw_val = sum([tot_sw_list[i] for i in tot_sw_list])
if VERBOSE:
print("Tot sw: ",tot_sw_val)
if (len(sw_list.keys()) >0 or len(cov_list.keys())>0) and tot_sw_val>WTED_SW_THRESHOLD:
get_stressmark_inst_wted_sw(cov_list, sw_list, inst_index, tot_cov_list, tot_sw_list, wt_list, return_list)
else:
print(return_list)
#return return_list
def main():
print("Running command: python " +str(sys.argv) + "............")
#Paths
SERMINER_CONFIG_HOME=os.environ['SERMINER_CONFIG_HOME']
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output_dir", type=str, help="Output dir", required=True)
parser.add_argument("-n", "--num_insts", type=int, help="Number of input instructions", required=False)
parser.add_argument("-t", "--stressmark_type", type=str, help="Type of stressmark (coverage/switching/residency based)", required=False, default="res")
parser.add_argument("-th", "--sw_threshold", type=str, help="Switching Threshold", required=True)
parser.add_argument("-if", "--inst_fraction", type=float, help="Instruction fraction (between 0 and 1)", required=False, default=0.99)
parser.add_argument("-il", "--workload_inst_list", type=str, help="Workload instruction list (if instruction fraction is not provided)", default=str(SERMINER_CONFIG_HOME)+'/inst_list.txt', required=False)
parser.add_argument("-ml", "--targeted_macro_list", type=str, help="Macro list for targeted viruses", required=False, default='/tmp/DUMMY_PATH') #Python complains if a dummy path is not given
parser.add_argument("-p", "--print_val", type=int, help="Print insts (0) / Print weights (1)", required=True)
args = parser.parse_args()
OUTPUT_DIR = args.output_dir
#NUM_INSTS = args.num_insts
if (args.stressmark_type):
stressmk_type = args.stressmark_type
sw_threshold = args.sw_threshold
if (args.print_val):
PRINT_WEIGHTS = 1
PRINT_INSTS = 0
else:
PRINT_INSTS = 1
PRINT_WEIGHTS = 0
if (args.inst_fraction >=1 or args.inst_fraction <0):
print("Invalid instruction fraction... Should be between 0 and 1")
exit()
else:
inst_fraction=args.inst_fraction
inst_list = str(SERMINER_CONFIG_HOME) + '/inst_list.txt'
NUM_INSTS = len(open(inst_list).readlines())
    if (os.path.exists(args.targeted_macro_list)): #Only when a targeted macro list is actually provided
targeted_macro_list = args.targeted_macro_list
num_targeted_macros = len(open(targeted_macro_list).readlines())
targeted_macro_array = [line.rstrip('\n') for line in open(targeted_macro_list, 'r').readlines()]
if (not os.path.exists(args.workload_inst_list)):
if (args.inst_fraction>=0.99):
print("Instruction list not provided or incorrect. Setting instruction list as default ", inst_list)
else:
print("Selecting random instruction fraction of ", inst_fraction)
else:
workload_inst_list = args.workload_inst_list
workload_num_insts = len(open(workload_inst_list).readlines())
workload_inst_array = [line.rstrip('\n') for line in open(workload_inst_list, 'r').readlines()]
print ("Workload Inst list ", workload_inst_list, "exists!!! Num insts:", workload_num_insts)
#out = sp.Popen(['wc -l ', str(OUTPUT_DIR),'/inst_list.txt'], stdout=sp.PIPE, stderr=sp.STDOUT)
#stdout, stderr = out.communicate()
#print("Num insts: "+str(stdout))
coverage_file = str(OUTPUT_DIR) + '/macro_perinst_coverage_th' +str(sw_threshold)+'.txt'
switching_file = str(OUTPUT_DIR) + '/macro_perinst_switching_th' +str(sw_threshold)+'.txt'
wted_macro_file = str(OUTPUT_DIR) + '/wted_macro_th' +str(sw_threshold)+'.txt'
residency_file = str(OUTPUT_DIR) + '/macro_perinst_residency_th' +str(sw_threshold)+'.txt'
#Initialize lists
#a) cov_dict - complete dictionary with all instructions and macros
    #b) selected_cov_dict - dictionary with only selected instructions and all macros
#c) pruned_cov_dict - dictionary with all instructions and macros with non zero switching in selected list
inst_array = [line.rstrip('\n') for line in open(inst_list, 'r').readlines()]
if (os.path.exists(args.workload_inst_list)):
#Use Workload inst list
print("Workload inst array selected")
print(workload_inst_array)
selected_inst_array = np.array(workload_inst_array)
selected_indices = np.array([inst_array.index(x) for x in inst_array if x in workload_inst_array])
elif(inst_fraction<0.99):
#Generate randomly selected instruction arrays
selected_inst_array, selected_indices = gen_random_inst_list(inst_array, inst_fraction)
else:
#Use default inst list
selected_inst_array = inst_array
selected_indices = np.arange(0,len(inst_array))
#Read input files
pruned_cov_dict, macros_per_inst = get_cov_dict_info(coverage_file, selected_indices, NUM_INSTS)
wm_dict = get_wm_info(wted_macro_file)
pruned_sw_dict, macro_sw_per_inst, macro_wted_sw_per_inst = get_sw_dict_info(switching_file, selected_indices, wm_dict, NUM_INSTS)
pruned_res_dict, macro_res_per_inst = get_res_dict_info(residency_file, NUM_INSTS)
if (os.path.exists(args.targeted_macro_list)):
targeted_cov_dict = dict((m, pruned_cov_dict[m])for m in targeted_macro_array)
targeted_sw_dict = dict((m, pruned_sw_dict[m]) for m in targeted_macro_array)
targeted_wm_dict = dict((m, 0) for m in wm_dict) #Initialize targeted_wm_dict to 0
selected_indices = np.arange(0,len(inst_array)) #Use all insts
for m in targeted_macro_array:
targeted_wm_dict[m] = wm_dict[m]
tmp_pruned_sw_dict, tmp_macro_sw_per_inst, targeted_macro_wted_sw_per_inst = get_sw_dict_info(switching_file, selected_indices, targeted_wm_dict, NUM_INSTS) #Only last parameter needed
inst_targeted_macro_wted_sw_dict = dict(zip(inst_array, targeted_macro_wted_sw_per_inst))
else:
targeted_cov_dict = pruned_cov_dict
targeted_sw_dict = pruned_sw_dict
targeted_wm_dict = wm_dict
#Generate dictionaries
inst_index_dict = dict(zip(inst_array,range(0,len(inst_array))))
inst_macro_dict = dict(zip(inst_array, macros_per_inst))
inst_macro_sw_dict = dict(zip(inst_array, macro_sw_per_inst))
inst_macro_wted_sw_dict = dict(zip(inst_array, macro_wted_sw_per_inst))
inst_macro_res_dict = dict(zip(inst_array, macro_res_per_inst))
#Preserve original list
init_inst_macro_sw_dict = inst_macro_sw_dict.copy()
init_inst_macro_wted_sw_dict = inst_macro_wted_sw_dict.copy()
init_inst_macro_res_dict = inst_macro_res_dict.copy()
if (os.path.exists(args.targeted_macro_list)):
init_inst_targeted_macro_wted_sw_dict = inst_targeted_macro_wted_sw_dict.copy()
if VERBOSE:
print(len(init_inst_targeted_macro_wted_sw_dict))
if (os.path.exists(args.targeted_macro_list)):
print("Targeted Macros: " +str(len(targeted_cov_dict)))
if VERBOSE:
print(targeted_cov_dict)
print("Macros with non-zero switching: " +str(len(targeted_sw_dict)))
if VERBOSE:
print(targeted_sw_dict)
#print(inst_targeted_macro_wted_sw_dict)
print(init_inst_macro_wted_sw_dict)
else:
print("Total Macros: " +str(len(pruned_cov_dict)))
if VERBOSE:
print(pruned_cov_dict)
print("Macros with non-zero switching: " +str(len(pruned_sw_dict)))
if VERBOSE:
print(pruned_sw_dict)
#Recursive function to get list of instructions in stressmark
stressmark_inst_list=[]
#get_stressmark_inst( pruned_cov_dict, pruned_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_sw_dict, stressmark_inst_list)
if (stressmk_type == "cov"):
print ("Generating Coverage stressmark")
get_stressmark_inst_cov( pruned_cov_dict, pruned_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_sw_dict, stressmark_inst_list)
elif (stressmk_type == "sw"):
get_stressmark_inst_sw( pruned_cov_dict, pruned_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_sw_dict, stressmark_inst_list)
elif (stressmk_type == "wted_sw"):
get_stressmark_inst_wted_sw(pruned_cov_dict, pruned_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_wted_sw_dict, wm_dict, stressmark_inst_list)
elif (stressmk_type == "macro_virus"): #only weighted switching
#get_stressmark_inst_wted_sw(targeted_cov_dict, targeted_sw_dict, inst_index_dict, inst_macro_dict, inst_targeted_macro_wted_sw_dict, targeted_wm_dict, stressmark_inst_list)
get_stressmark_inst_wted_sw(targeted_cov_dict, targeted_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_wted_sw_dict, targeted_wm_dict, stressmark_inst_list)
elif (stressmk_type == "res"): # Default option for any core with no clock gating
get_stressmark_inst_res( pruned_cov_dict, pruned_res_dict, inst_index_dict, inst_macro_dict, inst_macro_res_dict, stressmark_inst_list)
if(PRINT_INSTS):
print("Print stressmark instructions")
print(" ".join(stressmark_inst_list))
#print("Switching vals")
#print("Max inst lists: " +str(stressmark_inst_list))
if(PRINT_WEIGHTS):
print("Print stressmark instruction weights")
if (stressmk_type == "cov"):
min_val=min([init_inst_macro_sw_dict[inst] for inst in stressmark_inst_list])
for inst in stressmark_inst_list:
print(str(int(round(INST_SCALE_FACTOR*init_inst_macro_sw_dict[inst]/min_val))),' ',end='')
elif (stressmk_type == "sw"):
min_val=min([init_inst_macro_sw_dict[inst] for inst in stressmark_inst_list])
for inst in stressmark_inst_list:
print(str(int(round(INST_SCALE_FACTOR*init_inst_macro_sw_dict[inst]/min_val))),' ',end='')
elif (stressmk_type == "wted_sw"):
min_val=min([init_inst_macro_wted_sw_dict[inst] for inst in stressmark_inst_list])
for inst in stressmark_inst_list:
print(str(int(round(INST_SCALE_FACTOR*init_inst_macro_wted_sw_dict[inst]/min_val))),' ',end='')
elif (stressmk_type == "macro_virus"):
min_val=min([init_inst_targeted_macro_wted_sw_dict[inst] for inst in stressmark_inst_list])
for inst in stressmark_inst_list:
print(str(int(round(INST_SCALE_FACTOR*init_inst_targeted_macro_wted_sw_dict[inst]/min_val))),' ',end='')
elif (stressmk_type == "res"):
            min_val=min([init_inst_macro_res_dict[inst] for inst in stressmark_inst_list])
for inst in stressmark_inst_list:
print(str(int(round(INST_SCALE_FACTOR*init_inst_macro_res_dict[inst]/min_val))),' ',end='')
#print("Inst: "+str(inst) + " switching: " +str(init_inst_macro_sw_dict[inst]) + " Weight: " +str(int(round(INST_SCALE_FACTOR*init_inst_macro_sw_dict[inst]/min_sw))),' ',end='')
print('')
## Write selected array to file
#selected_inst_file = "/tmp/STRESSMARK_OUT/IF_"+str(inst_fraction)+"/selected_inst_list.txt"
#with open(selected_inst_file, 'w') as f:
# for item in selected_inst_array:
# f.write("%s\n" % item)
if __name__ == "__main__":
main()
| # Copyright 2020 IBM Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import csv
import numpy as np
import string
import random
import os
import os.path
import subprocess as sp
from collections import defaultdict
import pdb
from numpy import genfromtxt
#np.set_printoptions(threshold=np.nan)
VERBOSE=0
INST_SCALE_FACTOR = 1
WTED_SW_THRESHOLD = 1e-4 #Stop adding instructions when remaining switching is less than this value
def get_cov_dict_info(infile, ind_array, ninsts):
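    # Parse the per-instruction macro coverage file (one row per macro, header skipped):
    # mask out non-selected instructions, drop macros with zero coverage over the selection,
    # and accumulate per-instruction coverage totals.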
cov_dict = defaultdict(list)
pruned_dict = defaultdict(list)
selected_dict = defaultdict(list)
macro_list = np.zeros(ninsts)
with open(infile) as cf:
header = cf.readline() #Skip 1st line
for line in cf:
dict_arr = line.split()
cov_dict[dict_arr[0]] = np.array(dict_arr[1:len(dict_arr)]).astype(int)
pruned_dict[dict_arr[0]] = cov_dict[dict_arr[0]]
selected_dict[dict_arr[0]] = np.zeros(ninsts)
selected_dict[dict_arr[0]][ind_array.astype(int)] = np.array(dict_arr)[1+ind_array.astype(int)].astype(float)
selected_sum = np.sum(selected_dict[dict_arr[0]])
macro_list = macro_list + selected_dict[dict_arr[0]]
if (selected_sum==0):
del pruned_dict[dict_arr[0]]
#else:
#print(str(dict_arr[0]) + " : " +str(cov_sum))
return pruned_dict, macro_list
def get_wm_info(wm_file):
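    # Read the per-macro weight file into a {macro_name: weight} dictionary.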
wm_dict = defaultdict(list)
with open(wm_file) as wm:
for line in wm:
dict_arr = line.split()
wm_dict[dict_arr[0]] = np.array(dict_arr[1]).astype(float)
return wm_dict
def get_sw_dict_info(infile, ind_array, m_wt, ninsts):
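    # Same parsing as the coverage file, but for switching activity: prune macros with zero
    # switching over the selected instructions and accumulate per-instruction totals,
    # both raw and weighted by the per-macro weights in m_wt.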
sw_dict = defaultdict(list)
wted_sw_dict = defaultdict(list)
pruned_dict = defaultdict(list)
pruned_wted_dict = defaultdict(list)
selected_dict = defaultdict(list)
selected_wted_dict = defaultdict(list)
macro_list = np.zeros(ninsts)
macro_wted_list = np.zeros(ninsts)
norm_macro_wted_list = np.zeros(ninsts)
with open(infile) as sf:
header = sf.readline() #Skip 1st line
for line in sf:
dict_arr = line.split()
sw_dict[dict_arr[0]] = np.array(dict_arr[1:len(dict_arr)]).astype(float)
selected_dict[dict_arr[0]] = np.zeros(ninsts)
selected_dict[dict_arr[0]][ind_array.astype(int)] = np.array(dict_arr)[1+ind_array.astype(int)].astype(float)
pruned_dict[dict_arr[0]] = selected_dict[dict_arr[0]]
wted_sw_dict[dict_arr[0]] = m_wt[dict_arr[0]]*selected_dict[dict_arr[0]]
pruned_wted_dict[dict_arr[0]] = wted_sw_dict[dict_arr[0]]
sw_sum = np.sum(sw_dict[dict_arr[0]])
selected_sum = np.sum(selected_dict[dict_arr[0]])
macro_list = macro_list + selected_dict[dict_arr[0]]
macro_wted_list = macro_wted_list + m_wt[dict_arr[0]]*selected_dict[dict_arr[0]]
max_selected_val = float(np.max(np.array(selected_dict[dict_arr[0]])))
if (max_selected_val > 0):
norm_macro_wted_list = norm_macro_wted_list + (m_wt[dict_arr[0]]/max_selected_val)*selected_dict[dict_arr[0]]
max_wted_val = np.max(np.array(norm_macro_wted_list))
if (selected_sum==0):
if (VERBOSE==1):
print("Deleting "+str(dict_arr[0]))
print ("Max weighted switching: "+str(max_wted_val))
del pruned_dict[dict_arr[0]]
del pruned_wted_dict[dict_arr[0]]
return pruned_dict, macro_list, macro_wted_list
def get_res_dict_info(res_file, ninsts):
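    # Parse per-instruction residency for each macro and drop macros whose residency is zero everywhere.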
res_dict = defaultdict(list)
pruned_dict = defaultdict(list)
macro_list = np.zeros(ninsts)
with open(res_file) as rf:
header = rf.readline() #Skip 1st line
for line in rf:
dict_arr = line.split()
#res_dict[dict_arr[0]] = (np.array(dict_arr)[1+selected_indices.astype(int)]).astype(float)
res_dict[dict_arr[0]] = np.array(dict_arr[1:len(dict_arr)]).astype(float)
pruned_dict[dict_arr[0]] = res_dict[dict_arr[0]]
res_sum= np.sum(res_dict[dict_arr[0]])
macro_list = macro_list + res_dict[dict_arr[0]]
if (res_sum==0):
del pruned_dict[dict_arr[0]]
#else:
#print(str(dict_arr[0]) + " : " +str(res_sum))
return pruned_dict, macro_list
def get_targeted_dicts(cov_dict, sw_dict, macro_array):
t_cov_dict = defaultdict(list)
t_sw_dict = defaultdict(list)
for m in macro_array:
t_cov_dict[m] = cov_dict[m]
t_sw_dict[m] = sw_dict[m]
return t_cov_dict, t_sw_dict
def gen_random_inst_list(inst_array, frac):
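    # Randomly sample a fraction 'frac' of the instruction indices (kept sorted) and return
    # the chosen instructions together with their indices.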
#ifile=open(inst_file);
#all_insts = [x.strip() for x in ifile.readlines()]
num_lines = len(inst_array)
selected_inst_indices = np.array(np.sort(random.sample(range(1,num_lines),int(frac*num_lines))))
selected_inst_array = np.array(inst_array)[selected_inst_indices.astype(int)]
#print(num_lines,frac,selected_inst_array)
return selected_inst_array, selected_inst_indices
def get_stressmark_inst_res(cov_list, res_list, inst_index, tot_cov_list, tot_res_list, return_list):
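    # Greedy recursive selection: pick the instruction with the highest remaining total
    # residency (ties broken by coverage, then at random), retire every macro it exercises,
    # subtract those macros' contributions from the other instructions, and recurse until
    # no macros remain.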
#pdb.set_trace()
#find max residency value
max_res = max(tot_res_list.values())
#if VERBOSE:
# print(tot_sw_list.values())
#Find list of instructions with max residency
max_res_list = [inst for inst,val in tot_res_list.items() if val==max_res]
    #Check which insts with max residency have highest coverage - use 1st inst in case of tie
max_cov = max([tot_cov_list[inst] for inst in max_res_list])
tmp_list=dict(zip(max_res_list,[tot_cov_list[inst] for inst in max_res_list]))
#max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov]
max_cov_list = [inst for inst,val in tmp_list.items() if val==max_cov]
#Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index
random_cov_index=random.randint(0,len(max_cov_list)-1)
if VERBOSE:
print(max_res, max_res_list)
print("Coverage of max insts: ")
print(max_cov,max_cov_list[random_cov_index],inst_index[max_cov_list[random_cov_index]])
print("Selected index = "+str(random_cov_index)+ " length: " +str(len(max_cov_list)))
todel_list=[]
deleted_cov_list=[]
deleted_res_list=[]
deleted_list_count = 0
for macro in res_list:
if (res_list[macro][inst_index[max_cov_list[random_cov_index]]] > 0):
todel_list.append(macro)
if VERBOSE:
print("macros to delete")
print(todel_list, len(todel_list))
#delete macros corresponding to max inst
if len(res_list.keys()) >0 and len(todel_list)>0:
for m in todel_list:
deleted_res_list.append(res_list[m])
deleted_cov_list.append(cov_list[m])
deleted_list_count = deleted_list_count + 1
del cov_list[m]
del res_list[m]
if VERBOSE:
print("remaining macros: " +str(len(res_list.keys())))
print("append inst: " +str(max_cov_list[random_cov_index]))
#append instruction to stressmark list
return_list.append(max_cov_list[random_cov_index])
print(return_list)
else:
if VERBOSE:
print("no macros selected by instruction "+str(max_cov_list[random_cov_index]))
for i in tot_res_list:
for l in range(0,deleted_list_count):
if tot_res_list[i]:
tot_res_list[i] = tot_res_list[i] - deleted_res_list[l][inst_index[i]]
tot_cov_list[i] = tot_cov_list[i] - deleted_cov_list[l][inst_index[i]]
#delete instruction
#for i in max_res_list:
inst=max_cov_list[random_cov_index]
del inst_index[inst]
del tot_cov_list[inst]
del tot_res_list[inst]
if (len(res_list.keys()) >0):
get_stressmark_inst_res(cov_list, res_list, inst_index, tot_cov_list, tot_res_list, return_list)
else:
print(return_list)
#return return_list
def get_stressmark_inst_sw(cov_list, sw_list, inst_index, tot_cov_list, tot_sw_list, return_list):
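    # Same greedy loop as the residency variant, but instructions are ranked by total switching.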
#pdb.set_trace()
#find max switching value
max_sw = max(tot_sw_list.values())
#Find list of instructions with max switching
max_sw_list = [inst for inst,val in tot_sw_list.items() if val==max_sw]
    #Check which insts with max switching have highest coverage - use 1st inst in case of tie
max_cov = max([tot_cov_list[inst] for inst in max_sw_list])
tmp_list = dict(zip(max_sw_list,[tot_cov_list[inst] for inst in max_sw_list]))
#max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov]
max_cov_list = [inst for inst,val in tmp_list.items() if val==max_cov]
#Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index
random_cov_index=random.randint(0,len(max_cov_list)-1)
if VERBOSE:
print(max_sw, max_sw_list)
print("Coverage of max insts: ")
print(max_cov,max_cov_list[random_cov_index],inst_index[max_cov_list[random_cov_index]])
print("random index = "+str(random_cov_index)+ " length: " +str(len(max_cov_list)))
todel_list=[]
#deleted_cov_list=[]
#deleted_sw_list=[]
deleted_cov_list=defaultdict(list)
deleted_sw_list=defaultdict(list)
deleted_list_count = 0
for macro in sw_list:
if (sw_list[macro][inst_index[max_cov_list[random_cov_index]]] > 0):
todel_list.append(macro)
if VERBOSE:
print("macros to delete")
print(todel_list, len(todel_list))
#delete macros corresponding to max inst
#if len(sw_list.keys()) >0 and len(todel_list)>0:
# for m in todel_list:
# deleted_sw_list.append(sw_list[m])
# deleted_cov_list.append(cov_list[m])
# deleted_list_count = deleted_list_count + 1
# del cov_list[m]
# del sw_list[m]
# if VERBOSE:
# print("remaining macros: " +str(len(sw_list.keys())))
# print("append inst: " +str(max_cov_list[random_cov_index]))
# #append instruction to stressmark list
# return_list.append(max_cov_list[random_cov_index])
# print(return_list)
#else:
# if VERBOSE:
# print("no macros selected by instruction "+str(max_cov_list[random_cov_index]))
if len(sw_list.keys()) >0 and len(todel_list)>0:
for m in todel_list:
deleted_sw_list[m] = sw_list[m]
deleted_cov_list[m] = cov_list[m]
deleted_list_count = deleted_list_count + 1
del cov_list[m]
del sw_list[m]
if VERBOSE:
print("remaining macros: " +str(len(sw_list.keys())))
print(sw_list.keys())
print("append inst: " +str(max_cov_list[random_cov_index]))
#append instruction to stressmark list
if(len(todel_list)>0):
return_list.append(max_cov_list[random_cov_index])
else:
print("No new macros selected")
print(return_list)
else:
if VERBOSE:
print("no macros selected by instruction "+str(max_cov_list[random_cov_index]))
if VERBOSE:
print("Deleted KEYS and VALS::::")
print ("COV:")
for k,v in deleted_cov_list.items():
print (k)
print ("SW:")
for k,v in deleted_sw_list.items():
print (k)
for i in tot_sw_list:
#for l in range(0,deleted_list_count):
for l in todel_list:
if tot_cov_list[i]:
#print("Sw: ",tot_sw_list[i], deleted_sw_list[l], l, i, inst_index[i])
tot_sw_list[i] = tot_sw_list[i] - deleted_sw_list[l][inst_index[i]]
tot_cov_list[i] = tot_cov_list[i] - deleted_cov_list[l][inst_index[i]]
#delete instruction
#for i in max_sw_list:
inst=max_cov_list[random_cov_index]
del inst_index[inst]
del tot_cov_list[inst]
del tot_sw_list[inst]
#print(sw_list)
if len(sw_list.keys()) >0 or len(cov_list.keys())>0:
get_stressmark_inst_sw(cov_list, sw_list, inst_index, tot_cov_list, tot_sw_list, return_list)
else:
print(return_list)
#return return_list
def get_stressmark_inst_macro_virus(cov_list, sw_list, targeted_cov_list, targeted_sw_list, inst_index, tot_cov_list, tot_sw_list, wt_list, return_list):
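    # Weighted-switching greedy loop restricted to a targeted macro list: retired macros are
    # also removed from the targeted dictionaries, and recursion stops once the targeted macros
    # are exhausted or the remaining weighted switching drops below WTED_SW_THRESHOLD.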
#pdb.set_trace()
#find max switching value
max_sw = max(tot_sw_list.values())
#Find list of instructions with max switching
max_sw_list = [inst for inst,val in tot_sw_list.items() if val==max_sw]
    #Check which insts with max switching have highest coverage - use 1st inst in case of tie
max_cov = max([tot_cov_list[inst] for inst in max_sw_list])
tmp_list = dict(zip(max_sw_list,[tot_cov_list[inst] for inst in max_sw_list]))
#max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov]
max_cov_list = [inst for inst,val in tmp_list.items() if val==max_cov]
#Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index
random_cov_index=random.randint(0,len(max_cov_list)-1)
if VERBOSE:
print(max_sw, max_sw_list)
print("Coverage of max insts: ")
print(max_cov,max_cov_list[random_cov_index],inst_index[max_cov_list[random_cov_index]])
print("random index = "+str(random_cov_index)+ " length: " +str(len(max_cov_list)))
todel_list=[]
#deleted_cov_list=[]
#deleted_sw_list=[]
deleted_cov_list=defaultdict(list)
deleted_sw_list=defaultdict(list)
deleted_list_count = 0
for macro in sw_list:
if (sw_list[macro][inst_index[max_cov_list[random_cov_index]]] > 0):
todel_list.append(macro)
if VERBOSE:
print("macros to delete")
print(todel_list, len(todel_list))
#print("Weighted list")
#for macro in todel_list:
# print(str(macro), " ", wt_list[macro])
if len(sw_list.keys()) >0 and len(todel_list)>0:
for m in todel_list:
deleted_sw_list[m] = sw_list[m]
deleted_cov_list[m] = cov_list[m]
deleted_list_count = deleted_list_count + 1
del cov_list[m]
del sw_list[m]
if(m in targeted_cov_list.keys()):
del targeted_cov_list[m]
if(m in targeted_sw_list.keys()):
del targeted_sw_list[m]
if VERBOSE:
print("remaining macros fro targeted list: " +str(len(targeted_sw_list.keys())))
print(targeted_sw_list.keys())
print("append inst: " +str(max_cov_list[random_cov_index]))
#append instruction to stressmark list
if(len(todel_list)>0):
return_list.append(max_cov_list[random_cov_index])
else:
print("No new macros selected")
print(return_list)
else:
if VERBOSE:
print("no macros selected by instruction "+str(max_cov_list[random_cov_index]))
if VERBOSE:
print("Deleted KEYS and VALS::::")
print ("COV:")
for k,v in deleted_cov_list.items():
print (k)
print ("SW:")
for k,v in deleted_sw_list.items():
print (k)
for i in tot_sw_list:
#for l in range(0,deleted_list_count):
for l in todel_list:
if tot_cov_list[i]:
#print("Sw: ",tot_sw_list[i], deleted_sw_list[l], l, i, inst_index[i])
tot_sw_list[i] = tot_sw_list[i] - wt_list[l]*deleted_sw_list[l][inst_index[i]]
tot_cov_list[i] = tot_cov_list[i] - deleted_cov_list[l][inst_index[i]]
#delete instruction
#for i in max_sw_list:
inst=max_cov_list[random_cov_index]
del inst_index[inst]
del tot_cov_list[inst]
del tot_sw_list[inst]
#print(sw_list)
tot_sw_val = sum([tot_sw_list[i] for i in tot_sw_list])
if VERBOSE:
print("Tot sw: ",tot_sw_val)
if (len(targeted_sw_list.keys()) >0 or len(targeted_cov_list.keys())>0) and tot_sw_val>WTED_SW_THRESHOLD:
get_stressmark_inst_macro_virus(cov_list, sw_list, targeted_cov_list, targeted_sw_list, inst_index, tot_cov_list, tot_sw_list, wt_list, return_list)
else:
print(return_list)
#return return_list
def get_stressmark_inst_wted_sw(cov_list, sw_list, inst_index, tot_cov_list, tot_sw_list, wt_list, return_list):
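    # Greedy loop ranked by weighted switching; stops when no macros remain or the remaining
    # weighted switching falls below WTED_SW_THRESHOLD.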
#pdb.set_trace()
    #find max weighted switching value
max_sw = max(tot_sw_list.values())
#Find list of instructions with max switching
max_sw_list = [inst for inst,val in tot_sw_list.items() if val==max_sw]
    #Check which insts with max switching have highest coverage - use 1st inst in case of tie
max_cov = max([tot_cov_list[inst] for inst in max_sw_list])
tmp_list = dict(zip(max_sw_list,[tot_cov_list[inst] for inst in max_sw_list]))
#max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov]
max_cov_list = [inst for inst,val in tmp_list.items() if val==max_cov]
#Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index
random_cov_index=random.randint(0,len(max_cov_list)-1)
if VERBOSE:
print(max_sw, max_sw_list)
print("Coverage of max insts: ")
print(max_cov,max_cov_list[random_cov_index],inst_index[max_cov_list[random_cov_index]])
print("random index = "+str(random_cov_index)+ " length: " +str(len(max_cov_list)))
todel_list=[]
#deleted_cov_list=[]
#deleted_sw_list=[]
deleted_cov_list=defaultdict(list)
deleted_sw_list=defaultdict(list)
deleted_list_count = 0
for macro in sw_list:
if (sw_list[macro][inst_index[max_cov_list[random_cov_index]]] > 0):
todel_list.append(macro)
if VERBOSE:
print("macros to delete")
print(todel_list, len(todel_list))
if len(sw_list.keys()) >0 and len(todel_list)>0:
for m in todel_list:
deleted_sw_list[m] = sw_list[m]
deleted_cov_list[m] = cov_list[m]
deleted_list_count = deleted_list_count + 1
del cov_list[m]
del sw_list[m]
if VERBOSE:
print("remaining macros: " +str(len(sw_list.keys())))
print(sw_list.keys())
print("append inst: " +str(max_cov_list[random_cov_index]))
#append instruction to stressmark list
if(len(todel_list)>0):
return_list.append(max_cov_list[random_cov_index])
else:
print("No new macros selected")
print(return_list)
else:
if VERBOSE:
print("no macros selected by instruction "+str(max_cov_list[random_cov_index]))
if VERBOSE:
print("Deleted KEYS and VALS::::")
print ("COV:")
for k,v in deleted_cov_list.items():
print (k)
print ("SW:")
for k,v in deleted_sw_list.items():
print (k)
for i in tot_sw_list:
#for l in range(0,deleted_list_count):
for l in todel_list:
if tot_cov_list[i]:
#print("Sw: ",tot_sw_list[i], deleted_sw_list[l], l, i, inst_index[i])
tot_sw_list[i] = tot_sw_list[i] - wt_list[l]*deleted_sw_list[l][inst_index[i]]
tot_cov_list[i] = tot_cov_list[i] - deleted_cov_list[l][inst_index[i]]
#delete instruction
#for i in max_sw_list:
inst=max_cov_list[random_cov_index]
del inst_index[inst]
del tot_cov_list[inst]
del tot_sw_list[inst]
#print(sw_list)
tot_sw_val = sum([tot_sw_list[i] for i in tot_sw_list])
if VERBOSE:
print("Tot sw: ",tot_sw_val)
if (len(sw_list.keys()) >0 or len(cov_list.keys())>0) and tot_sw_val>WTED_SW_THRESHOLD:
get_stressmark_inst_wted_sw(cov_list, sw_list, inst_index, tot_cov_list, tot_sw_list, wt_list, return_list)
else:
print(return_list)
#return return_list
def main():
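    # Parse arguments, load the per-instruction coverage/switching/residency data,
    # then greedily build the stressmark instruction list and print it (or its weights).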
print("Running command: python " +str(sys.argv) + "............")
#Paths
SERMINER_CONFIG_HOME=os.environ['SERMINER_CONFIG_HOME']
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output_dir", type=str, help="Output dir", required=True)
parser.add_argument("-n", "--num_insts", type=int, help="Number of input instructions", required=False)
parser.add_argument("-t", "--stressmark_type", type=str, help="Type of stressmark (coverage/switching/residency based)", required=False, default="res")
parser.add_argument("-th", "--sw_threshold", type=str, help="Switching Threshold", required=True)
parser.add_argument("-if", "--inst_fraction", type=float, help="Instruction fraction (between 0 and 1)", required=False, default=0.99)
parser.add_argument("-il", "--workload_inst_list", type=str, help="Workload instruction list (if instruction fraction is not provided)", default=str(SERMINER_CONFIG_HOME)+'/inst_list.txt', required=False)
parser.add_argument("-ml", "--targeted_macro_list", type=str, help="Macro list for targeted viruses", required=False, default='/tmp/DUMMY_PATH') #Python complains if a dummy path is not given
parser.add_argument("-p", "--print_val", type=int, help="Print insts (0) / Print weights (1)", required=True)
args = parser.parse_args()
OUTPUT_DIR = args.output_dir
#NUM_INSTS = args.num_insts
if (args.stressmark_type):
stressmk_type = args.stressmark_type
sw_threshold = args.sw_threshold
if (args.print_val):
PRINT_WEIGHTS = 1
PRINT_INSTS = 0
else:
PRINT_INSTS = 1
PRINT_WEIGHTS = 0
if (args.inst_fraction >=1 or args.inst_fraction <0):
print("Invalid instruction fraction... Should be between 0 and 1")
exit()
else:
inst_fraction=args.inst_fraction
inst_list = str(SERMINER_CONFIG_HOME) + '/inst_list.txt'
NUM_INSTS = len(open(inst_list).readlines())
    if (os.path.exists(args.targeted_macro_list)): #Only if a targeted macro list is provided
targeted_macro_list = args.targeted_macro_list
num_targeted_macros = len(open(targeted_macro_list).readlines())
targeted_macro_array = [line.rstrip('\n') for line in open(targeted_macro_list, 'r').readlines()]
if (not os.path.exists(args.workload_inst_list)):
if (args.inst_fraction>=0.99):
print("Instruction list not provided or incorrect. Setting instruction list as default ", inst_list)
else:
print("Selecting random instruction fraction of ", inst_fraction)
else:
workload_inst_list = args.workload_inst_list
workload_num_insts = len(open(workload_inst_list).readlines())
workload_inst_array = [line.rstrip('\n') for line in open(workload_inst_list, 'r').readlines()]
print ("Workload Inst list ", workload_inst_list, "exists!!! Num insts:", workload_num_insts)
#out = sp.Popen(['wc -l ', str(OUTPUT_DIR),'/inst_list.txt'], stdout=sp.PIPE, stderr=sp.STDOUT)
#stdout, stderr = out.communicate()
#print("Num insts: "+str(stdout))
coverage_file = str(OUTPUT_DIR) + '/macro_perinst_coverage_th' +str(sw_threshold)+'.txt'
switching_file = str(OUTPUT_DIR) + '/macro_perinst_switching_th' +str(sw_threshold)+'.txt'
wted_macro_file = str(OUTPUT_DIR) + '/wted_macro_th' +str(sw_threshold)+'.txt'
residency_file = str(OUTPUT_DIR) + '/macro_perinst_residency_th' +str(sw_threshold)+'.txt'
#Initialize lists
#a) cov_dict - complete dictionary with all instructions and macros
    #b) selected_cov_dict - dictionary with only selected instructions and all macros
#c) pruned_cov_dict - dictionary with all instructions and macros with non zero switching in selected list
inst_array = [line.rstrip('\n') for line in open(inst_list, 'r').readlines()]
if (os.path.exists(args.workload_inst_list)):
#Use Workload inst list
print("Workload inst array selected")
print(workload_inst_array)
selected_inst_array = np.array(workload_inst_array)
selected_indices = np.array([inst_array.index(x) for x in inst_array if x in workload_inst_array])
elif(inst_fraction<0.99):
#Generate randomly selected instruction arrays
selected_inst_array, selected_indices = gen_random_inst_list(inst_array, inst_fraction)
else:
#Use default inst list
selected_inst_array = inst_array
selected_indices = np.arange(0,len(inst_array))
#Read input files
pruned_cov_dict, macros_per_inst = get_cov_dict_info(coverage_file, selected_indices, NUM_INSTS)
wm_dict = get_wm_info(wted_macro_file)
pruned_sw_dict, macro_sw_per_inst, macro_wted_sw_per_inst = get_sw_dict_info(switching_file, selected_indices, wm_dict, NUM_INSTS)
pruned_res_dict, macro_res_per_inst = get_res_dict_info(residency_file, NUM_INSTS)
if (os.path.exists(args.targeted_macro_list)):
targeted_cov_dict = dict((m, pruned_cov_dict[m])for m in targeted_macro_array)
targeted_sw_dict = dict((m, pruned_sw_dict[m]) for m in targeted_macro_array)
targeted_wm_dict = dict((m, 0) for m in wm_dict) #Initialize targeted_wm_dict to 0
selected_indices = np.arange(0,len(inst_array)) #Use all insts
for m in targeted_macro_array:
targeted_wm_dict[m] = wm_dict[m]
tmp_pruned_sw_dict, tmp_macro_sw_per_inst, targeted_macro_wted_sw_per_inst = get_sw_dict_info(switching_file, selected_indices, targeted_wm_dict, NUM_INSTS) #Only last parameter needed
inst_targeted_macro_wted_sw_dict = dict(zip(inst_array, targeted_macro_wted_sw_per_inst))
else:
targeted_cov_dict = pruned_cov_dict
targeted_sw_dict = pruned_sw_dict
targeted_wm_dict = wm_dict
#Generate dictionaries
inst_index_dict = dict(zip(inst_array,range(0,len(inst_array))))
inst_macro_dict = dict(zip(inst_array, macros_per_inst))
inst_macro_sw_dict = dict(zip(inst_array, macro_sw_per_inst))
inst_macro_wted_sw_dict = dict(zip(inst_array, macro_wted_sw_per_inst))
inst_macro_res_dict = dict(zip(inst_array, macro_res_per_inst))
#Preserve original list
init_inst_macro_sw_dict = inst_macro_sw_dict.copy()
init_inst_macro_wted_sw_dict = inst_macro_wted_sw_dict.copy()
init_inst_macro_res_dict = inst_macro_res_dict.copy()
if (os.path.exists(args.targeted_macro_list)):
init_inst_targeted_macro_wted_sw_dict = inst_targeted_macro_wted_sw_dict.copy()
if VERBOSE:
print(len(init_inst_targeted_macro_wted_sw_dict))
if (os.path.exists(args.targeted_macro_list)):
print("Targeted Macros: " +str(len(targeted_cov_dict)))
if VERBOSE:
print(targeted_cov_dict)
print("Macros with non-zero switching: " +str(len(targeted_sw_dict)))
if VERBOSE:
print(targeted_sw_dict)
#print(inst_targeted_macro_wted_sw_dict)
print(init_inst_macro_wted_sw_dict)
else:
print("Total Macros: " +str(len(pruned_cov_dict)))
if VERBOSE:
print(pruned_cov_dict)
print("Macros with non-zero switching: " +str(len(pruned_sw_dict)))
if VERBOSE:
print(pruned_sw_dict)
#Recursive function to get list of instructions in stressmark
stressmark_inst_list=[]
#get_stressmark_inst( pruned_cov_dict, pruned_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_sw_dict, stressmark_inst_list)
if (stressmk_type == "cov"):
print ("Generating Coverage stressmark")
get_stressmark_inst_cov( pruned_cov_dict, pruned_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_sw_dict, stressmark_inst_list)
elif (stressmk_type == "sw"):
get_stressmark_inst_sw( pruned_cov_dict, pruned_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_sw_dict, stressmark_inst_list)
elif (stressmk_type == "wted_sw"):
get_stressmark_inst_wted_sw(pruned_cov_dict, pruned_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_wted_sw_dict, wm_dict, stressmark_inst_list)
elif (stressmk_type == "macro_virus"): #only weighted switching
#get_stressmark_inst_wted_sw(targeted_cov_dict, targeted_sw_dict, inst_index_dict, inst_macro_dict, inst_targeted_macro_wted_sw_dict, targeted_wm_dict, stressmark_inst_list)
get_stressmark_inst_wted_sw(targeted_cov_dict, targeted_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_wted_sw_dict, targeted_wm_dict, stressmark_inst_list)
elif (stressmk_type == "res"): # Default option for any core with no clock gating
get_stressmark_inst_res( pruned_cov_dict, pruned_res_dict, inst_index_dict, inst_macro_dict, inst_macro_res_dict, stressmark_inst_list)
if(PRINT_INSTS):
print("Print stressmark instructions")
print(" ".join(stressmark_inst_list))
#print("Switching vals")
#print("Max inst lists: " +str(stressmark_inst_list))
if(PRINT_WEIGHTS):
print("Print stressmark instruction weights")
if (stressmk_type == "cov"):
min_val=min([init_inst_macro_sw_dict[inst] for inst in stressmark_inst_list])
for inst in stressmark_inst_list:
print(str(int(round(INST_SCALE_FACTOR*init_inst_macro_sw_dict[inst]/min_val))),' ',end='')
elif (stressmk_type == "sw"):
min_val=min([init_inst_macro_sw_dict[inst] for inst in stressmark_inst_list])
for inst in stressmark_inst_list:
print(str(int(round(INST_SCALE_FACTOR*init_inst_macro_sw_dict[inst]/min_val))),' ',end='')
elif (stressmk_type == "wted_sw"):
min_val=min([init_inst_macro_wted_sw_dict[inst] for inst in stressmark_inst_list])
for inst in stressmark_inst_list:
print(str(int(round(INST_SCALE_FACTOR*init_inst_macro_wted_sw_dict[inst]/min_val))),' ',end='')
elif (stressmk_type == "macro_virus"):
min_val=min([init_inst_targeted_macro_wted_sw_dict[inst] for inst in stressmark_inst_list])
for inst in stressmark_inst_list:
print(str(int(round(INST_SCALE_FACTOR*init_inst_targeted_macro_wted_sw_dict[inst]/min_val))),' ',end='')
elif (stressmk_type == "res"):
            min_val=min([init_inst_macro_res_dict[inst] for inst in stressmark_inst_list])
for inst in stressmark_inst_list:
print(str(int(round(INST_SCALE_FACTOR*init_inst_macro_res_dict[inst]/min_val))),' ',end='')
#print("Inst: "+str(inst) + " switching: " +str(init_inst_macro_sw_dict[inst]) + " Weight: " +str(int(round(INST_SCALE_FACTOR*init_inst_macro_sw_dict[inst]/min_sw))),' ',end='')
print('')
## Write selected array to file
#selected_inst_file = "/tmp/STRESSMARK_OUT/IF_"+str(inst_fraction)+"/selected_inst_list.txt"
#with open(selected_inst_file, 'w') as f:
# for item in selected_inst_array:
# f.write("%s\n" % item)
if __name__ == "__main__":
main() | en | 0.586025 | # Copyright 2020 IBM Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #np.set_printoptions(threshold=np.nan) #Stop adding instructions when remaining switching is less than this value #Skip 1st line #else: #print(str(dict_arr[0]) + " : " +str(cov_sum)) #Skip 1st line #Skip 1st line #res_dict[dict_arr[0]] = (np.array(dict_arr)[1+selected_indices.astype(int)]).astype(float) #else: #print(str(dict_arr[0]) + " : " +str(res_sum)) #ifile=open(inst_file); #all_insts = [x.strip() for x in ifile.readlines()] #print(num_lines,frac,selected_inst_array) #pdb.set_trace() #find max residency value #if VERBOSE: # print(tot_sw_list.values()) #Find list of instructions with max residency #Check which insts with max residency have highest coverwge - use 1st inst in case of tie #max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov] #Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index #delete macros corresponding to max inst #append instruction to stressmark list #delete instruction #for i in max_res_list: #return return_list #pdb.set_trace() #find max switching value #Find list of instructions with max switching #Check which insts with max switching have highest coverwge - use 1st inst in case of tie #max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov] #Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index #deleted_cov_list=[] #deleted_sw_list=[] #delete macros corresponding to max inst #if len(sw_list.keys()) >0 and len(todel_list)>0: # for m in todel_list: # deleted_sw_list.append(sw_list[m]) # deleted_cov_list.append(cov_list[m]) # deleted_list_count = deleted_list_count + 1 # del cov_list[m] # del sw_list[m] # if VERBOSE: # print("remaining macros: " +str(len(sw_list.keys()))) # print("append inst: " +str(max_cov_list[random_cov_index])) # #append instruction to stressmark list # return_list.append(max_cov_list[random_cov_index]) # print(return_list) #else: # if VERBOSE: # print("no macros selected by instruction "+str(max_cov_list[random_cov_index])) #append instruction to stressmark list #for l in range(0,deleted_list_count): #print("Sw: ",tot_sw_list[i], deleted_sw_list[l], l, i, inst_index[i]) #delete instruction #for i in max_sw_list: #print(sw_list) #return return_list #pdb.set_trace() #find max switching value #Find list of instructions with max switching #Check which insts with max switching have highest coverwge - use 1st inst in case of tie #max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov] #Choose instruction with max coverage in case of tie.. 
if coverage is equal, choose a random index #deleted_cov_list=[] #deleted_sw_list=[] #print("Weighted list") #for macro in todel_list: # print(str(macro), " ", wt_list[macro]) #append instruction to stressmark list #for l in range(0,deleted_list_count): #print("Sw: ",tot_sw_list[i], deleted_sw_list[l], l, i, inst_index[i]) #delete instruction #for i in max_sw_list: #print(sw_list) #return return_list #pdb.set_trace() #find max residency value #Find list of instructions with max switching #Check which insts with max switching have highest coverwge - use 1st inst in case of tie #max_cov_list = [inst for inst,val in tot_cov_list.items() if val==max_cov] #Choose instruction with max coverage in case of tie.. if coverage is equal, choose a random index #deleted_cov_list=[] #deleted_sw_list=[] #append instruction to stressmark list #for l in range(0,deleted_list_count): #print("Sw: ",tot_sw_list[i], deleted_sw_list[l], l, i, inst_index[i]) #delete instruction #for i in max_sw_list: #print(sw_list) #return return_list #Paths #Python complains if a dummy path is not given #NUM_INSTS = args.num_insts #Only if workload inst list does not exist #out = sp.Popen(['wc -l ', str(OUTPUT_DIR),'/inst_list.txt'], stdout=sp.PIPE, stderr=sp.STDOUT) #stdout, stderr = out.communicate() #print("Num insts: "+str(stdout)) #Initialize lists #a) cov_dict - complete dictionary with all instructions and macros #b) selected_cov_dict - dictionary with only selected intructions and all macros #c) pruned_cov_dict - dictionary with all instructions and macros with non zero switching in selected list #Use Workload inst list #Generate randomly selected instruction arrays #Use default inst list #Read input files #Initialize targeted_wm_dict to 0 #Use all insts #Only last parameter needed #Generate dictionaries #Preserve original list #print(inst_targeted_macro_wted_sw_dict) #Recursive function to get list of instructions in stressmark #get_stressmark_inst( pruned_cov_dict, pruned_sw_dict, inst_index_dict, inst_macro_dict, inst_macro_sw_dict, stressmark_inst_list) #only weighted switching #get_stressmark_inst_wted_sw(targeted_cov_dict, targeted_sw_dict, inst_index_dict, inst_macro_dict, inst_targeted_macro_wted_sw_dict, targeted_wm_dict, stressmark_inst_list) # Default option for any core with no clock gating #print("Switching vals") #print("Max inst lists: " +str(stressmark_inst_list)) #print("Inst: "+str(inst) + " switching: " +str(init_inst_macro_sw_dict[inst]) + " Weight: " +str(int(round(INST_SCALE_FACTOR*init_inst_macro_sw_dict[inst]/min_sw))),' ',end='') ## Write selected array to file #selected_inst_file = "/tmp/STRESSMARK_OUT/IF_"+str(inst_fraction)+"/selected_inst_list.txt" #with open(selected_inst_file, 'w') as f: # for item in selected_inst_array: # f.write("%s\n" % item) | 1.734188 | 2 |
bot/replies/thanks.py | shivakumarb3/vardhamanbot | 9 | 6633181 | import random
def reply(activity, bot, data):
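    # Pick a random canned acknowledgement and send it back as a text activity.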
responses = [
"I'm glad I'm helpful!",
"You're welcome!",
"I'm glad I could be of assistance",
"Your're very welcome!",
"glad to help",
"It's my pleasure",
"Sure thing!",
"I'm touched"
]
response = random.choice(responses)
bot.send_text_activity(activity, response) | import random
def reply(activity, bot, data):
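    # Pick a random canned acknowledgement and send it back as a text activity.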
responses = [
"I'm glad I'm helpful!",
"You're welcome!",
"I'm glad I could be of assistance",
"Your're very welcome!",
"glad to help",
"It's my pleasure",
"Sure thing!",
"I'm touched"
]
response = random.choice(responses)
bot.send_text_activity(activity, response) | none | 1 | 2.405585 | 2 |
|
factories/debian.py | expanse-project/expanse-buildbot | 0 | 6633182 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import factory
reload(factory)
from factory import *
import go_ethereum
reload(go_ethereum)
from go_ethereum import _go_cmds
@properties.renderer
def jsonrpc_for_develop(props):
if 'version' in props:
return int(props['version'][2:3]) > 3
return None
@properties.renderer
def deb_version(props):
if 'version' in props:
if ":" in props['version']:
return props['version'][2:]
else:
return props['version']
return None
def deb_factory(name=None, repourl=None, ppabranch=None, branch='master', distribution='trusty', architecture='i386', testdeb=False):
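    # Assemble a BuildBot factory that checks out the repo, creates the orig tarball,
    # pulls the debian/ packaging branch, bumps the changelog, builds a source package
    # (plus cowbuilder binary packages for expanse/cpp-expanse), uploads the artifacts,
    # and on amd64 signs and dputs the source changes to the matching Launchpad PPA.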
factory = BuildFactory()
for step in [
Git(
haltOnFailure=True,
logEnviron=False,
repourl=repourl,
branch=branch,
mode='full',
method='copy',
retry=(5, 3)
),
# Set snapshot property for Launchpad versioning
SetProperty(
description="setting snapshot",
descriptionDone="set snapshot",
name="set-snapshot",
property="snapshot",
value=Interpolate("+%(prop:buildnumber)s%(kw:snapshot)s%(kw:distribution)s",
snapshot=(dev_snapshot if branch == 'develop' or testdeb else ""),
distribution=distribution)
)
]: factory.addStep(step)
# Run 'go get' for go-expanse
if name == 'expanse':
for step in [
ShellCommand(
haltOnFailure=True,
logEnviron=False,
name="move-src",
command=_go_cmds(branch=branch),
description="moving src",
descriptionDone="move src",
env={"GOPATH": Interpolate("%(prop:workdir)s/go")}
),
ShellCommand(
logEnviron=False,
name="source-tarball",
description="creating source tarball",
descriptionDone="create source tarball",
command=Interpolate("tar --exclude .git --exclude pkg --exclude bin -czf "
"../%(kw:name)s_%(prop:version)s%(prop:snapshot)s.orig.tar.gz .", name=name),
workdir=Interpolate("%(prop:workdir)s/go")
),
# clean up the Git checkout for debuild
ShellCommand(
logEnviron=False,
name="clean-build",
command="rm -rf build && mkdir build",
description="cleaning build",
descriptionDone="clean build",
workdir=Interpolate("%(prop:workdir)s")
)
]: factory.addStep(step)
# Get qtwebengine-opensource-src tarball
elif name == 'qtwebengine-opensource-src':
for step in [
ShellCommand(
logEnviron=False,
name="source-tarball",
description="getting source tarball",
descriptionDone="get source tarball",
command=Interpolate("wget -c https://download.qt.io/official_releases/qt/5.4/%(kw:version)s/submodules/"
"qtwebengine-opensource-src-%(kw:version)s.tar.xz "
"-O ../%(kw:name)s_%(prop:version)s%(prop:snapshot)s.orig.tar.xz",
name=name,
version=branch)
),
# clean up the Git checkout for debuild
ShellCommand(
logEnviron=False,
name="clean-build",
command="rm -rf build && mkdir build",
description="cleaning build",
descriptionDone="clean build",
workdir=Interpolate("%(prop:workdir)s")
)
]: factory.addStep(step)
# Just create the source tarball for others
else:
factory.addStep(ShellCommand(
logEnviron=False,
name="source-tarball",
description="creating source tarball",
descriptionDone="create source tarball",
command=Interpolate("tar --exclude .git -czf "
"../%(kw:name)s_%(kw:version)s%(prop:snapshot)s.orig.tar.gz .",
name=name,
version=deb_version)
))
for step in [
# Get debian/ directory
ShellCommand(
logEnviron=False,
name="get-debian",
description="getting debian folder",
descriptionDone="get debian folder",
command=Interpolate("wget https://github.com/expanse-project/expanse-ppa/archive/%(kw:ppabranch)s.tar.gz -O- |"
" tar -zx --exclude package.sh --exclude README.md --strip-components=1",
ppabranch=ppabranch)
),
# Bump version
ShellCommand(
logEnviron=False,
name="bump-debian",
description="bumping %s deb version" % distribution,
descriptionDone="bump %s deb version" % distribution,
command=Interpolate("EMAIL='caktux (Buildserver key) <<EMAIL>>' "
"dch -v %(prop:version)s%(prop:snapshot)s-0ubuntu1 "
"'git build of %(prop:got_revision)s'",
dist=distribution)
),
# Build a source package
ShellCommand(
logEnviron=False,
name="source-package",
description="debuilding %s" % distribution,
descriptionDone="debuild %s" % distribution,
command="debuild -S -sa -us -uc"
),
]: factory.addStep(step)
# Source only packages for dependencies, build local deb packages otherwise
if name in ['expanse', 'cpp-expanse']:
# Add pbuilderrc with ccache config
# factory.addStep(FileDownload(
# mastersrc="pbuilderrc",
# slavedest="~/.pbuilderrc"
# ))
main_ppa = "http://ppa.launchpad.net/expanse/expanse/ubuntu"
dev_ppa = "http://ppa.launchpad.net/expanse/expanse-dev/ubuntu"
qt_ppa = "http://ppa.launchpad.net/expanse/expanse-qt/ubuntu"
for step in [
# Set PPA dependencies for pbuilder
ShellCommand(
logEnviron=False,
name="pbuilder-opts",
description="setting pbuilderrc",
descriptionDone="set pbuilderrc",
command="echo 'OTHERMIRROR=\""
"deb [trusted=yes] {1} {0} main|deb-src [trusted=yes] {1} {0} main|"
"deb [trusted=yes] {2} {0} main|deb-src [trusted=yes] {2} {0} main|"
"deb [trusted=yes] {3} {0} main|deb-src [trusted=yes] {3} {0} main\"' > ~/.pbuilderrc"
.format(distribution, main_ppa, dev_ppa, qt_ppa)
),
# Package that thing already
UbuCowbuilder(
logEnviron=False,
architecture=architecture,
distribution=distribution,
basetgz="/var/cache/pbuilder/%s-%s-expanse.cow" % (distribution, architecture),
keyring="/usr/share/keyrings/ubuntu-archive-keyring.gpg"
)
]: factory.addStep(step)
for step in [
# Run Lintian
# DebLintian(
# fileloc=Interpolate("%(prop:deb-changes)s")
# ),
# Prepare .changes file for Launchpad
ShellCommand(
name='prepare-changes',
description='preparing changes',
descriptionDone='prepare changes',
command=Interpolate("sed -i -e s/UNRELEASED/%(kw:dist)s/ "
"-e s/urgency=medium/urgency=low/ ../*.changes",
dist=distribution)
),
# Gather artefacts
ShellCommand(
haltOnFailure=True,
logEnviron=False,
name="move-packages",
description='moving packages',
descriptionDone='move packages',
command="mkdir result; mv %s../*.changes ../*.dsc ../*.gz %sresult/" %
("*.deb *.changes " if name in ['expanse', 'cpp-expanse'] else "",
"../*.xz " if name == 'qtwebengine-opensource-src' else ""),
),
# Upload result folder
DirectoryUpload(
slavesrc="result",
masterdest=Interpolate("public_html/builds/%(prop:buildername)s/%(prop:buildnumber)s"),
url=Interpolate("/builds/%(prop:buildername)s/%(prop:buildnumber)s"),
),
# Clean latest link
MasterShellCommand(
name='clean-latest',
description='cleaning latest link',
descriptionDone='clean latest link',
command=['rm', '-f', Interpolate("public_html/builds/%(prop:buildername)s/latest")]
),
# Link latest
MasterShellCommand(
name='link-latest',
description='linking latest',
descriptionDone='link latest',
command=['ln', '-sf', Interpolate("%(prop:buildnumber)s"), Interpolate("public_html/builds/%(prop:buildername)s/latest")]
),
# Create source changes folders
MasterShellCommand(
name='mkdir-changes',
description='mkdir',
descriptionDone='mkdir',
command=['mkdir', '-p',
Interpolate("changes/%(kw:dist)s/%(kw:arch)s/%(kw:name)s",
dist=distribution, arch=architecture, name=name)]
),
# Link source changes
MasterShellCommand(
name='link-changes',
description='linking changes',
descriptionDone='link changes',
command=['ln', '-sf',
Interpolate("../../../../public_html/builds/%(prop:buildername)s/%(prop:buildnumber)s"),
Interpolate("changes/%(kw:dist)s/%(kw:arch)s/%(kw:name)s",
dist=distribution,
arch=architecture,
name=name)]
)
]: factory.addStep(step)
# Use expanse-dev ppa for snapshots, only dput one source pkg
ppa_suffix = ""
if branch == 'develop' or (name == 'libjson-rpc-cpp' and jsonrpc_for_develop):
ppa_suffix = "-dev"
elif name == 'qtwebengine-opensource-src':
ppa_suffix = "-qt"
if architecture == 'amd64':
for step in [
# debsign
MasterShellCommand(
haltOnFailure=False,
flunkOnFailure=False,
name='debsign',
description='debsigning',
descriptionDone='debsign',
command=['debsign', Interpolate("changes/%(kw:dist)s/%(kw:arch)s/"
"%(kw:name)s/%(prop:buildnumber)s/"
"%(kw:name)s_%(kw:version)s%(prop:snapshot)s-"
"0ubuntu1_source.changes",
dist=distribution,
arch=architecture,
name=name,
version=deb_version)]
),
# dput
MasterShellCommand(
name='dput',
description='dputting',
descriptionDone='dput',
command=['dput', 'ppa:%s%s' % ("caktux/ppa" if testdeb else "expanse/expanse", ppa_suffix),
Interpolate("changes/%(kw:dist)s/%(kw:arch)s/%(kw:name)s/"
"%(prop:buildnumber)s/%(kw:name)s_%(kw:version)s%(prop:snapshot)s-"
"0ubuntu1_source.changes",
dist=distribution,
arch=architecture,
name=name,
version=deb_version)]
)
]: factory.addStep(step)
return factory
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import factory
reload(factory)
from factory import *
import go_ethereum
reload(go_ethereum)
from go_ethereum import _go_cmds
@properties.renderer
def jsonrpc_for_develop(props):
if 'version' in props:
return int(props['version'][2:3]) > 3
return None
@properties.renderer
def deb_version(props):
if 'version' in props:
if ":" in props['version']:
return props['version'][2:]
else:
return props['version']
return None
def deb_factory(name=None, repourl=None, ppabranch=None, branch='master', distribution='trusty', architecture='i386', testdeb=False):
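    # Assemble a BuildBot factory that checks out the repo, creates the orig tarball,
    # pulls the debian/ packaging branch, bumps the changelog, builds a source package
    # (plus cowbuilder binary packages for expanse/cpp-expanse), uploads the artifacts,
    # and on amd64 signs and dputs the source changes to the matching Launchpad PPA.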
factory = BuildFactory()
for step in [
Git(
haltOnFailure=True,
logEnviron=False,
repourl=repourl,
branch=branch,
mode='full',
method='copy',
retry=(5, 3)
),
# Set snapshot property for Launchpad versioning
SetProperty(
description="setting snapshot",
descriptionDone="set snapshot",
name="set-snapshot",
property="snapshot",
value=Interpolate("+%(prop:buildnumber)s%(kw:snapshot)s%(kw:distribution)s",
snapshot=(dev_snapshot if branch == 'develop' or testdeb else ""),
distribution=distribution)
)
]: factory.addStep(step)
# Run 'go get' for go-expanse
if name == 'expanse':
for step in [
ShellCommand(
haltOnFailure=True,
logEnviron=False,
name="move-src",
command=_go_cmds(branch=branch),
description="moving src",
descriptionDone="move src",
env={"GOPATH": Interpolate("%(prop:workdir)s/go")}
),
ShellCommand(
logEnviron=False,
name="source-tarball",
description="creating source tarball",
descriptionDone="create source tarball",
command=Interpolate("tar --exclude .git --exclude pkg --exclude bin -czf "
"../%(kw:name)s_%(prop:version)s%(prop:snapshot)s.orig.tar.gz .", name=name),
workdir=Interpolate("%(prop:workdir)s/go")
),
# clean up the Git checkout for debuild
ShellCommand(
logEnviron=False,
name="clean-build",
command="rm -rf build && mkdir build",
description="cleaning build",
descriptionDone="clean build",
workdir=Interpolate("%(prop:workdir)s")
)
]: factory.addStep(step)
# Get qtwebengine-opensource-src tarball
elif name == 'qtwebengine-opensource-src':
for step in [
ShellCommand(
logEnviron=False,
name="source-tarball",
description="getting source tarball",
descriptionDone="get source tarball",
command=Interpolate("wget -c https://download.qt.io/official_releases/qt/5.4/%(kw:version)s/submodules/"
"qtwebengine-opensource-src-%(kw:version)s.tar.xz "
"-O ../%(kw:name)s_%(prop:version)s%(prop:snapshot)s.orig.tar.xz",
name=name,
version=branch)
),
# clean up the Git checkout for debuild
ShellCommand(
logEnviron=False,
name="clean-build",
command="rm -rf build && mkdir build",
description="cleaning build",
descriptionDone="clean build",
workdir=Interpolate("%(prop:workdir)s")
)
]: factory.addStep(step)
# Just create the source tarball for others
else:
factory.addStep(ShellCommand(
logEnviron=False,
name="source-tarball",
description="creating source tarball",
descriptionDone="create source tarball",
command=Interpolate("tar --exclude .git -czf "
"../%(kw:name)s_%(kw:version)s%(prop:snapshot)s.orig.tar.gz .",
name=name,
version=deb_version)
))
for step in [
# Get debian/ directory
ShellCommand(
logEnviron=False,
name="get-debian",
description="getting debian folder",
descriptionDone="get debian folder",
command=Interpolate("wget https://github.com/expanse-project/expanse-ppa/archive/%(kw:ppabranch)s.tar.gz -O- |"
" tar -zx --exclude package.sh --exclude README.md --strip-components=1",
ppabranch=ppabranch)
),
# Bump version
ShellCommand(
logEnviron=False,
name="bump-debian",
description="bumping %s deb version" % distribution,
descriptionDone="bump %s deb version" % distribution,
command=Interpolate("EMAIL='caktux (Buildserver key) <<EMAIL>>' "
"dch -v %(prop:version)s%(prop:snapshot)s-0ubuntu1 "
"'git build of %(prop:got_revision)s'",
dist=distribution)
),
# Build a source package
ShellCommand(
logEnviron=False,
name="source-package",
description="debuilding %s" % distribution,
descriptionDone="debuild %s" % distribution,
command="debuild -S -sa -us -uc"
),
]: factory.addStep(step)
# Source only packages for dependencies, build local deb packages otherwise
if name in ['expanse', 'cpp-expanse']:
# Add pbuilderrc with ccache config
# factory.addStep(FileDownload(
# mastersrc="pbuilderrc",
# slavedest="~/.pbuilderrc"
# ))
main_ppa = "http://ppa.launchpad.net/expanse/expanse/ubuntu"
dev_ppa = "http://ppa.launchpad.net/expanse/expanse-dev/ubuntu"
qt_ppa = "http://ppa.launchpad.net/expanse/expanse-qt/ubuntu"
for step in [
# Set PPA dependencies for pbuilder
ShellCommand(
logEnviron=False,
name="pbuilder-opts",
description="setting pbuilderrc",
descriptionDone="set pbuilderrc",
command="echo 'OTHERMIRROR=\""
"deb [trusted=yes] {1} {0} main|deb-src [trusted=yes] {1} {0} main|"
"deb [trusted=yes] {2} {0} main|deb-src [trusted=yes] {2} {0} main|"
"deb [trusted=yes] {3} {0} main|deb-src [trusted=yes] {3} {0} main\"' > ~/.pbuilderrc"
.format(distribution, main_ppa, dev_ppa, qt_ppa)
),
# Package that thing already
UbuCowbuilder(
logEnviron=False,
architecture=architecture,
distribution=distribution,
basetgz="/var/cache/pbuilder/%s-%s-expanse.cow" % (distribution, architecture),
keyring="/usr/share/keyrings/ubuntu-archive-keyring.gpg"
)
]: factory.addStep(step)
for step in [
# Run Lintian
# DebLintian(
# fileloc=Interpolate("%(prop:deb-changes)s")
# ),
# Prepare .changes file for Launchpad
ShellCommand(
name='prepare-changes',
description='preparing changes',
descriptionDone='prepare changes',
command=Interpolate("sed -i -e s/UNRELEASED/%(kw:dist)s/ "
"-e s/urgency=medium/urgency=low/ ../*.changes",
dist=distribution)
),
# Gather artefacts
ShellCommand(
haltOnFailure=True,
logEnviron=False,
name="move-packages",
description='moving packages',
descriptionDone='move packages',
command="mkdir result; mv %s../*.changes ../*.dsc ../*.gz %sresult/" %
("*.deb *.changes " if name in ['expanse', 'cpp-expanse'] else "",
"../*.xz " if name == 'qtwebengine-opensource-src' else ""),
),
# Upload result folder
DirectoryUpload(
slavesrc="result",
masterdest=Interpolate("public_html/builds/%(prop:buildername)s/%(prop:buildnumber)s"),
url=Interpolate("/builds/%(prop:buildername)s/%(prop:buildnumber)s"),
),
# Clean latest link
MasterShellCommand(
name='clean-latest',
description='cleaning latest link',
descriptionDone='clean latest link',
command=['rm', '-f', Interpolate("public_html/builds/%(prop:buildername)s/latest")]
),
# Link latest
MasterShellCommand(
name='link-latest',
description='linking latest',
descriptionDone='link latest',
command=['ln', '-sf', Interpolate("%(prop:buildnumber)s"), Interpolate("public_html/builds/%(prop:buildername)s/latest")]
),
# Create source changes folders
MasterShellCommand(
name='mkdir-changes',
description='mkdir',
descriptionDone='mkdir',
command=['mkdir', '-p',
Interpolate("changes/%(kw:dist)s/%(kw:arch)s/%(kw:name)s",
dist=distribution, arch=architecture, name=name)]
),
# Link source changes
MasterShellCommand(
name='link-changes',
description='linking changes',
descriptionDone='link changes',
command=['ln', '-sf',
Interpolate("../../../../public_html/builds/%(prop:buildername)s/%(prop:buildnumber)s"),
Interpolate("changes/%(kw:dist)s/%(kw:arch)s/%(kw:name)s",
dist=distribution,
arch=architecture,
name=name)]
)
]: factory.addStep(step)
# Use expanse-dev ppa for snapshots, only dput one source pkg
ppa_suffix = ""
if branch == 'develop' or (name == 'libjson-rpc-cpp' and jsonrpc_for_develop):
ppa_suffix = "-dev"
elif name == 'qtwebengine-opensource-src':
ppa_suffix = "-qt"
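    # debsign and dput only run on the amd64 builder, so each source package is uploaded to the PPA once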
if architecture == 'amd64':
for step in [
# debsign
MasterShellCommand(
haltOnFailure=False,
flunkOnFailure=False,
name='debsign',
description='debsigning',
descriptionDone='debsign',
command=['debsign', Interpolate("changes/%(kw:dist)s/%(kw:arch)s/"
"%(kw:name)s/%(prop:buildnumber)s/"
"%(kw:name)s_%(kw:version)s%(prop:snapshot)s-"
"0ubuntu1_source.changes",
dist=distribution,
arch=architecture,
name=name,
version=deb_version)]
),
# dput
MasterShellCommand(
name='dput',
description='dputting',
descriptionDone='dput',
command=['dput', 'ppa:%s%s' % ("caktux/ppa" if testdeb else "expanse/expanse", ppa_suffix),
Interpolate("changes/%(kw:dist)s/%(kw:arch)s/%(kw:name)s/"
"%(prop:buildnumber)s/%(kw:name)s_%(kw:version)s%(prop:snapshot)s-"
"0ubuntu1_source.changes",
dist=distribution,
arch=architecture,
name=name,
version=deb_version)]
)
]: factory.addStep(step)
return factory | en | 0.503376 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Set snapshot property for Launchpad versioning # Run 'go get' for go-expanse # clean up the Git checkout for debuild # Get qtwebengine-opensource-src tarball # clean up the Git checkout for debuild # Just create the source tarball for others # Get debian/ directory # Bump version # Build a source package # Source only packages for dependencies, build local deb packages otherwise # Add pbuilderrc with ccache config # factory.addStep(FileDownload( # mastersrc="pbuilderrc", # slavedest="~/.pbuilderrc" # )) # Set PPA dependencies for pbuilder # Package that thing already # Run Lintian # DebLintian( # fileloc=Interpolate("%(prop:deb-changes)s") # ), # Prepare .changes file for Launchpad # Gather artefacts # Upload result folder # Clean latest link # Link latest # Create source changes folders # Link source changes # Use expanse-dev ppa for snapshots, only dput one source pkg # debsign # dput | 1.938096 | 2 |
TASK-8/question 1.py | neha865/Cognizance | 0 | 6633183 | <filename>TASK-8/question 1.py
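# interleave nz zeros between consecutive values of a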
import numpy as np
a = np.array([10,11,12,13,14])
nz = 5
Z0 = np.zeros(len(a) + (len(a)-1)*(nz))
Z0[::nz+1] = a
print(np.floor(Z0))
| none | 1 | 3.379657 | 3 |
|
dataPipelines/gc_neo4j_publisher/neo4j_publisher.py | Wildertrek/gamechanger-data | 18 | 6633184 | <filename>dataPipelines/gc_neo4j_publisher/neo4j_publisher.py
import json
import os
import time
import typing as t
import sys
from pathlib import Path
import pandas as pd
from joblib._multiprocessing_helpers import mp
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
from gamechangerml.src.featurization.abbreviation import expand_abbreviations_no_context
from gamechangerml.src.featurization.responsibilities import get_responsibilities
from dataPipelines.gc_neo4j_publisher.config import Config
from dataPipelines.gc_neo4j_publisher import wiki_utils as wu
from neo4j import exceptions
import common.utils.text_utils as tu
import re
from .config import Config as MainConfig
from functools import lru_cache
@lru_cache(maxsize=None)
def get_abbcount_dict() -> t.Dict[str, t.Any]:
with open(Config.abbcount_json_path, "r") as file:
dic = json.load(file)
return dic
@lru_cache(maxsize=None)
def get_agency_names() -> t.List[str]:
df = pd.read_csv(Config.agencies_csv_path)
agencies = list(df['Agency_Name'])
agencies = [x.lower() for x in agencies]
return agencies
def process_ent(ent: str) -> t.Union[t.List[str], str]:
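    # drop a leading "The"/"This" or stray single-character word, strip dotted-leader noise, then expand known abbreviations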
first_word = ent.split(" ")[0]
if (
first_word.upper() == "THE"
or len(first_word) == 1
or first_word.upper() == "THIS"
):
ent = ent.split(" ")[1:]
ent = " ".join(ent)
if "......." in ent:
ent = ent.split(".....")[0]
new_ent = expand_abbreviations_no_context(ent, dic=get_abbcount_dict())
if len(new_ent) > 0:
return new_ent[0]
else:
return ent
def process_query(query: str) -> None:
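    # run a cypher statement against neo4j, retrying with a 10 second pause when the query fails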
with MainConfig.connection_helper.neo4j_session_scope() as session:
try_count = 0
while try_count <= 10:
try:
result = session.run(query)
return
except exceptions.TransientError:
try_count += 1
time.sleep(10)
except Exception as e:
try_count += 1
time.sleep(10)
print("Error with query: {0}. Error: {1}".format(query, e))
class Neo4jPublisher:
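    # publishes parsed document JSON, verified/crowdsourced entities and their relationships into neo4j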
def __init__(self):
self.entEntRelationsStmt = []
self.verifiedEnts = pd.DataFrame()
self.crowdsourcedEnts = set()
def process_json(self, filepath: str, q: mp.Queue) -> str:
with open(filepath) as f:
j = json.load(f)
o = {}
o["id"] = j.get("id", "")
o["doc_num"] = j.get("doc_num", "")
o["doc_type"] = j.get("doc_type", "")
o["display_title_s"] = j.get("display_title_s", "")
o["display_org_s"] = j.get("display_org_s", "")
o["display_doc_type_s"] = j.get("display_doc_type_s", "")
o["ref_list"] = [s.replace("'", '\"') for s in j.get("ref_list", [])]
o["access_timestamp_dt"] = j.get("access_timestamp_dt", "")
o["publication_date_dt"] = (j.get("publication_date_dt", "") or "")
o["crawler_used_s"] = j.get("crawler_used_s", "")
o["source_fqdn_s"] = j.get("source_fqdn_s", "")
o["source_page_url_s"] = j.get("source_page_url_s", "")
o["download_url_s"] = j.get("download_url_s", '')
o["cac_login_required_b"] = j.get("cac_login_required_b", False)
o["title"] = j.get("title", "").replace('"', "\'")
o["keyw_5"] = [s.encode('ascii', 'ignore').decode('utf-8') for s in j.get("keyw_5", [])]
o["filename"] = j.get("filename", "")
o["summary_30"] = j.get("summary_30", "")
o["type"] = j.get("type", "")
o["page_count"] = j.get("page_count", 0)
o["topics_rs"] = j.get("topics_rs", [])
o["init_date"] = j.get("init_date", "")
o["change_date"] = j.get("change_date", "")
o["author"] = j.get("author", "")
o["signature"] = j.get("signature", "")
o["subject"] = j.get("subject", "")
o["classification"] = j.get("classification", "")
o["group_s"] = j.get("group_s", "")
o["pagerank_r"] = j.get("pagerank_r", 0)
o["kw_doc_score_r"] = j.get("kw_doc_score_r", 0)
o["version_hash_s"] = j.get("version_hash_s", "")
o["is_revoked_b"] = j.get("is_revoked_b", False)
o["entities"] = self.process_entity_list(j)
process_query('CALL policy.createDocumentNodesFromJson(' + json.dumps(json.dumps(o)) + ')')
# # TODO responsibilities
# text = j["text"]
# self.process_responsibilities(text)
# TODO paragraphs
# self.process_paragraphs(j, doc_id)
q.put(1)
        return o["id"]
def process_responsibilities(self, text: str) -> None:
resp = get_responsibilities(text, agencies=get_agency_names())
if resp:
for d in resp.values():
ent = d["Agency"]
resps = d["Responsibilities"]
if ent:
filtered_ent = self.filter_ents(ent.strip())
if filtered_ent:
for r in resps:
process_query(
'MATCH (e: Entity) WHERE toLower(e.name) = \"'
+ filtered_ent.lower()
+ '\" '
+ 'MERGE (r: Responsibility {name: \"'
+ r
+ '\"}) '
+ 'MERGE (e)-[:RESPONSIBLE_FOR]->(r);'
)
return
# TODO: refactor param injection logic for cypher statements to guarantee valid statements for all valid strings
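    # One possible direction (untested sketch): bind values as query parameters so the driver handles
    # quoting, e.g. session.run("MATCH (e:Entity) WHERE e.name = $name RETURN e", name=ent),
    # instead of concatenating normalized strings into the statement text.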
@staticmethod
def _normalize_string(s: str) -> str:
"""Normalize string to something that won't interfere with a cypher query"""
return tu.str_chain_apply(
s,
[
tu.translate_to_ascii_string,
tu.squash_whitespace_to_spaces,
tu.remove_plus_signs,
lambda _s: re.sub(r"""['"]\s*['"]""", "", _s), # remove empty quotes
lambda _s: re.sub(r'"', r'', _s), # remove double quotes
tu.squash_non_word_characters
]
)
def process_paragraphs(self, j: t.Dict[str, t.Any], doc_id: str) -> None:
for idx, p in enumerate(j["paragraphs"]):
process_query(
'MERGE (a: Document {doc_id: \"'
+ doc_id
+ '\"}) '
+ 'MERGE (p:Paragraph {par_id: \"' + p['id'] + '\"}) '
+ 'SET p.page_num_i = ' + str(p['page_num_i'])
+ ', p.par_count_i = ' + str(p['par_count_i'])
+ ', p.par_raw_text_t = \"' + self._normalize_string(p['par_raw_text_t']) + '\" '
+ ', p.doc_id = \"' + doc_id + '\" '
+ 'CREATE (a)-[:CONTAINS]->(p);'
)
return
def process_entity_list(self, j: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]:
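        # for each filtered entity, record which paragraphs it appears in and how often it occurs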
entity_dict: t.Dict[str, t.Any] = {}
entity_count: t.Dict[str, int] = {}
try:
for p in j["paragraphs"]:
entities = p["entities"]
types = list(entities.keys())
for type in types:
entity_list = entities[type]
for ent in (self._normalize_string(e) for e in entity_list):
ans = self.filter_ents(ent)
if len(ans) > 0:
if ans not in entity_dict:
entity_dict[ans] = []
entity_count[ans] = 0
entity_dict[ans].append(p["par_inc_count"])
entity_count[ans] += 1
        except Exception:
            print('Error creating entities for: ' + j["id"], file=sys.stderr)
return {"entityPars": entity_dict, "entityCounts": entity_count}
def populate_verified_ents(self, csv: str = Config.agencies_csv_path) -> None:
csv_untrimmed = pd.read_csv(csv, na_filter=False)
csv = csv_untrimmed.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
self.verifiedEnts = csv
if Config.does_assist_table_exist():
with Config.connection_helper.web_db_session_scope('ro') as session:
verified_pg = session.execute("SELECT tokens_assumed FROM gc_assists WHERE tagged_correctly=true")
verified = [el[0] for el in verified_pg]
else:
verified = []
print("Could not retrieve gc_assists table - it doesn't exist.", file=sys.stderr)
# process entities
processed_set = set()
upper_set = set()
for ent in verified:
new_ent = process_ent(ent)
if new_ent.upper() not in upper_set:
upper_set.add(new_ent.upper())
processed_set.add(new_ent)
self.crowdsourcedEnts = processed_set
def process_entity_relationships(self) -> None:
total_ents = len(self.verifiedEnts)
print('Inserting {0} entities ...'.format(total_ents))
entity_json = self.verifiedEnts.to_json(orient="records")
process_query('CALL policy.createEntityNodesFromJson(' + json.dumps(entity_json) + ')')
return
def process_dir(self, files: t.List[str], file_dir: str, q: mp.Queue, max_threads: int) -> None:
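        # parse every .json file in file_dir on a bounded thread pool, reporting progress through q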
if not files:
return
with ThreadPoolExecutor(max_workers=min(max_threads, 16)) as ex:
futures = []
for filename in files:
try:
if filename.endswith('.json'):
                        futures.append(ex.submit(self.process_json, os.path.join(file_dir, filename), q))
except Exception as err:
print('RuntimeError in: ' + filename + ' Error: ' + str(err), file=sys.stderr)
q.put(1)
return
def filter_ents(self, ent: str) -> str:
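        # return the verified agency entry for the entity, the entity itself if it was crowdsourced, else ""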
new_ent = process_ent(ent)
name_df = self.verifiedEnts[
self.verifiedEnts["Agency_Name"].str.upper() == new_ent.upper()
]
if len(name_df):
return name_df.iloc[0, 1]
else:
if new_ent in self.crowdsourcedEnts:
return new_ent
else:
return ""
def process_crowdsourced_ents(self, without_web_scraping: bool, infobox_dir: t.Optional[str] = None):
# check that if no web scraping, we have infobox-dir defined.
# check that infobox-dir was specified, is a directory, has stuff in it if not webscraping.
if without_web_scraping:
if infobox_dir is None:
print("ERROR: infobox-dir was not specified with --without-web-scraping. run the command again with "
"--infobox-dir specified.")
return
if not Path(infobox_dir).is_dir():
print("ERROR: infobox-dir is not a directory with --without-web-scraping. Run the command again with "
"--infobox-dir pointing to a directory.")
return
if not list(Path(infobox_dir).iterdir()):
print("ERROR: infobox-dir is an empty directory with --without-web-scraping. Run the command again "
"with --infobox-dir pointing to a non-empty directory.")
return
total_ents = len(self.crowdsourcedEnts)
print('Inserting {0} entities...'.format(total_ents))
# get the info from wiki page if possible
for ent in self.crowdsourcedEnts:
if without_web_scraping:
# read in json
filename = infobox_dir + '/' + ent + '_infobox.json'
if os.path.exists(filename):
f = open(filename)
info = json.load(f)
else:
info = {}
print("Infobox file does not exist for entity {0}".format(ent))
else:
info = wu.get_infobox_info(ent)
if 'Redirect_Name' in info.keys():
name = info['Redirect_Name']
else:
name = ent
# s is the insert statement for this entity's node
s = 'MERGE (e:Entity {name: \"' + self._normalize_string(name) + '\"}) '
# loop through the keys and add the metadata to the node
for key in info.keys():
if key == 'Redirect_Name': # we don't need this as it's just name in the metadata
continue
# r is the relationship statement between nodes
r = 'MATCH (e:Entity) where e.name =~ \"(?i)' + self._normalize_string(name) + '\" '
ins = info[key]
# sometimes the value is a list depending on HTML format, so unwrap it
if isinstance(ins, list):
for exp in ins:
# find if the value is a node that already exists. if it is, add a relationship using key
# as the relation
# create rule for child_agency/child_agencies
if self._normalize_string(key) == 'Child_agencies' or self._normalize_string(
key) == 'Child_agency':
rel = 'HAS_CHILD'
else:
rel = key
r += 'MATCH (f: Entity) where f.name =~ \"(?i)' + exp + '\" '
r += 'CREATE (e)-[:' + self._normalize_string(rel).upper() + ']->(f)'
self.entEntRelationsStmt.append(r)
# reset the relationship insert string
r = 'MATCH (e:Entity) where e.name =~ \"(?i)' + self._normalize_string(name) + '\" '
# must unwind the list to add to neo4j as a param ([1,2,3] -> '1;2;3')
ins = ''
for el in info[key]:
ins += el + '; '
ins = ins[:-2]
else:
# create rule for child_agency/child_agencies
if self._normalize_string(key) == 'Child_agencies' or self._normalize_string(key) == 'Child_agency':
rel = 'HAS_CHILD'
else:
rel = key
# create the relationships
r += 'MATCH (f: Entity) where f.name =~ \"(?i)' + self._normalize_string(ins) + '\" '
r += 'CREATE (e)-[:' + self._normalize_string(rel).upper() + ']->(f)'
self.entEntRelationsStmt.append(r)
s += 'SET e.' + self._normalize_string(key) + '= \"' + ins + '\" '
process_query(s + ';')
self.entEntRelationsStmt = list(set(self.entEntRelationsStmt))
for r in self.entEntRelationsStmt:
process_query(r)
| en | 0.827355 | # # TODO responsibilities # text = j["text"] # self.process_responsibilities(text) # TODO paragraphs # self.process_paragraphs(j, doc_id) # TODO: refactor param injection logic for cypher statements to guarantee valid statements for all valid strings Normalize string to something that won't interfere with a cypher query ['"]\s*['"] # remove empty quotes # remove double quotes # process entities # check that if no web scraping, we have infobox-dir defined. # check that infobox-dir was specified, is a directory, has stuff in it if not webscraping. # get the info from wiki page if possible # read in json # s is the insert statement for this entity's node # loop through the keys and add the metadata to the node # we don't need this as it's just name in the metadata # r is the relationship statement between nodes # sometimes the value is a list depending on HTML format, so unwrap it # find if the value is a node that already exists. if it is, add a relationship using key # as the relation # create rule for child_agency/child_agencies # reset the relationship insert string # must unwind the list to add to neo4j as a param ([1,2,3] -> '1;2;3') # create rule for child_agency/child_agencies # create the relationships | 2.11961 | 2 |
tinytag/tests/test_all.py | idotobi/tinytag | 0 | 6633185 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# tests can be extended using other bigger files that are not going to be
# checked into git, by placing them into the custom_samples folder
#
# see custom_samples/instructions.txt
#
from __future__ import unicode_literals
import io
import os
import shutil
import sys
import tempfile
import pytest
import re
from pytest import raises
from tinytag import TinyTagException, TinyTag, ID3, Ogg, Wave, Flac
from tinytag.tinytag import Wma, MP4
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # python 2.6 and 3.2 compat
testfiles = OrderedDict([
# MP3
('samples/vbri.mp3', {'extra': {'url': ''}, 'channels': 2, 'samplerate': 44100, 'track_total': None, 'duration': 0.47020408163265304, 'album': 'I Can Walk On Water I Can Fly', 'year': '2007', 'title': 'I Can Walk On Water I Can Fly', 'artist': 'Basshunter', 'track': '01', 'filesize': 8192, 'audio_offset': 1007, 'genre': '(3)Dance', 'comment': '\ufeff\ufeffRipped by THSLIVE', 'composer': ''}),
('samples/cbr.mp3', {'extra': {}, 'channels': 2, 'samplerate': 44100, 'track_total': None, 'duration': 0.49, 'album': 'I Can Walk On Water I Can Fly', 'year': '2007', 'title': 'I Can Walk On Water I Can Fly', 'artist': 'Basshunter', 'track': '01', 'filesize': 8186, 'audio_offset': 246, 'bitrate': 128.0, 'genre': 'Dance', 'comment': 'Ripped by THSLIVE'}),
# the output of the lame encoder was 185.4 bitrate, but this is good enough for now
('samples/vbr_xing_header.mp3', {'extra': {}, 'bitrate': 186, 'channels': 1, 'samplerate': 44100, 'duration': 3.944489795918367, 'filesize': 91731, 'audio_offset': 441}),
('samples/vbr_xing_header_2channel.mp3', {'extra': {}, 'filesize': 2000, 'album': "The Harpers' Masque", 'artist': 'Knodel and Valencia', 'audio_offset': 694, 'bitrate': 46, 'channels': 2, 'duration': 250.04408163265308, 'samplerate': 22050, 'title': 'Lochaber No More', 'year': '1992'}),
('samples/id3v22-test.mp3', {'extra': {}, 'channels': 2, 'samplerate': 44100, 'track_total': '11', 'duration': 0.138, 'album': 'Hymns for the Exiled', 'year': '2004', 'title': 'cosmic american', 'artist': '<NAME>', 'track': '3', 'filesize': 5120, 'audio_offset': 2225, 'bitrate': 160.0, 'comment': 'Waterbug Records, www.anaismitchell.com'}),
('samples/silence-44-s-v1.mp3', {'extra': {}, 'channels': 2, 'samplerate': 44100, 'genre': 'Darkwave', 'track_total': None, 'duration': 3.7355102040816326, 'album': 'Quod Libet Test Data', 'year': '2004', 'title': 'Silence', 'artist': 'piman', 'track': '2', 'filesize': 15070, 'audio_offset': 0, 'bitrate': 32.0, 'comment': ''}),
    ('samples/id3v1-latin1.mp3', {'extra': {}, 'channels': None, 'genre': 'Rock', 'samplerate': None, 'album': 'The Young Americans', 'title': 'Play Dead', 'filesize': 256, 'track': '12', 'artist': 'Björk', 'track_total': None, 'year': '1993', 'comment': ' '}),
('samples/UTF16.mp3', {'extra': {'text': 'MusicBrainz Artist Id664c3e0e-42d8-48c1-b209-1efca19c0325', 'url': 'WIKIPEDIA_RELEASEhttp://en.wikipedia.org/wiki/High_Violet'}, 'channels': None, 'samplerate': None, 'track_total': '11', 'track': '07', 'artist': 'The National', 'year': '2010', 'album': 'High Violet', 'title': 'Lemonworld', 'filesize': 20480, 'genre': 'Indie', 'comment': 'Track 7'}),
    ('samples/utf-8-id3v2.mp3', {'extra': {}, 'channels': None, 'genre': 'Acustico', 'track_total': '21', 'track': '01', 'filesize': 2119, 'title': 'Gran día', 'artist': 'Paso a paso', 'album': 'S/T', 'year': None, 'samplerate': None, 'disc': '', 'disc_total': '0'}),
('samples/empty_file.mp3', {'extra': {}, 'channels': None, 'samplerate': None, 'track_total': None, 'album': None, 'year': None, 'title': None, 'track': None, 'artist': None, 'filesize': 0}),
    ('samples/silence-44khz-56k-mono-1s.mp3', {'extra': {}, 'channels': 1, 'samplerate': 44100, 'duration': 1.018, 'filesize': 7280, 'audio_offset': 0, 'bitrate': 56.0}),
('samples/silence-22khz-mono-1s.mp3', {'extra': {}, 'channels': 1, 'samplerate': 22050, 'filesize': 4284, 'audio_offset': 0, 'bitrate': 32.0, 'duration': 1.0438932496075353}),
('samples/id3v24-long-title.mp3', {'extra': {}, 'track': '1', 'disc_total': '1', 'album': 'The Double EP: A Sea of Split Peas', 'filesize': 10000, 'channels': None, 'track_total': '12', 'genre': 'AlternRock', 'title': 'Out of the Woodwork', 'artist': '<NAME>', 'albumartist': '<NAME>', 'samplerate': None, 'year': None, 'disc': '1', 'comment': 'Amazon.com Song ID: 240853806', 'composer': '<NAME>'}),
('samples/utf16be.mp3', {'extra': {}, 'title': '52-girls', 'filesize': 2048, 'track': '6', 'album': 'party mix', 'artist': 'The B52s', 'genre': 'Rock', 'albumartist': None, 'disc': None, 'channels': None}),
('samples/id3v22_image.mp3', {'extra': {}, 'title': 'Kids (MGMT Cover) ', 'filesize': 35924, 'album': 'winniecooper.net ', 'artist': 'The Kooks', 'year': '2008', 'channels': None, 'genre': '.'}),
('samples/id3v22.TCO.genre.mp3', {'extra': {}, 'filesize': 500, 'album': 'ARTPOP', 'artist': 'Lady GaGa', 'comment': 'engiTunPGAP0', 'genre': 'Pop', 'title': 'Applause'}),
    ('samples/id3_comment_utf_16_with_bom.mp3', {'extra': {}, 'filesize': 19980, 'album': 'Ghosts I-IV', 'albumartist': 'Nine Inch Nails', 'artist': 'Nine Inch Nails', 'disc': '1', 'disc_total': '2', 'title': '1 Ghosts I', 'track': '1', 'track_total': '36', 'year': '2008', 'comment': '3/4 time'}),
('samples/id3_comment_utf_16_double_bom.mp3', {'extra': {'text': 'LABEL\ufeffUnclear'}, 'filesize': 512, 'album': 'The Embrace', 'artist': '<NAME> & D.Diggler', 'comment': 'Unclear', 'title': 'The Embrace (Romano Alfieri Remix)', 'track': '04-johannes_heil_and_d.diggler-the_embrace_(romano_alfieri_remix)', 'year': '2012'}),
('samples/id3_genre_id_out_of_bounds.mp3', {'extra': {}, 'filesize': 512, 'album': 'MECHANICAL ANIMALS', 'artist': 'Manson', 'comment': '', 'genre': '(255)', 'title': '01 GREAT BIG WHITE WORLD', 'track': 'Marilyn', 'year': '0'}),
# OGG
('samples/empty.ogg', {'extra': {}, 'track_total': None, 'duration': 3.684716553287982, 'album': None, '_max_samplenum': 162496, 'year': None, 'title': None, 'artist': None, 'track': None, '_tags_parsed': False, 'filesize': 4328, 'audio_offset': 0, 'bitrate': 109.375, 'samplerate': 44100}),
('samples/multipagecomment.ogg', {'extra': {}, 'track_total': None, 'duration': 3.684716553287982, 'album': None, '_max_samplenum': 162496, 'year': None, 'title': None, 'artist': None, 'track': None, '_tags_parsed': False, 'filesize': 135694, 'audio_offset': 0, 'bitrate': 109.375, 'samplerate': 44100}),
('samples/multipage-setup.ogg', {'extra': {}, 'genre': 'JRock', 'track_total': None, 'duration': 4.128798185941043, 'album': 'Timeless', 'year': '2006', 'title': 'Burst', 'artist': 'UVERworld', 'track': '7', '_tags_parsed': False, 'filesize': 76983, 'audio_offset': 0, 'bitrate': 156.25, 'samplerate': 44100}),
('samples/test.ogg', {'extra': {}, 'track_total': None, 'duration': 1.0, 'album': 'the boss', 'year': '2006', 'title': 'the boss', 'artist': '<NAME>', 'track': '1', '_tags_parsed': False, 'filesize': 7467, 'audio_offset': 0, 'bitrate': 156.25, 'samplerate': 44100, 'comment': 'hello!'}),
('samples/corrupt_metadata.ogg', {'extra': {}, 'filesize': 18648, 'audio_offset': 0, 'bitrate': 78.125, 'duration': 2.132358276643991, 'samplerate': 44100}),
('samples/composer.ogg', {'extra': {}, 'filesize': 4480, 'album': 'An Album', 'artist': 'An Artist', 'audio_offset': 0, 'bitrate': 109.375, 'duration': 3.684716553287982, 'genre': 'Some Genre', 'samplerate': 44100, 'title': 'A Title', 'track': '2', 'year': '2007', 'composer': 'some composer'}),
# OPUS
('samples/test.opus', {'extra': {}, 'albumartist': 'Alstroemeria Records', 'samplerate': 48000, 'channels': 2, 'track': '1', 'disc': '1', 'title': 'Bad Apple!!', 'duration': 2.0, 'year': '2008.05.25', 'filesize': 10000, 'artist': 'nomico', 'album': 'Exserens - A selection of Alstroemeria Records', 'comment': 'ARCD0018 - Lovelight'}),
('samples/8khz_5s.opus', {'extra': {}, 'filesize': 7251, 'channels': 1, 'samplerate': 48000, 'duration': 5.0}),
# WAV
('samples/test.wav', {'extra': {}, 'channels': 2, 'duration': 1.0, 'filesize': 176444, 'bitrate': 1378.125, 'samplerate': 44100, 'audio_offest': 36}),
    ('samples/test3sMono.wav', {'extra': {}, 'channels': 1, 'duration': 3.0, 'filesize': 264644, 'bitrate': 689.0625, 'samplerate': 44100, 'audio_offest': 36}),
('samples/test-tagged.wav', {'extra': {}, 'channels': 2, 'duration': 1.0, 'filesize': 176688, 'album': 'thealbum', 'artist': 'theartisst', 'bitrate': 1378.125, 'genre': 'Acid', 'samplerate': 44100, 'title': 'thetitle', 'track': '66', 'audio_offest': 36, 'comment': 'hello', 'year': '2014'}),
('samples/test-riff-tags.wav', {'extra': {}, 'channels': 2, 'duration': 1.0, 'filesize': 176540, 'album': None, 'artist': 'theartisst', 'bitrate': 1378.125, 'genre': 'Acid', 'samplerate': 44100, 'title': 'thetitle', 'track': None, 'audio_offest': 36, 'comment': 'hello', 'year': '2014'}),
('samples/silence-22khz-mono-1s.wav', {'extra': {}, 'channels': 1, 'duration': 1.0, 'filesize': 48160, 'bitrate': 344.53125, 'samplerate': 22050, 'audio_offest': 4088}),
('samples/id3_header_with_a_zero_byte.wav', {'extra': {}, 'channels': 1, 'duration': 1.0, 'filesize': 44280, 'bitrate': 344.53125, 'samplerate': 22050, 'audio_offest': 122, 'artist': 'Purpley', 'title': 'Test000', 'track': '17'}),
# FLAC
('samples/flac1sMono.flac', {'extra': {}, 'genre': 'Avantgarde', 'track_total': None, 'album': 'alb', 'year': '2014', 'duration': 1.0, 'title': 'track', 'track': '23', 'artist': 'art', 'channels': 1, 'filesize': 26632, 'bitrate': 208.0625, 'samplerate': 44100}),
('samples/flac453sStereo.flac', {'extra': {}, 'channels': 2, 'track_total': None, 'album': None, 'year': None, 'duration': 453.51473922902494, 'title': None, 'track': None, 'artist': None, 'filesize': 84236, 'bitrate': 1.45109671875, 'samplerate': 44100}),
('samples/flac1.5sStereo.flac', {'extra': {}, 'channels': 2, 'track_total': None, 'album': 'alb', 'year': '2014', 'duration': 1.4995238095238095, 'title': 'track', 'track': '23', 'artist': 'art', 'filesize': 59868, 'bitrate': 311.9115195300095, 'genre': 'Avantgarde', 'samplerate': 44100}),
('samples/flac_application.flac', {'extra': {}, 'channels': 2, 'track_total': '11', 'album': 'Belle and Sebastian Write About Love', 'year': '2010-10-11', 'duration': 273.64, 'title': 'I Want the World to Stop', 'track': '4', 'artist': 'Belle and Sebastian', 'filesize': 13000, 'bitrate': 0.37115370559859673, 'samplerate': 44100}),
('samples/no-tags.flac', {'extra': {}, 'channels': 2, 'track_total': None, 'album': None, 'year': None, 'duration': 3.684716553287982, 'title': None, 'track': None, 'artist': None, 'filesize': 4692, 'bitrate': 9.94818718614612, 'samplerate': 44100}),
('samples/variable-block.flac', {'extra': {}, 'channels': 2, 'album': 'Appleseed Original Soundtrack', 'year': '2004', 'duration': 261.68, 'title': 'DIVE FOR YOU', 'track': '01', 'track_total': '11', 'artist': 'Boom Boom Satellites', 'filesize': 10240, 'bitrate': 0.3057169061449098, 'disc': '1', 'genre': 'Anime Soundtrack', 'samplerate': 44100, 'composer': 'Boom Boom Satellites (Lyrics)', 'disc_total': '2'}),
('samples/106-invalid-streaminfo.flac', {'extra': {}, 'filesize': 4692}),
('samples/106-short-picture-block-size.flac', {'extra': {}, 'filesize': 4692, 'bitrate': 9.94818718614612, 'channels': 2, 'duration': 3.68, 'samplerate': 44100}),
('samples/with_id3_header.flac', {'extra': {}, 'filesize': 64837, 'album': ' ', 'artist': '群星', 'disc': '0', 'title': 'A 梦 哆啦 机器猫 短信铃声', 'track': '0', 'bitrate': 1116.9186328125, 'channels': 1, 'duration': 0.45351473922902497, 'genre': 'genre', 'samplerate': 44100, 'year': '2018'}),
('samples/with_padded_id3_header.flac', {'extra': {}, 'filesize': 16070, 'album': 'album', 'albumartist': None, 'artist': 'artist', 'audio_offset': None, 'bitrate': 276.830859375, 'channels': 1, 'comment': None, 'disc': None, 'disc_total': None, 'duration': 0.45351473922902497, 'genre': 'genre', 'samplerate': 44100, 'title': 'title', 'track': '1', 'track_total': None, 'year': '2018'}),
('samples/with_padded_id3_header2.flac', {'extra': {}, 'filesize': 19522, 'album': 'Unbekannter Titel', 'albumartist': None, 'artist': '<NAME>', 'audio_offset': None, 'bitrate': 336.29695312499996, 'channels': 1, 'comment': None, 'disc': '1', 'disc_total': '1', 'duration': 0.45351473922902497, 'genre': 'genre', 'samplerate': 44100, 'title': 'Track01', 'track': '01', 'track_total': '05', 'year': '2018'}),
('samples/flac_with_image.flac', {'extra': {}, 'filesize': 80000, 'album': 'smilin´ in circles', 'artist': '<NAME>', 'bitrate': 7.479655337482049, 'channels': 2, 'disc': '1', 'disc_total': '1', 'duration': 83.56, 'genre': 'Blues', 'samplerate': 44100, 'title': 'intro', 'track': '01', 'track_total': '8'}),
# WMA
('samples/test2.wma', {'extra': {}, 'samplerate': 44100, 'album': 'The Colour and the Shape', 'title': 'Doll', 'bitrate': 64.04, 'filesize': 5800, 'track': '1', 'albumartist': 'Foo Fighters', 'artist': '<NAME>', 'duration': 86.406, 'track_total': None, 'year': '1997', 'genre': 'Alternative', 'comment': '', 'composer': '<NAME>'}),
# M4A/MP4
('samples/test.m4a', {'extra': {}, 'samplerate': 44100, 'duration': 314.97, 'bitrate': 256.0, 'channels': 2, 'genre': 'Pop', 'year': '2011', 'title': 'Nothing', 'album': 'Only Our Hearts To Lose', 'track_total': '11', 'track': '11', 'artist': 'Marian', 'filesize': 61432}),
('samples/test2.m4a', {'extra': {}, 'bitrate': 256.0, 'track': '1', 'albumartist': "<NAME> - Get It Out 'cha System - 1978", 'duration': 167.78739229024944, 'filesize': 223365, 'channels': 2, 'year': '1978', 'artist': '<NAME>', 'track_total': '9', 'disc_total': '1', 'genre': 'R&B/Soul', 'album': "Get It Out 'cha System", 'samplerate': 44100, 'disc': '1', 'title': 'Go Out and Get Some', 'comment': "<NAME> - Get It Out 'cha System - 1978", 'composer': "<NAME> - Get It Out 'cha System - 1978"}),
('samples/iso8859_with_image.m4a', {'extra': {}, 'artist': 'Major Lazer', 'filesize': 57017, 'title': 'Cold Water (feat. Justin Bieber & M�)', 'album': 'Cold Water (feat. Justin Bieber & M�) - Single', 'year': '2016', 'samplerate': 44100, 'duration': 188.545, 'genre': 'Electronic;Music', 'albumartist': 'Major Lazer', 'channels': 2, 'bitrate': 303040.001, 'comment': '? 2016 Mad Decent'}),
])
testfolder = os.path.join(os.path.dirname(__file__))
# load custom samples
custom_samples_folder = os.path.join(testfolder, 'custom_samples')
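# expected values for custom samples are encoded in the file name with markers such as sr=44100, d=12.5 or b=320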
pattern_field_name_type = [
(r'sr=(\d+)', 'samplerate', int),
(r'dn=(\d+)', 'disc', str),
(r'dt=(\d+)', 'disc_total', str),
(r'd=(\d+.?\d*)', 'duration', float),
(r'b=(\d+)', 'bitrate', int),
(r'c=(\d)', 'channels', int),
]
for filename in os.listdir(custom_samples_folder):
if filename == 'instructions.txt':
continue
if os.path.isdir(os.path.join(custom_samples_folder, filename)):
continue
expected_values = {}
for pattern, fieldname, _type in pattern_field_name_type:
match = re.findall(pattern, filename)
if match:
expected_values[fieldname] = _type(match[0])
if expected_values:
testfiles[os.path.join('custom_samples', filename)] = expected_values
else:
# if there are no expected values, just try parsing the file
testfiles[os.path.join('custom_samples', filename)] = {}
@pytest.mark.parametrize("testfile,expected", [
pytest.param(testfile, expected) for testfile, expected in testfiles.items()
])
def test_file_reading(testfile, expected):
filename = os.path.join(testfolder, testfile)
# print(filename)
tag = TinyTag.get(filename)
for key, expected_val in expected.items():
result = getattr(tag, key)
fmt_string = 'field "%s": got %s (%s) expected %s (%s)!'
fmt_values = (key, repr(result), type(result), repr(expected_val), type(expected_val))
if key == 'duration' and result is not None and expected_val is not None:
# allow duration to be off by 100 ms and a maximum of 1%
if abs(result - expected_val) < 0.100:
if expected_val and min(result, expected_val) / max(result, expected_val) > 0.99:
continue
assert result == expected_val, fmt_string % fmt_values
undefined_in_fixture = {}
for key, val in tag.__dict__.items():
if key.startswith('_') or val is None:
continue
if key not in expected:
undefined_in_fixture[key] = val
assert not undefined_in_fixture, 'Missing data in fixture \n%s' % str(undefined_in_fixture)
#
# def test_generator():
# for testfile, expected in testfiles.items():
# yield get_info, testfile, expected
def test_pathlib_compatibility():
try:
import pathlib
except ImportError:
return
testfile = next(iter(testfiles.keys()))
filename = pathlib.Path(testfolder) / testfile
tag = TinyTag.get(filename)
@pytest.mark.skipif(sys.platform == "win32", reason='Windows does not support binary paths')
def test_binary_path_compatibility():
binary_file_path = os.path.join(os.path.dirname(__file__).encode('utf-8'), b'\x01.mp3')
testfile = os.path.join(testfolder, next(iter(testfiles.keys())))
shutil.copy(testfile, binary_file_path)
assert os.path.exists(binary_file_path)
TinyTag.get(binary_file_path)
os.unlink(binary_file_path)
assert not os.path.exists(binary_file_path)
@pytest.mark.xfail(raises=TinyTagException)
def test_unsupported_extension():
bogus_file = os.path.join(testfolder, 'samples/there_is_no_such_ext.bogus')
TinyTag.get(bogus_file)
@pytest.mark.xfail(raises=NotImplementedError)
def test_unsubclassed_tinytag_duration():
tag = TinyTag(None, 0)
tag._determine_duration(None)
@pytest.mark.xfail(raises=NotImplementedError)
def test_unsubclassed_tinytag_parse_tag():
tag = TinyTag(None, 0)
tag._parse_tag(None)
def test_mp3_length_estimation():
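    # even with reduced estimation precision the estimated duration should stay close to the real ~3.74s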
ID3.set_estimation_precision(0.7)
tag = TinyTag.get(os.path.join(testfolder, 'samples/silence-44-s-v1.mp3'))
assert 3.5 < tag.duration < 4.0
@pytest.mark.xfail(raises=TinyTagException)
def test_unexpected_eof():
tag = ID3.get(os.path.join(testfolder, 'samples/incomplete.mp3'))
@pytest.mark.xfail(raises=TinyTagException)
def test_invalid_flac_file():
tag = Flac.get(os.path.join(testfolder, 'samples/silence-44-s-v1.mp3'))
@pytest.mark.xfail(raises=TinyTagException)
def test_invalid_mp3_file():
tag = ID3.get(os.path.join(testfolder, 'samples/flac1.5sStereo.flac'))
@pytest.mark.xfail(raises=TinyTagException)
def test_invalid_ogg_file():
tag = Ogg.get(os.path.join(testfolder, 'samples/flac1.5sStereo.flac'))
@pytest.mark.xfail(raises=TinyTagException)
def test_invalid_wave_file():
tag = Wave.get(os.path.join(testfolder, 'samples/flac1.5sStereo.flac'))
def test_unpad():
# make sure that unpad only removes trailing 0-bytes
assert TinyTag._unpad('foo\x00') == 'foo'
assert TinyTag._unpad('foo\x00bar\x00') == 'foobar'
def test_mp3_image_loading():
tag = TinyTag.get(os.path.join(testfolder, 'samples/cover_img.mp3'), image=True)
image_data = tag.get_image()
assert image_data is not None
assert 140000 < len(image_data) < 150000, 'Image is %d bytes but should be around 145kb' % len(image_data)
assert image_data.startswith(b'\xff\xd8\xff\xe0'), 'The image data must start with a jpeg header'
def test_mp3_id3v22_image_loading():
tag = TinyTag.get(os.path.join(testfolder, 'samples/id3v22_image.mp3'), image=True)
image_data = tag.get_image()
assert image_data is not None
assert 18000 < len(image_data) < 19000, 'Image is %d bytes but should be around 18.1kb' % len(image_data)
assert image_data.startswith(b'\xff\xd8\xff\xe0'), 'The image data must start with a jpeg header'
def test_mp3_image_loading_without_description():
tag = TinyTag.get(os.path.join(testfolder, 'samples/id3image_without_description.mp3'), image=True)
image_data = tag.get_image()
assert image_data is not None
assert 28600 < len(image_data) < 28700, 'Image is %d bytes but should be around 28.6kb' % len(image_data)
assert image_data.startswith(b'\xff\xd8\xff\xe0'), 'The image data must start with a jpeg header'
def test_mp3_utf_8_invalid_string_raises_exception():
with raises(TinyTagException):
tag = TinyTag.get(os.path.join(testfolder, 'samples/utf-8-id3v2-invalid-string.mp3'))
def test_mp3_utf_8_invalid_string_can_be_ignored():
tag = TinyTag.get(os.path.join(testfolder, 'samples/utf-8-id3v2-invalid-string.mp3'), ignore_errors=True)
# the title used to be Gran dia, but I replaced the first byte with 0xFF, which should be ignored here
assert tag.title == 'ran día'
def test_mp4_image_loading():
tag = TinyTag.get(os.path.join(testfolder, 'samples/iso8859_with_image.m4a'), image=True)
image_data = tag.get_image()
assert image_data is not None
assert 20000 < len(image_data) < 25000, 'Image is %d bytes but should be around 22kb' % len(image_data)
def test_flac_image_loading():
tag = TinyTag.get(os.path.join(testfolder, 'samples/flac_with_image.flac'), image=True)
image_data = tag.get_image()
assert image_data is not None
assert 70000 < len(image_data) < 80000, 'Image is %d bytes but should be around 75kb' % len(image_data)
@pytest.mark.parametrize("testfile,expected", [
pytest.param(testfile, expected) for testfile, expected in [
('samples/detect_mp3_id3.x', ID3),
('samples/detect_mp3_fffb.x', ID3),
('samples/detect_ogg.x', Ogg),
('samples/detect_wav.x', Wave),
('samples/detect_flac.x', Flac),
('samples/detect_wma.x', Wma),
('samples/detect_mp4_m4a.x', MP4),
]
])
def test_detect_magic_headers(testfile, expected):
filename = os.path.join(testfolder, testfile)
with io.open(filename, 'rb') as fh:
parser = TinyTag.get_parser_class(filename, fh)
assert parser == expected
@pytest.mark.xfail(raises=Exception)
def test_show_hint_for_wrong_usage():
TinyTag('filename.mp3', 0)
def test_to_str():
tag = TinyTag.get(os.path.join(testfolder, 'samples/id3v22-test.mp3'))
assert str(tag) # since the dict is not ordered we cannot == 'somestring'
assert repr(tag) # since the dict is not ordered we cannot == 'somestring'
assert str(tag) == '{"album": "Hymns for the Exiled", "albumartist": null, "artist": "<NAME>", "audio_offset": 2225, "bitrate": 160, "channels": 2, "comment": "Waterbug Records, www.anaismitchell.com", "composer": null, "disc": null, "disc_total": null, "duration": 0.13836297152858082, "extra": {}, "filesize": 5120, "genre": null, "samplerate": 44100, "title": "cosmic american", "track": "3", "track_total": "11", "year": "2004"}'
| #!/usr/bin/python
# -*- coding: utf-8 -*-
# tests can be extended using other bigger files that are not going to be
# checked into git, by placing them into the custom_samples folder
#
# see custom_samples/instructions.txt
#
from __future__ import unicode_literals
import io
import os
import shutil
import sys
import tempfile
import pytest
import re
from pytest import raises
from tinytag import TinyTagException, TinyTag, ID3, Ogg, Wave, Flac
from tinytag.tinytag import Wma, MP4
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # python 2.6 and 3.2 compat
testfiles = OrderedDict([
# MP3
('samples/vbri.mp3', {'extra': {'url': ''}, 'channels': 2, 'samplerate': 44100, 'track_total': None, 'duration': 0.47020408163265304, 'album': 'I Can Walk On Water I Can Fly', 'year': '2007', 'title': 'I Can Walk On Water I Can Fly', 'artist': 'Basshunter', 'track': '01', 'filesize': 8192, 'audio_offset': 1007, 'genre': '(3)Dance', 'comment': '\ufeff\ufeffRipped by THSLIVE', 'composer': ''}),
('samples/cbr.mp3', {'extra': {}, 'channels': 2, 'samplerate': 44100, 'track_total': None, 'duration': 0.49, 'album': 'I Can Walk On Water I Can Fly', 'year': '2007', 'title': 'I Can Walk On Water I Can Fly', 'artist': 'Basshunter', 'track': '01', 'filesize': 8186, 'audio_offset': 246, 'bitrate': 128.0, 'genre': 'Dance', 'comment': 'Ripped by THSLIVE'}),
# the output of the lame encoder was 185.4 bitrate, but this is good enough for now
('samples/vbr_xing_header.mp3', {'extra': {}, 'bitrate': 186, 'channels': 1, 'samplerate': 44100, 'duration': 3.944489795918367, 'filesize': 91731, 'audio_offset': 441}),
('samples/vbr_xing_header_2channel.mp3', {'extra': {}, 'filesize': 2000, 'album': "The Harpers' Masque", 'artist': 'Knodel and Valencia', 'audio_offset': 694, 'bitrate': 46, 'channels': 2, 'duration': 250.04408163265308, 'samplerate': 22050, 'title': 'Lochaber No More', 'year': '1992'}),
('samples/id3v22-test.mp3', {'extra': {}, 'channels': 2, 'samplerate': 44100, 'track_total': '11', 'duration': 0.138, 'album': 'Hymns for the Exiled', 'year': '2004', 'title': 'cosmic american', 'artist': '<NAME>', 'track': '3', 'filesize': 5120, 'audio_offset': 2225, 'bitrate': 160.0, 'comment': 'Waterbug Records, www.anaismitchell.com'}),
('samples/silence-44-s-v1.mp3', {'extra': {}, 'channels': 2, 'samplerate': 44100, 'genre': 'Darkwave', 'track_total': None, 'duration': 3.7355102040816326, 'album': 'Quod Libet Test Data', 'year': '2004', 'title': 'Silence', 'artist': 'piman', 'track': '2', 'filesize': 15070, 'audio_offset': 0, 'bitrate': 32.0, 'comment': ''}),
('samples/id3v1-latin1.mp3', {'extra': {}, 'channels': None, 'samplerate': 44100, 'genre': 'Rock', 'samplerate': None, 'album': 'The Young Americans', 'title': 'Play Dead', 'filesize': 256, 'track': '12', 'artist': 'Björk', 'track_total': None, 'year': '1993', 'comment': ' '}),
('samples/UTF16.mp3', {'extra': {'text': 'MusicBrainz Artist Id664c3e0e-42d8-48c1-b209-1efca19c0325', 'url': 'WIKIPEDIA_RELEASEhttp://en.wikipedia.org/wiki/High_Violet'}, 'channels': None, 'samplerate': None, 'track_total': '11', 'track': '07', 'artist': 'The National', 'year': '2010', 'album': 'High Violet', 'title': 'Lemonworld', 'filesize': 20480, 'genre': 'Indie', 'comment': 'Track 7'}),
('samples/utf-8-id3v2.mp3', {'extra': {}, 'channels': None, 'samplerate': 44100, 'genre': 'Acustico', 'track_total': '21', 'track': '01', 'filesize': 2119, 'title': 'Gran día', 'artist': 'Paso a paso', 'album': 'S/T', 'year': None, 'samplerate': None, 'disc': '', 'disc_total': '0'}),
('samples/empty_file.mp3', {'extra': {}, 'channels': None, 'samplerate': None, 'track_total': None, 'album': None, 'year': None, 'title': None, 'track': None, 'artist': None, 'filesize': 0}),
('samples/silence-44khz-56k-mono-1s.mp3', {'extra': {}, 'channels': 1, 'samplerate': 44100, 'duration': 1.018, 'samplerate': 44100, 'filesize': 7280, 'audio_offset': 0, 'bitrate': 56.0}),
('samples/silence-22khz-mono-1s.mp3', {'extra': {}, 'channels': 1, 'samplerate': 22050, 'filesize': 4284, 'audio_offset': 0, 'bitrate': 32.0, 'duration': 1.0438932496075353}),
('samples/id3v24-long-title.mp3', {'extra': {}, 'track': '1', 'disc_total': '1', 'album': 'The Double EP: A Sea of Split Peas', 'filesize': 10000, 'channels': None, 'track_total': '12', 'genre': 'AlternRock', 'title': 'Out of the Woodwork', 'artist': '<NAME>', 'albumartist': '<NAME>', 'samplerate': None, 'year': None, 'disc': '1', 'comment': 'Amazon.com Song ID: 240853806', 'composer': '<NAME>'}),
('samples/utf16be.mp3', {'extra': {}, 'title': '52-girls', 'filesize': 2048, 'track': '6', 'album': 'party mix', 'artist': 'The B52s', 'genre': 'Rock', 'albumartist': None, 'disc': None, 'channels': None}),
('samples/id3v22_image.mp3', {'extra': {}, 'title': 'Kids (MGMT Cover) ', 'filesize': 35924, 'album': 'winniecooper.net ', 'artist': 'The Kooks', 'year': '2008', 'channels': None, 'genre': '.'}),
('samples/id3v22.TCO.genre.mp3', {'extra': {}, 'filesize': 500, 'album': 'ARTPOP', 'artist': 'Lady GaGa', 'comment': 'engiTunPGAP0', 'genre': 'Pop', 'title': 'Applause'}),
('samples/id3_comment_utf_16_with_bom.mp3', {'extra': {}, 'filesize': 19980, 'album': 'Ghosts I-IV', 'albumartist': 'Nine Inch Nails', 'artist': 'Nine Inch Nails', 'comment': '', 'disc': '1', 'disc_total': '2', 'title': '1 Ghosts I', 'track': '1', 'track_total': '36', 'year': '2008', 'comment': '3/4 time'}),
('samples/id3_comment_utf_16_double_bom.mp3', {'extra': {'text': 'LABEL\ufeffUnclear'}, 'filesize': 512, 'album': 'The Embrace', 'artist': '<NAME> & D.Diggler', 'comment': 'Unclear', 'title': 'The Embrace (Romano Alfieri Remix)', 'track': '04-johannes_heil_and_d.diggler-the_embrace_(romano_alfieri_remix)', 'year': '2012'}),
('samples/id3_genre_id_out_of_bounds.mp3', {'extra': {}, 'filesize': 512, 'album': 'MECHANICAL ANIMALS', 'artist': 'Manson', 'comment': '', 'genre': '(255)', 'title': '01 GREAT BIG WHITE WORLD', 'track': 'Marilyn', 'year': '0'}),
# OGG
('samples/empty.ogg', {'extra': {}, 'track_total': None, 'duration': 3.684716553287982, 'album': None, '_max_samplenum': 162496, 'year': None, 'title': None, 'artist': None, 'track': None, '_tags_parsed': False, 'filesize': 4328, 'audio_offset': 0, 'bitrate': 109.375, 'samplerate': 44100}),
('samples/multipagecomment.ogg', {'extra': {}, 'track_total': None, 'duration': 3.684716553287982, 'album': None, '_max_samplenum': 162496, 'year': None, 'title': None, 'artist': None, 'track': None, '_tags_parsed': False, 'filesize': 135694, 'audio_offset': 0, 'bitrate': 109.375, 'samplerate': 44100}),
('samples/multipage-setup.ogg', {'extra': {}, 'genre': 'JRock', 'track_total': None, 'duration': 4.128798185941043, 'album': 'Timeless', 'year': '2006', 'title': 'Burst', 'artist': 'UVERworld', 'track': '7', '_tags_parsed': False, 'filesize': 76983, 'audio_offset': 0, 'bitrate': 156.25, 'samplerate': 44100}),
('samples/test.ogg', {'extra': {}, 'track_total': None, 'duration': 1.0, 'album': 'the boss', 'year': '2006', 'title': 'the boss', 'artist': '<NAME>', 'track': '1', '_tags_parsed': False, 'filesize': 7467, 'audio_offset': 0, 'bitrate': 156.25, 'samplerate': 44100, 'comment': 'hello!'}),
('samples/corrupt_metadata.ogg', {'extra': {}, 'filesize': 18648, 'audio_offset': 0, 'bitrate': 78.125, 'duration': 2.132358276643991, 'samplerate': 44100}),
('samples/composer.ogg', {'extra': {}, 'filesize': 4480, 'album': 'An Album', 'artist': 'An Artist', 'audio_offset': 0, 'bitrate': 109.375, 'duration': 3.684716553287982, 'genre': 'Some Genre', 'samplerate': 44100, 'title': 'A Title', 'track': '2', 'year': '2007', 'composer': 'some composer'}),
# OPUS
('samples/test.opus', {'extra': {}, 'albumartist': 'Alstroemeria Records', 'samplerate': 48000, 'channels': 2, 'track': '1', 'disc': '1', 'title': 'Bad Apple!!', 'duration': 2.0, 'year': '2008.05.25', 'filesize': 10000, 'artist': 'nomico', 'album': 'Exserens - A selection of Alstroemeria Records', 'comment': 'ARCD0018 - Lovelight'}),
('samples/8khz_5s.opus', {'extra': {}, 'filesize': 7251, 'channels': 1, 'samplerate': 48000, 'duration': 5.0}),
# WAV
('samples/test.wav', {'extra': {}, 'channels': 2, 'duration': 1.0, 'filesize': 176444, 'bitrate': 1378.125, 'samplerate': 44100, 'audio_offest': 36}),
('samples/test3sMono.wav', {'extra': {}, 'channels': 1, 'duration': 3.0, 'filesize': 264644, 'bitrate': 689.0625, 'duration': 3.0, 'samplerate': 44100, 'audio_offest': 36}),
('samples/test-tagged.wav', {'extra': {}, 'channels': 2, 'duration': 1.0, 'filesize': 176688, 'album': 'thealbum', 'artist': 'theartisst', 'bitrate': 1378.125, 'genre': 'Acid', 'samplerate': 44100, 'title': 'thetitle', 'track': '66', 'audio_offest': 36, 'comment': 'hello', 'year': '2014'}),
('samples/test-riff-tags.wav', {'extra': {}, 'channels': 2, 'duration': 1.0, 'filesize': 176540, 'album': None, 'artist': 'theartisst', 'bitrate': 1378.125, 'genre': 'Acid', 'samplerate': 44100, 'title': 'thetitle', 'track': None, 'audio_offest': 36, 'comment': 'hello', 'year': '2014'}),
('samples/silence-22khz-mono-1s.wav', {'extra': {}, 'channels': 1, 'duration': 1.0, 'filesize': 48160, 'bitrate': 344.53125, 'samplerate': 22050, 'audio_offest': 4088}),
('samples/id3_header_with_a_zero_byte.wav', {'extra': {}, 'channels': 1, 'duration': 1.0, 'filesize': 44280, 'bitrate': 344.53125, 'samplerate': 22050, 'audio_offest': 122, 'artist': 'Purpley', 'title': 'Test000', 'track': '17'}),
# FLAC
('samples/flac1sMono.flac', {'extra': {}, 'genre': 'Avantgarde', 'track_total': None, 'album': 'alb', 'year': '2014', 'duration': 1.0, 'title': 'track', 'track': '23', 'artist': 'art', 'channels': 1, 'filesize': 26632, 'bitrate': 208.0625, 'samplerate': 44100}),
('samples/flac453sStereo.flac', {'extra': {}, 'channels': 2, 'track_total': None, 'album': None, 'year': None, 'duration': 453.51473922902494, 'title': None, 'track': None, 'artist': None, 'filesize': 84236, 'bitrate': 1.45109671875, 'samplerate': 44100}),
('samples/flac1.5sStereo.flac', {'extra': {}, 'channels': 2, 'track_total': None, 'album': 'alb', 'year': '2014', 'duration': 1.4995238095238095, 'title': 'track', 'track': '23', 'artist': 'art', 'filesize': 59868, 'bitrate': 311.9115195300095, 'genre': 'Avantgarde', 'samplerate': 44100}),
('samples/flac_application.flac', {'extra': {}, 'channels': 2, 'track_total': '11', 'album': 'Belle and Sebastian Write About Love', 'year': '2010-10-11', 'duration': 273.64, 'title': 'I Want the World to Stop', 'track': '4', 'artist': 'Belle and Sebastian', 'filesize': 13000, 'bitrate': 0.37115370559859673, 'samplerate': 44100}),
('samples/no-tags.flac', {'extra': {}, 'channels': 2, 'track_total': None, 'album': None, 'year': None, 'duration': 3.684716553287982, 'title': None, 'track': None, 'artist': None, 'filesize': 4692, 'bitrate': 9.94818718614612, 'samplerate': 44100}),
('samples/variable-block.flac', {'extra': {}, 'channels': 2, 'album': 'Appleseed Original Soundtrack', 'year': '2004', 'duration': 261.68, 'title': 'DIVE FOR YOU', 'track': '01', 'track_total': '11', 'artist': 'Boom Boom Satellites', 'filesize': 10240, 'bitrate': 0.3057169061449098, 'disc': '1', 'genre': 'Anime Soundtrack', 'samplerate': 44100, 'composer': 'Boom Boom Satellites (Lyrics)', 'disc_total': '2'}),
('samples/106-invalid-streaminfo.flac', {'extra': {}, 'filesize': 4692}),
('samples/106-short-picture-block-size.flac', {'extra': {}, 'filesize': 4692, 'bitrate': 9.94818718614612, 'channels': 2, 'duration': 3.68, 'samplerate': 44100}),
('samples/with_id3_header.flac', {'extra': {}, 'filesize': 64837, 'album': ' ', 'artist': '群星', 'disc': '0', 'title': 'A 梦 哆啦 机器猫 短信铃声', 'track': '0', 'bitrate': 1116.9186328125, 'channels': 1, 'duration': 0.45351473922902497, 'genre': 'genre', 'samplerate': 44100, 'year': '2018'}),
('samples/with_padded_id3_header.flac', {'extra': {}, 'filesize': 16070, 'album': 'album', 'albumartist': None, 'artist': 'artist', 'audio_offset': None, 'bitrate': 276.830859375, 'channels': 1, 'comment': None, 'disc': None, 'disc_total': None, 'duration': 0.45351473922902497, 'genre': 'genre', 'samplerate': 44100, 'title': 'title', 'track': '1', 'track_total': None, 'year': '2018'}),
('samples/with_padded_id3_header2.flac', {'extra': {}, 'filesize': 19522, 'album': 'Unbekannter Titel', 'albumartist': None, 'artist': '<NAME>', 'audio_offset': None, 'bitrate': 336.29695312499996, 'channels': 1, 'comment': None, 'disc': '1', 'disc_total': '1', 'duration': 0.45351473922902497, 'genre': 'genre', 'samplerate': 44100, 'title': 'Track01', 'track': '01', 'track_total': '05', 'year': '2018'}),
('samples/flac_with_image.flac', {'extra': {}, 'filesize': 80000, 'album': 'smilin´ in circles', 'artist': '<NAME>', 'bitrate': 7.479655337482049, 'channels': 2, 'disc': '1', 'disc_total': '1', 'duration': 83.56, 'genre': 'Blues', 'samplerate': 44100, 'title': 'intro', 'track': '01', 'track_total': '8'}),
# WMA
('samples/test2.wma', {'extra': {}, 'samplerate': 44100, 'album': 'The Colour and the Shape', 'title': 'Doll', 'bitrate': 64.04, 'filesize': 5800, 'track': '1', 'albumartist': 'Foo Fighters', 'artist': '<NAME>', 'duration': 86.406, 'track_total': None, 'year': '1997', 'genre': 'Alternative', 'comment': '', 'composer': '<NAME>'}),
# M4A/MP4
('samples/test.m4a', {'extra': {}, 'samplerate': 44100, 'duration': 314.97, 'bitrate': 256.0, 'channels': 2, 'genre': 'Pop', 'year': '2011', 'title': 'Nothing', 'album': 'Only Our Hearts To Lose', 'track_total': '11', 'track': '11', 'artist': 'Marian', 'filesize': 61432}),
('samples/test2.m4a', {'extra': {}, 'bitrate': 256.0, 'track': '1', 'albumartist': "<NAME> - Get It Out 'cha System - 1978", 'duration': 167.78739229024944, 'filesize': 223365, 'channels': 2, 'year': '1978', 'artist': '<NAME>', 'track_total': '9', 'disc_total': '1', 'genre': 'R&B/Soul', 'album': "Get It Out 'cha System", 'samplerate': 44100, 'disc': '1', 'title': 'Go Out and Get Some', 'comment': "<NAME> - Get It Out 'cha System - 1978", 'composer': "<NAME> - Get It Out 'cha System - 1978"}),
('samples/iso8859_with_image.m4a', {'extra': {}, 'artist': 'Major Lazer', 'filesize': 57017, 'title': 'Cold Water (feat. Justin Bieber & M�)', 'album': 'Cold Water (feat. Justin Bieber & M�) - Single', 'year': '2016', 'samplerate': 44100, 'duration': 188.545, 'genre': 'Electronic;Music', 'albumartist': 'Major Lazer', 'channels': 2, 'bitrate': 303040.001, 'comment': '? 2016 Mad Decent'}),
])
testfolder = os.path.join(os.path.dirname(__file__))
# load custom samples
custom_samples_folder = os.path.join(testfolder, 'custom_samples')
pattern_field_name_type = [
(r'sr=(\d+)', 'samplerate', int),
(r'dn=(\d+)', 'disc', str),
(r'dt=(\d+)', 'disc_total', str),
(r'd=(\d+.?\d*)', 'duration', float),
(r'b=(\d+)', 'bitrate', int),
(r'c=(\d)', 'channels', int),
]
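# Illustrative naming convention (hypothetical file): a custom sample called
# 'mysong sr=44100 c=2 d=187.5 b=320.mp3' would be checked for
# samplerate=44100, channels=2, duration=187.5 and bitrate=320.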
for filename in os.listdir(custom_samples_folder):
if filename == 'instructions.txt':
continue
if os.path.isdir(os.path.join(custom_samples_folder, filename)):
continue
expected_values = {}
for pattern, fieldname, _type in pattern_field_name_type:
match = re.findall(pattern, filename)
if match:
expected_values[fieldname] = _type(match[0])
if expected_values:
testfiles[os.path.join('custom_samples', filename)] = expected_values
else:
# if there are no expected values, just try parsing the file
testfiles[os.path.join('custom_samples', filename)] = {}
@pytest.mark.parametrize("testfile,expected", [
pytest.param(testfile, expected) for testfile, expected in testfiles.items()
])
def test_file_reading(testfile, expected):
filename = os.path.join(testfolder, testfile)
# print(filename)
tag = TinyTag.get(filename)
for key, expected_val in expected.items():
result = getattr(tag, key)
fmt_string = 'field "%s": got %s (%s) expected %s (%s)!'
fmt_values = (key, repr(result), type(result), repr(expected_val), type(expected_val))
if key == 'duration' and result is not None and expected_val is not None:
# allow duration to be off by 100 ms and a maximum of 1%
if abs(result - expected_val) < 0.100:
if expected_val and min(result, expected_val) / max(result, expected_val) > 0.99:
continue
assert result == expected_val, fmt_string % fmt_values
undefined_in_fixture = {}
for key, val in tag.__dict__.items():
if key.startswith('_') or val is None:
continue
if key not in expected:
undefined_in_fixture[key] = val
assert not undefined_in_fixture, 'Missing data in fixture \n%s' % str(undefined_in_fixture)
#
# def test_generator():
# for testfile, expected in testfiles.items():
# yield get_info, testfile, expected
def test_pathlib_compatibility():
try:
import pathlib
except ImportError:
return
testfile = next(iter(testfiles.keys()))
filename = pathlib.Path(testfolder) / testfile
tag = TinyTag.get(filename)
@pytest.mark.skipif(sys.platform == "win32", reason='Windows does not support binary paths')
def test_binary_path_compatibility():
binary_file_path = os.path.join(os.path.dirname(__file__).encode('utf-8'), b'\x01.mp3')
testfile = os.path.join(testfolder, next(iter(testfiles.keys())))
shutil.copy(testfile, binary_file_path)
assert os.path.exists(binary_file_path)
TinyTag.get(binary_file_path)
os.unlink(binary_file_path)
assert not os.path.exists(binary_file_path)
@pytest.mark.xfail(raises=TinyTagException)
def test_unsupported_extension():
bogus_file = os.path.join(testfolder, 'samples/there_is_no_such_ext.bogus')
TinyTag.get(bogus_file)
@pytest.mark.xfail(raises=NotImplementedError)
def test_unsubclassed_tinytag_duration():
tag = TinyTag(None, 0)
tag._determine_duration(None)
@pytest.mark.xfail(raises=NotImplementedError)
def test_unsubclassed_tinytag_parse_tag():
tag = TinyTag(None, 0)
tag._parse_tag(None)
def test_mp3_length_estimation():
ID3.set_estimation_precision(0.7)
tag = TinyTag.get(os.path.join(testfolder, 'samples/silence-44-s-v1.mp3'))
assert 3.5 < tag.duration < 4.0
@pytest.mark.xfail(raises=TinyTagException)
def test_unexpected_eof():
tag = ID3.get(os.path.join(testfolder, 'samples/incomplete.mp3'))
@pytest.mark.xfail(raises=TinyTagException)
def test_invalid_flac_file():
tag = Flac.get(os.path.join(testfolder, 'samples/silence-44-s-v1.mp3'))
@pytest.mark.xfail(raises=TinyTagException)
def test_invalid_mp3_file():
tag = ID3.get(os.path.join(testfolder, 'samples/flac1.5sStereo.flac'))
@pytest.mark.xfail(raises=TinyTagException)
def test_invalid_ogg_file():
tag = Ogg.get(os.path.join(testfolder, 'samples/flac1.5sStereo.flac'))
@pytest.mark.xfail(raises=TinyTagException)
def test_invalid_wave_file():
tag = Wave.get(os.path.join(testfolder, 'samples/flac1.5sStereo.flac'))
def test_unpad():
# make sure that unpad only removes trailing 0-bytes
assert TinyTag._unpad('foo\x00') == 'foo'
assert TinyTag._unpad('foo\x00bar\x00') == 'foobar'
def test_mp3_image_loading():
tag = TinyTag.get(os.path.join(testfolder, 'samples/cover_img.mp3'), image=True)
image_data = tag.get_image()
assert image_data is not None
assert 140000 < len(image_data) < 150000, 'Image is %d bytes but should be around 145kb' % len(image_data)
assert image_data.startswith(b'\xff\xd8\xff\xe0'), 'The image data must start with a jpeg header'
def test_mp3_id3v22_image_loading():
tag = TinyTag.get(os.path.join(testfolder, 'samples/id3v22_image.mp3'), image=True)
image_data = tag.get_image()
assert image_data is not None
assert 18000 < len(image_data) < 19000, 'Image is %d bytes but should be around 18.1kb' % len(image_data)
assert image_data.startswith(b'\xff\xd8\xff\xe0'), 'The image data must start with a jpeg header'
def test_mp3_image_loading_without_description():
tag = TinyTag.get(os.path.join(testfolder, 'samples/id3image_without_description.mp3'), image=True)
image_data = tag.get_image()
assert image_data is not None
assert 28600 < len(image_data) < 28700, 'Image is %d bytes but should be around 28.6kb' % len(image_data)
assert image_data.startswith(b'\xff\xd8\xff\xe0'), 'The image data must start with a jpeg header'
def test_mp3_utf_8_invalid_string_raises_exception():
with raises(TinyTagException):
tag = TinyTag.get(os.path.join(testfolder, 'samples/utf-8-id3v2-invalid-string.mp3'))
def test_mp3_utf_8_invalid_string_can_be_ignored():
tag = TinyTag.get(os.path.join(testfolder, 'samples/utf-8-id3v2-invalid-string.mp3'), ignore_errors=True)
# the title used to be Gran dia, but I replaced the first byte with 0xFF, which should be ignored here
assert tag.title == 'ran día'
def test_mp4_image_loading():
tag = TinyTag.get(os.path.join(testfolder, 'samples/iso8859_with_image.m4a'), image=True)
image_data = tag.get_image()
assert image_data is not None
assert 20000 < len(image_data) < 25000, 'Image is %d bytes but should be around 22kb' % len(image_data)
def test_flac_image_loading():
tag = TinyTag.get(os.path.join(testfolder, 'samples/flac_with_image.flac'), image=True)
image_data = tag.get_image()
assert image_data is not None
assert 70000 < len(image_data) < 80000, 'Image is %d bytes but should be around 75kb' % len(image_data)
@pytest.mark.parametrize("testfile,expected", [
pytest.param(testfile, expected) for testfile, expected in [
('samples/detect_mp3_id3.x', ID3),
('samples/detect_mp3_fffb.x', ID3),
('samples/detect_ogg.x', Ogg),
('samples/detect_wav.x', Wave),
('samples/detect_flac.x', Flac),
('samples/detect_wma.x', Wma),
('samples/detect_mp4_m4a.x', MP4),
]
])
def test_detect_magic_headers(testfile, expected):
filename = os.path.join(testfolder, testfile)
with io.open(filename, 'rb') as fh:
parser = TinyTag.get_parser_class(filename, fh)
assert parser == expected
@pytest.mark.xfail(raises=Exception)
def test_show_hint_for_wrong_usage():
TinyTag('filename.mp3', 0)
def test_to_str():
tag = TinyTag.get(os.path.join(testfolder, 'samples/id3v22-test.mp3'))
assert str(tag) # since the dict is not ordered we cannot == 'somestring'
assert repr(tag) # since the dict is not ordered we cannot == 'somestring'
assert str(tag) == '{"album": "Hymns for the Exiled", "albumartist": null, "artist": "<NAME>", "audio_offset": 2225, "bitrate": 160, "channels": 2, "comment": "Waterbug Records, www.anaismitchell.com", "composer": null, "disc": null, "disc_total": null, "duration": 0.13836297152858082, "extra": {}, "filesize": 5120, "genre": null, "samplerate": 44100, "title": "cosmic american", "track": "3", "track_total": "11", "year": "2004"}'
| en | 0.880599 | #!/usr/bin/python # -*- coding: utf-8 -*- # tests can be extended using other bigger files that are not going to be # checked into git, by placing them into the custom_samples folder # # see custom_samples/instructions.txt # # python 2.6 and 3.2 compat # MP3 # the output of the lame encoder was 185.4 bitrate, but this is good enough for now # OGG # OPUS # WAV # FLAC # WMA # M4A/MP4 # load custom samples # if there are no expected values, just try parsing the file # print(filename) # allow duration to be off by 100 ms and a maximum of 1% # # def test_generator(): # for testfile, expected in testfiles.items(): # yield get_info, testfile, expected # make sure that unpad only removes trailing 0-bytes # the title used to be Gran dia, but I replaced the first byte with 0xFF, which should be ignored here # since the dict is not ordered we cannot == 'somestring' # since the dict is not ordered we cannot == 'somestring' | 2.030362 | 2 |
glue/viewers/scatter/qt/layer_style_editor.py | sergiopasra/glue | 0 | 6633186 | from __future__ import absolute_import, division, print_function
import os
import numpy as np
from qtpy import QtWidgets, QtGui
from qtpy.QtCore import Qt
from glue.external.echo.qt import autoconnect_callbacks_to_qt, connect_value
from glue.utils.qt import load_ui, fix_tab_widget_fontsize
class ScatterLayerStyleEditor(QtWidgets.QWidget):
def __init__(self, layer, parent=None):
super(ScatterLayerStyleEditor, self).__init__(parent=parent)
self.ui = load_ui('layer_style_editor.ui', self,
directory=os.path.dirname(__file__))
connect_kwargs = {'alpha': dict(value_range=(0, 1)),
'size_scaling': dict(value_range=(0.1, 10), log=True),
'density_contrast': dict(value_range=(0, 1)),
'vector_scaling': dict(value_range=(0.1, 10), log=True)}
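        # These kwargs tune how autoconnect_callbacks_to_qt wires widgets to the state:
        # value_range bounds the mapped value and log=True makes the mapping
        # logarithmic (used here for the size and vector scaling sliders).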
autoconnect_callbacks_to_qt(layer.state, self.ui, connect_kwargs)
connect_value(layer.state.viewer_state, 'dpi', self.ui.value_dpi,
value_range=(12, 144), log=True)
fix_tab_widget_fontsize(self.ui.tab_widget)
self.layer_state = layer.state
self.layer_state.add_callback('markers_visible', self._update_markers_visible)
self.layer_state.add_callback('line_visible', self._update_line_visible)
self.layer_state.add_callback('xerr_visible', self._update_xerr_visible)
self.layer_state.add_callback('yerr_visible', self._update_yerr_visible)
self.layer_state.add_callback('vector_visible', self._update_vectors_visible)
self.layer_state.add_callback('cmap_mode', self._update_cmap_mode)
self.layer_state.add_callback('size_mode', self._update_size_mode)
self.layer_state.add_callback('vector_mode', self._update_vector_mode)
self.layer_state.add_callback('density_map', self._update_size_mode)
self.layer_state.add_callback('density_map', self._update_warnings)
self.layer_state.add_callback('layer', self._update_warnings)
self._update_markers_visible()
self._update_line_visible()
self._update_xerr_visible()
self._update_yerr_visible()
self._update_vectors_visible()
self._update_size_mode()
self._update_vector_mode()
self._update_cmap_mode()
self._update_warnings()
def _update_warnings(self, *args):
if self.layer_state.layer is None:
n_points = 0
else:
n_points = np.product(self.layer_state.layer.shape)
warning = " (may be slow given data size)"
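        # For large layers, tag every non-'Fixed' entry of the size/color mode combos
        # with the warning (and color it red); otherwise strip the warning again.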
for combo, threshold in [(self.ui.combosel_size_mode, 10000),
(self.ui.combosel_cmap_mode, 50000)]:
if n_points > threshold and not self.layer_state.density_map:
for item in range(combo.count()):
text = combo.itemText(item)
if text != 'Fixed':
combo.setItemText(item, text + warning)
combo.setItemData(item, QtGui.QBrush(Qt.red), Qt.TextColorRole)
else:
for item in range(combo.count()):
text = combo.itemText(item)
if text != 'Fixed':
if warning in text:
combo.setItemText(item, text.replace(warning, ''))
combo.setItemData(item, QtGui.QBrush(), Qt.TextColorRole)
if n_points > 10000:
self.ui.label_warning_errorbar.show()
else:
self.ui.label_warning_errorbar.hide()
if n_points > 10000:
self.ui.label_warning_vector.show()
else:
self.ui.label_warning_vector.hide()
def _update_size_mode(self, size_mode=None):
visible = not self.layer_state.density_map and not self.layer_state.size_mode == 'Fixed'
self.ui.label_size_attribute.setVisible(visible)
self.ui.combosel_size_att.setVisible(visible)
self.ui.label_size_limits.setVisible(visible)
self.ui.valuetext_size_vmin.setVisible(visible)
self.ui.valuetext_size_vmax.setVisible(visible)
self.ui.button_flip_size.setVisible(visible)
visible = not self.layer_state.density_map and self.layer_state.size_mode == 'Fixed'
self.ui.value_size.setVisible(visible)
density = self.layer_state.density_map
self.ui.value_dpi.setVisible(density)
self.ui.label_dpi.setVisible(density)
self.ui.label_stretch.setVisible(density)
self.ui.combosel_stretch.setVisible(density)
self.ui.value_density_contrast.setVisible(density)
self.ui.label_contrast.setVisible(density)
self.ui.combosel_size_mode.setVisible(not density)
self.ui.value_size_scaling.setVisible(not density)
self.ui.label_size_mode.setVisible(not density)
self.ui.label_size_scaling.setVisible(not density)
self.ui.label_fill.setVisible(not density)
self.ui.bool_fill.setVisible(not density)
def _update_markers_visible(self, *args):
self.ui.combosel_size_mode.setEnabled(self.layer_state.markers_visible)
self.ui.value_size.setEnabled(self.layer_state.markers_visible)
self.ui.combosel_size_att.setEnabled(self.layer_state.markers_visible)
self.ui.valuetext_size_vmin.setEnabled(self.layer_state.markers_visible)
self.ui.valuetext_size_vmax.setEnabled(self.layer_state.markers_visible)
self.ui.button_flip_size.setEnabled(self.layer_state.markers_visible)
self.ui.value_size_scaling.setEnabled(self.layer_state.markers_visible)
self.ui.value_dpi.setEnabled(self.layer_state.markers_visible)
self.ui.combosel_stretch.setEnabled(self.layer_state.markers_visible)
self.ui.label_size_scaling.setEnabled(self.layer_state.markers_visible)
self.ui.combosel_points_mode.setEnabled(self.layer_state.markers_visible)
self.ui.value_density_contrast.setEnabled(self.layer_state.markers_visible)
def _update_line_visible(self, *args):
self.ui.value_linewidth.setEnabled(self.layer_state.line_visible)
self.ui.combosel_linestyle.setEnabled(self.layer_state.line_visible)
def _update_xerr_visible(self, *args):
self.ui.combosel_xerr_att.setEnabled(self.layer_state.xerr_visible)
def _update_yerr_visible(self, *args):
self.ui.combosel_yerr_att.setEnabled(self.layer_state.yerr_visible)
def _update_vectors_visible(self, *args):
self.ui.combosel_vector_mode.setEnabled(self.layer_state.vector_visible)
self.ui.combosel_vx_att.setEnabled(self.layer_state.vector_visible)
self.ui.combosel_vy_att.setEnabled(self.layer_state.vector_visible)
self.ui.value_vector_scaling.setEnabled(self.layer_state.vector_visible)
self.ui.combosel_vector_origin.setEnabled(self.layer_state.vector_visible)
self.ui.bool_vector_arrowhead.setEnabled(self.layer_state.vector_visible)
def _update_vector_mode(self, vector_mode=None):
if self.layer_state.vector_mode == 'Cartesian':
self.ui.label_vector_x.setText('vx')
self.ui.label_vector_y.setText('vy')
elif self.layer_state.vector_mode == 'Polar':
self.ui.label_vector_x.setText('angle (deg)')
self.ui.label_vector_y.setText('length')
def _update_cmap_mode(self, cmap_mode=None):
if self.layer_state.cmap_mode == 'Fixed':
self.ui.label_cmap_attribute.hide()
self.ui.combosel_cmap_att.hide()
self.ui.label_cmap_limits.hide()
self.ui.valuetext_cmap_vmin.hide()
self.ui.valuetext_cmap_vmax.hide()
self.ui.button_flip_cmap.hide()
self.ui.combodata_cmap.hide()
self.ui.label_colormap.hide()
self.ui.color_color.show()
else:
self.ui.label_cmap_attribute.show()
self.ui.combosel_cmap_att.show()
self.ui.label_cmap_limits.show()
self.ui.valuetext_cmap_vmin.show()
self.ui.valuetext_cmap_vmax.show()
self.ui.button_flip_cmap.show()
self.ui.combodata_cmap.show()
self.ui.label_colormap.show()
self.ui.color_color.hide()
| none | 1 | 1.959808 | 2 |
|
Question7.py | Schrodinger73/PracticalJournal_Class11 | 0 | 6633187 | # Question:-
# WAP to generate 6 random numbers between 100 and 999 and then print their mean, median and mode
# CODE:-
# For this one, we gotta import the random module (it is used to generate random numbers).
# Also added the statistics module for finding mean, median and mode
import random
import statistics
# Now, we will put our range in a variable so as to make it more easy to use.
# Notice that the question says b/w 100 and 999 i.e. these can't be included. So I had to put 101 as lower limit and since upper limit is not counted, I left it as 999.
given_range=range(101,999)
# Now, we have to use random.choices() method as we are trying to pick 6 numbers at random from given range.
# Also, we have to specify a value 'k' which tells how many random numbers we want; in this case k=6.
# We can assign it a variable for easy use.
nums=random.choices(given_range,k=6)
# Finding the mode
print(statistics.mode(nums))
# Finding the mean
print(statistics.mean(nums))
# Finding the median
print(statistics.median(nums))
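# Illustrative run (the actual numbers differ every time): if nums came out as
# [512, 845, 164, 310, 207, 845], mode would be 845, mean 480.5 and median 411.0.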
# No additional comments
| en | 0.935615 | # Question:- # WAP to generate 6 random numbers between 100 and 999 and then print their mean, median and mode # CODE:- # For this one, we gotta import random module (it is used to generate random numbers). # Also added the statistics module for finding mean , median and mode # Now, we will put our range in a variable so as to make it more easy to use. # Notice that the question says b/w 100 and 999 i.e. these can't be included. So I had to put 101 as lower limit and since upper limit is not counted, I left it as 999. # Now, we have to use random.choices() method as we are trying to pick 6 numbers at random from given range. # Also. we have to specify a value 'k' which tells how many times do we want a random number; in this case k=6. # We can assign it a variable for easy use. # Finding the mode # Finding the mean # Finding the median # No additional comments | 4.343805 | 4 |
couchdb_upgrade.py | pietervogelaar/couchdb_upgrade | 3 | 6633188 | <reponame>pietervogelaar/couchdb_upgrade
#!/usr/bin/env python
# couchdb_upgrade.py
# https://github.com/pietervogelaar/couchdb_upgrade
#
# Performs a rolling upgrade of a CouchDB cluster
#
# Installing dependencies:
# pip install requests
#
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import datetime
import json
import re
import requests
import subprocess
import sys
import time
from distutils.version import StrictVersion
from requests.auth import HTTPBasicAuth
from requests.exceptions import ConnectionError
class CouchDbUpgrader:
"""
Performs a rolling upgrade of a CouchDB cluster
"""
def __init__(self,
nodes,
username=None,
password=<PASSWORD>,
port=5984,
ssl=False,
service_stop_command='sudo systemctl stop couchdb',
service_start_command='sudo systemctl start couchdb',
upgrade_command='sudo yum clean all && sudo yum install -y couchdb',
latest_version_command="sudo yum clean all >/dev/null 2>&1 && yum list all couchdb |"
" grep couchdb | awk '{ print $2 }' | cut -d '-' -f1 |"
" sort --version-sort -r | head -n 1",
check_stable_command="stable=$(grep 'publish cluster `stable` event' /var/log/couchdb/couchdb.log |"
" while read -r line; do timestamp=$(echo $line | awk '{ print $2 }'); if ["
" \"$(date -d\"$timestamp\" +'%Y%m%d%H%M%S')\" -ge \"{service_start_time}\" ];"
" then echo 'yes'; fi; done); if [ \"$stable\" != \"yes\" ]; then exit 1; fi",
version='latest',
upgrade_system_command='sudo yum clean all && sudo yum update -y',
upgrade_system=False,
reboot=False,
force_reboot=False,
verbose=False,
):
"""
Constructor
:param nodes: list Host names or IP addresses of nodes
:param username: string
:param password: <PASSWORD>
:param port: int
:param ssl: bool
:param service_stop_command: string
:param service_start_command: string
:param upgrade_command: string
:param latest_version_command: string
:param check_stable_command: string
:param version: string
:param upgrade_system_command: string
        :param upgrade_system: bool
:param reboot: bool
:param force_reboot: bool
:param verbose: bool
"""
self._nodes = nodes
self._username = username
self._password = password
self._port = port
self._ssl = ssl
self._service_stop_command = service_stop_command
self._service_start_command = service_start_command
self._upgrade_command = upgrade_command
self._latest_version_command = latest_version_command
self._check_stable_command = check_stable_command
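        # NOTE: check_stable_command is stored but not executed anywhere below;
        # stability is instead checked via the HTTP /_up endpoint in wait_until_status_stable().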
self._version = version
self._upgrade_system_command = upgrade_system_command
self._upgrade_system = upgrade_system
self._reboot = reboot
self._force_reboot = force_reboot
self._verbose = verbose
# Internal class attributes
self._service_start_time = None
self._rebooting = False
self._couchdb_upgrades_available = False
self._os_upgrades_available = False
def verbose_response(self, response):
if self._verbose:
print('Response status code: {}'.format(response.status_code))
print('Response headers: {}'.format(response.headers))
print('Response content: {}'.format(response.text))
def current_version_lower(self, node):
"""
Checks if the current version of CouchDB on the node
is lower than the version to upgrade to
:param node: string
:return: bool
"""
response = requests.get(self.get_node_url(node))
self.verbose_response(response)
if response.status_code == 200:
data = response.json()
if 'version' in data:
if StrictVersion(data['version']) == StrictVersion(self._version):
print('Skipping upgrade, the current version {} is the same as the version to upgrade to'
.format(data['version']))
return False
elif StrictVersion(data['version']) > StrictVersion(self._version):
print('Skipping upgrade, the current version {} is higher than version {} to upgrade to'
.format(data['version'], self._version))
return False
else:
print('The current version {} is lower than version {} to upgrade to'
.format(data['version'], self._version))
return True
else:
sys.stderr.write("Could not determine the current version\n")
else:
sys.stderr.write("Could not retrieve the current version\n")
return False
def stop_service(self, node):
"""
Stops the CouchDB service on the node
:param node: string
:return: bool
"""
result = self.ssh_command(node, self._service_stop_command)
if result['exit_code'] != 0:
return False
return True
def upgrade_couchdb(self, node):
"""
Upgrades the CouchDB software on the node
:param node: string
:return: bool
"""
result = self.ssh_command(node, self._upgrade_command)
if self._verbose:
print('stdout:')
print(result['stdout'])
print('stderr:')
print(result['stderr'])
if result['exit_code'] != 0:
return False
if 'Nothing to do' in result['stdout']:
self._couchdb_upgrades_available = False
else:
self._couchdb_upgrades_available = True
return True
def upgrade_system(self, node):
"""
Upgrades the operating system
:param node: string
:return: bool
"""
result = self.ssh_command(node, self._upgrade_system_command)
if self._verbose:
print('stdout:')
print(result['stdout'])
print('stderr:')
print(result['stderr'])
if result['exit_code'] != 0:
return False
if 'No packages marked for update' in result['stdout']:
self._os_upgrades_available = False
else:
self._os_upgrades_available = True
return True
def start_service(self, node):
"""
Starts the CouchDB service on the node
:param node: string
:return: bool
"""
self._service_start_time = datetime.datetime.now()
result = self.ssh_command(node, self._service_start_command)
if result['exit_code'] != 0:
return False
return True
def wait_until_joined(self, node):
"""
Waits until the node joined the cluster
:param node:
:return: bool
"""
print('- Waiting until node joins the cluster')
while True:
time.sleep(5)
url = '{}/_membership'.format(self.get_node_url(node))
try:
if self._username:
auth = HTTPBasicAuth(self._username, self._password)
else:
auth = None
response = requests.get(url, auth=auth)
self.verbose_response(response)
if response.status_code == 200:
data = response.json()
if ('all_nodes' in data and
any(node in s for s in data['all_nodes']) and
'cluster_nodes' in data and
any(node in s for s in data['cluster_nodes'])):
if self._verbose:
print("Node joined the cluster")
else:
sys.stdout.write(".\n")
sys.stdout.flush()
return True
except ConnectionError as exception:
if self._verbose:
print('Could not connect to node')
if self._verbose:
print("Node hasn't joined the cluster yet")
else:
sys.stdout.write('.')
sys.stdout.flush()
def wait_until_status_stable(self, node):
"""
Waits until the cluster status is stable
:param node:
:return: bool
"""
print('- Waiting until cluster status is stable')
while True:
time.sleep(5)
url = '{}/_up'.format(self.get_node_url(node))
try:
if self._username:
auth = HTTPBasicAuth(self._username, self._password)
else:
auth = None
response = requests.get(url, auth=auth)
self.verbose_response(response)
if response.status_code == 200:
data = response.json()
if data['status'] == 'ok':
if self._verbose:
print("Cluster status is OK")
else:
sys.stdout.write(".\n")
sys.stdout.flush()
return True
except ConnectionError as exception:
if self._verbose:
print('Could not connect to node')
return True
if self._verbose:
print('Cluster status is not stable yet')
else:
sys.stdout.write('.')
sys.stdout.flush()
def get_latest_version(self, node):
"""
Gets the latest version available in the repository
:param node: string
        :return: string or bool
"""
result = self.ssh_command(node, self._latest_version_command)
if result['exit_code'] != 0:
return False
latest_version = result['stdout'].strip()
if StrictVersion(latest_version) > StrictVersion('0.0.0'):
return latest_version
return False
def reboot(self, node):
print('- Rebooting')
self._rebooting = True
self.ssh_command(node, 'sudo /sbin/shutdown -r now')
def get_node_url(self, node):
"""
Gets a node URL
:param node: string
:return: string
"""
if self._ssl:
protocol = 'https'
else:
protocol = 'http'
return '{}://{}:{}'.format(protocol, node, self._port)
def ssh_command(self, host, command):
"""
Executes a SSH command
:param host: string
:param command: string
:return: dict
"""
p = subprocess.Popen(['ssh', '%s' % host, command],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = p.stdout.readlines()
stderr = p.stderr.readlines()
stdout_string = ''.join(stdout)
stderr_string = ''.join(stderr)
# Remove clutter
regex = re.compile(r"Connection .+? closed by remote host\.\n?", re.IGNORECASE)
stderr_string = regex.sub('', stderr_string).strip()
if stderr_string:
sys.stderr.write("SSH error from host {}: {}\n".format(host, stderr_string))
# Make a return code available
p.communicate()[0]
result = {
'stdout': stdout_string,
'stderr': stderr_string,
'exit_code': p.returncode,
}
return result
def upgrade_node(self, node):
print('# Node {}'.format(node))
self._service_start_time = datetime.datetime.now()
self._rebooting = False
if self._version:
# Only upgrade node if the current version is lower than the version to upgrade to
if not self.current_version_lower(node):
# CouchDB already up to date
if self._upgrade_system:
print('- Upgrading operating system')
if not self.upgrade_system(node):
sys.stderr.write("Failed to upgrade operating system\n")
return False
else:
if not self._os_upgrades_available:
print('No operating system upgrades available')
if self._force_reboot or (self._reboot and self._os_upgrades_available):
self.reboot(node)
else:
return True
if not self._rebooting:
# Stop CouchDB service
print('- Stopping CouchDB service')
if not self.stop_service(node):
sys.stderr.write("Failed to stop CouchDB service\n")
return False
# Upgrade the CouchDB software
print('- Upgrading CouchDB software')
if not self.upgrade_couchdb(node):
sys.stderr.write("Failed to upgrade CouchDB software\n")
return False
if self._upgrade_system:
print('- Upgrading operating system')
if not self.upgrade_system(node):
sys.stderr.write("Failed to upgrade operating system\n")
return False
else:
if not self._os_upgrades_available:
print('No operating system upgrades available')
if (self._force_reboot or
(self._reboot and (self._couchdb_upgrades_available or self._os_upgrades_available))):
self.reboot(node)
if not self._rebooting:
# Start CouchDB service
print('- Starting CouchDB service')
if not self.start_service(node):
sys.stderr.write("Failed to start CouchDB service\n")
return False
self.wait_until_joined(node)
self.wait_until_status_stable(node)
return True
def upgrade(self):
print('Performing a rolling upgrade of the CouchDB cluster')
if self._verbose:
print('Cluster nodes: {}'.format(json.dumps(self._nodes)))
if self._version == 'latest':
print('Determining the latest version')
latest_version = self.get_latest_version(self._nodes[0])
if latest_version:
print('Using latest version {} as version to upgrade to'.format(latest_version))
self._version = latest_version
else:
sys.stderr.write("Failed to determine the latest version\n")
return False
for node in self._nodes:
if not self.upgrade_node(node):
sys.stderr.write("Failed to patch the CouchDB cluster\n")
return False
        print('Successfully upgraded all nodes of the CouchDB cluster')
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Performs a rolling upgrade of a CouchDB cluster')
parser.add_argument('-n', '--nodes', help='Comma separated list of host names or IP addresses of nodes',
required=True)
parser.add_argument('-u', '--username', help="Username for authentication")
parser.add_argument('-P', '--password', help="Password for authentication")
parser.add_argument('-p', '--port', help='CouchDB HTTP port. Default 5984', type=int, default=5984)
parser.add_argument('-s', '--ssl', help='Connect with https', action='store_true')
parser.add_argument('--service-stop-command',
help="Shell command to stop the CouchDB service on a node. "
"Default 'sudo systemctl stop couchdb'",
default='sudo systemctl stop couchdb')
parser.add_argument('--service-start-command',
help="Shell command to start the CouchDB service on a node. "
"Default 'sudo systemctl start couchdb'",
default='sudo systemctl start couchdb')
parser.add_argument('--upgrade-command',
help="Command to upgrade CouchDB on a node. "
"Default 'sudo yum clean all && sudo yum install -y couchdb'",
default='sudo yum clean all && sudo yum install -y couchdb')
parser.add_argument('--latest-version-command',
help="Command to get the latest version in the repository. "
"Default \"sudo yum clean all >/dev/null 2>&1 && sudo yum list all couchdb |"
" grep couchdb | awk '{ print $2 }' | cut -d '-' -f1 | sort --version-sort -r |"
" head -n 1\"",
default="sudo yum clean all >/dev/null 2>&1 && sudo yum list all couchdb |"
" grep couchdb | awk '{ print $2 }' | cut -d '-' -f1 | sort --version-sort -r |"
" head -n 1")
parser.add_argument('--check-stable-command',
help="Command to check if the cluster status is stable again after a node that"
" rejoined the cluster. Default \"stable=$(grep 'publish cluster `stable` event'"
" /var/log/couchdb/couchdb.log | while read -r line; do timestamp=$(echo $line |"
" awk '{ print $2 }'); if [ \"$(date -d\"$timestamp\" +'%%Y%%m%%d%%H%%M%%S')\" -ge"
" \"{service_start_time}\" ]; then echo 'yes'; fi; done); if [ \"$stable\" != \"yes\" ];"
" then exit 1; fi\"",
default="stable=$(grep 'publish cluster `stable` event' /var/log/couchdb/couchdb.log |"
" while read -r line; do timestamp=$(echo $line | awk '{ print $2 }'); if ["
" \"$(date -d\"$timestamp\" +'%Y%m%d%H%M%S')\" -ge \"{service_start_time}\" ];"
" then echo 'yes'; fi; done); if [ \"$stable\" != \"yes\" ]; then exit 1; fi")
parser.add_argument('--version',
help="A specific version to upgrade to or 'latest'. If 'latest', then the highest"
" available version in the repository will be determined. Nodes with a version"
" equal or higher will be skipped. Default 'latest'",
default='latest')
parser.add_argument('--upgrade-system-command',
help="Command to upgrade operating system. Default 'sudo yum clean all && sudo yum update -y'",
default='sudo yum clean all && sudo yum update -y')
parser.add_argument('--upgrade-system', help='Upgrades the operating system also after upgrading CouchDB',
action='store_true')
parser.add_argument('--reboot', help='Reboots the server if an actual upgrade took place', action='store_true')
parser.add_argument('--force-reboot', help='Always reboots the server, even though no upgrade occurred because'
' the version was already the latest', action='store_true')
    parser.add_argument('-v', '--verbose', help='Display more information', action='store_true')
args = parser.parse_args()
# Create nodes list from comma separated string
nodes = args.nodes.replace(' ', '').split(',')
couchdb_upgrader = CouchDbUpgrader(nodes,
args.username,
args.password,
args.port,
args.ssl,
args.service_stop_command,
args.service_start_command,
args.upgrade_command,
args.latest_version_command,
args.check_stable_command,
args.version,
args.upgrade_system_command,
args.upgrade_system,
args.reboot,
args.force_reboot,
args.verbose)
if not couchdb_upgrader.upgrade():
exit(1)
| #!/usr/bin/env python
# couchdb_upgrade.py
# https://github.com/pietervogelaar/couchdb_upgrade
#
# Performs a rolling upgrade of a CouchDB cluster
#
# Installing dependencies:
# pip install requests
#
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import datetime
import json
import re
import requests
import subprocess
import sys
import time
from distutils.version import StrictVersion
from requests.auth import HTTPBasicAuth
from requests.exceptions import ConnectionError
class CouchDbUpgrader:
"""
Performs a rolling upgrade of a CouchDB cluster
"""
def __init__(self,
nodes,
username=None,
password=<PASSWORD>,
port=5984,
ssl=False,
service_stop_command='sudo systemctl stop couchdb',
service_start_command='sudo systemctl start couchdb',
upgrade_command='sudo yum clean all && sudo yum install -y couchdb',
latest_version_command="sudo yum clean all >/dev/null 2>&1 && yum list all couchdb |"
" grep couchdb | awk '{ print $2 }' | cut -d '-' -f1 |"
" sort --version-sort -r | head -n 1",
check_stable_command="stable=$(grep 'publish cluster `stable` event' /var/log/couchdb/couchdb.log |"
" while read -r line; do timestamp=$(echo $line | awk '{ print $2 }'); if ["
" \"$(date -d\"$timestamp\" +'%Y%m%d%H%M%S')\" -ge \"{service_start_time}\" ];"
" then echo 'yes'; fi; done); if [ \"$stable\" != \"yes\" ]; then exit 1; fi",
version='latest',
upgrade_system_command='sudo yum clean all && sudo yum update -y',
upgrade_system=False,
reboot=False,
force_reboot=False,
verbose=False,
):
"""
Constructor
:param nodes: list Host names or IP addresses of nodes
:param username: string
:param password: <PASSWORD>
:param port: int
:param ssl: bool
:param service_stop_command: string
:param service_start_command: string
:param upgrade_command: string
:param latest_version_command: string
:param check_stable_command: string
:param version: string
:param upgrade_system_command: string
:param upgrade_system: string
:param reboot: bool
:param force_reboot: bool
:param verbose: bool
"""
self._nodes = nodes
self._username = username
self._password = password
self._port = port
self._ssl = ssl
self._service_stop_command = service_stop_command
self._service_start_command = service_start_command
self._upgrade_command = upgrade_command
self._latest_version_command = latest_version_command
self._check_stable_command = check_stable_command
self._version = version
self._upgrade_system_command = upgrade_system_command
self._upgrade_system = upgrade_system
self._reboot = reboot
self._force_reboot = force_reboot
self._verbose = verbose
# Internal class attributes
self._service_start_time = None
self._rebooting = False
self._couchdb_upgrades_available = False
self._os_upgrades_available = False
def verbose_response(self, response):
if self._verbose:
print('Response status code: {}'.format(response.status_code))
print('Response headers: {}'.format(response.headers))
print('Response content: {}'.format(response.text))
def current_version_lower(self, node):
"""
Checks if the current version of CouchDB on the node
is lower than the version to upgrade to
:param node: string
:return: bool
"""
response = requests.get(self.get_node_url(node))
self.verbose_response(response)
if response.status_code == 200:
data = response.json()
if 'version' in data:
if StrictVersion(data['version']) == StrictVersion(self._version):
print('Skipping upgrade, the current version {} is the same as the version to upgrade to'
.format(data['version']))
return False
elif StrictVersion(data['version']) > StrictVersion(self._version):
print('Skipping upgrade, the current version {} is higher than version {} to upgrade to'
.format(data['version'], self._version))
return False
else:
print('The current version {} is lower than version {} to upgrade to'
.format(data['version'], self._version))
return True
else:
sys.stderr.write("Could not determine the current version\n")
else:
sys.stderr.write("Could not retrieve the current version\n")
return False
def stop_service(self, node):
"""
Stops the CouchDB service on the node
:param node: string
:return: bool
"""
result = self.ssh_command(node, self._service_stop_command)
if result['exit_code'] != 0:
return False
return True
def upgrade_couchdb(self, node):
"""
Upgrades the CouchDB software on the node
:param node: string
:return: bool
"""
result = self.ssh_command(node, self._upgrade_command)
if self._verbose:
print('stdout:')
print(result['stdout'])
print('stderr:')
print(result['stderr'])
if result['exit_code'] != 0:
return False
if 'Nothing to do' in result['stdout']:
self._couchdb_upgrades_available = False
else:
self._couchdb_upgrades_available = True
return True
def upgrade_system(self, node):
"""
Upgrades the operating system
:param node: string
:return: bool
"""
result = self.ssh_command(node, self._upgrade_system_command)
if self._verbose:
print('stdout:')
print(result['stdout'])
print('stderr:')
print(result['stderr'])
if result['exit_code'] != 0:
return False
if 'No packages marked for update' in result['stdout']:
self._os_upgrades_available = False
else:
self._os_upgrades_available = True
return True
def start_service(self, node):
"""
Starts the CouchDB service on the node
:param node: string
:return: bool
"""
self._service_start_time = datetime.datetime.now()
result = self.ssh_command(node, self._service_start_command)
if result['exit_code'] != 0:
return False
return True
def wait_until_joined(self, node):
"""
Waits until the node joined the cluster
:param node:
:return: bool
"""
print('- Waiting until node joins the cluster')
while True:
time.sleep(5)
url = '{}/_membership'.format(self.get_node_url(node))
try:
if self._username:
auth = HTTPBasicAuth(self._username, self._password)
else:
auth = None
response = requests.get(url, auth=auth)
self.verbose_response(response)
if response.status_code == 200:
data = response.json()
if ('all_nodes' in data and
any(node in s for s in data['all_nodes']) and
'cluster_nodes' in data and
any(node in s for s in data['cluster_nodes'])):
if self._verbose:
print("Node joined the cluster")
else:
sys.stdout.write(".\n")
sys.stdout.flush()
return True
except ConnectionError as exception:
if self._verbose:
print('Could not connect to node')
if self._verbose:
print("Node hasn't joined the cluster yet")
else:
sys.stdout.write('.')
sys.stdout.flush()
def wait_until_status_stable(self, node):
"""
Waits until the cluster status is stable
:param node:
:return: bool
"""
print('- Waiting until cluster status is stable')
while True:
time.sleep(5)
url = '{}/_up'.format(self.get_node_url(node))
try:
if self._username:
auth = HTTPBasicAuth(self._username, self._password)
else:
auth = None
response = requests.get(url, auth=auth)
self.verbose_response(response)
if response.status_code == 200:
data = response.json()
if data['status'] == 'ok':
if self._verbose:
print("Cluster status is OK")
else:
sys.stdout.write(".\n")
sys.stdout.flush()
return True
except ConnectionError as exception:
if self._verbose:
print('Could not connect to node')
return True
if self._verbose:
print('Cluster status is not stable yet')
else:
sys.stdout.write('.')
sys.stdout.flush()
def get_latest_version(self, node):
"""
Gets the latest version available in the repository
:param node: string
:return: bool
"""
result = self.ssh_command(node, self._latest_version_command)
if result['exit_code'] != 0:
return False
latest_version = result['stdout'].strip()
if StrictVersion(latest_version) > StrictVersion('0.0.0'):
return latest_version
return False
def reboot(self, node):
print('- Rebooting')
self._rebooting = True
self.ssh_command(node, 'sudo /sbin/shutdown -r now')
def get_node_url(self, node):
"""
Gets a node URL
:param node: string
:return: string
"""
if self._ssl:
protocol = 'https'
else:
protocol = 'http'
return '{}://{}:{}'.format(protocol, node, self._port)
def ssh_command(self, host, command):
"""
Executes a SSH command
:param host: string
:param command: string
:return: dict
"""
p = subprocess.Popen(['ssh', '%s' % host, command],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)  # text mode, so readlines() yields str rather than bytes
stdout = p.stdout.readlines()
stderr = p.stderr.readlines()
stdout_string = ''.join(stdout)
stderr_string = ''.join(stderr)
# Remove clutter
regex = re.compile(r"Connection .+? closed by remote host\.\n?", re.IGNORECASE)
stderr_string = regex.sub('', stderr_string).strip()
if stderr_string:
sys.stderr.write("SSH error from host {}: {}\n".format(host, stderr_string))
# Wait for the process to exit so that its return code becomes available
p.communicate()[0]
result = {
'stdout': stdout_string,
'stderr': stderr_string,
'exit_code': p.returncode,
}
return result
def upgrade_node(self, node):
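# Per-node sequence: skip nodes already at the target version, otherwise stop
# the CouchDB service, upgrade CouchDB (and optionally the OS), reboot if
# requested, restart the service, then wait for the node to rejoin and for the
# cluster status to stabilise.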
print('# Node {}'.format(node))
self._service_start_time = datetime.datetime.now()
self._rebooting = False
if self._version:
# Only upgrade node if the current version is lower than the version to upgrade to
if not self.current_version_lower(node):
# CouchDB already up to date
if self._upgrade_system:
print('- Upgrading operating system')
if not self.upgrade_system(node):
sys.stderr.write("Failed to upgrade operating system\n")
return False
else:
if not self._os_upgrades_available:
print('No operating system upgrades available')
if self._force_reboot or (self._reboot and self._os_upgrades_available):
self.reboot(node)
else:
return True
if not self._rebooting:
# Stop CouchDB service
print('- Stopping CouchDB service')
if not self.stop_service(node):
sys.stderr.write("Failed to stop CouchDB service\n")
return False
# Upgrade the CouchDB software
print('- Upgrading CouchDB software')
if not self.upgrade_couchdb(node):
sys.stderr.write("Failed to upgrade CouchDB software\n")
return False
if self._upgrade_system:
print('- Upgrading operating system')
if not self.upgrade_system(node):
sys.stderr.write("Failed to upgrade operating system\n")
return False
else:
if not self._os_upgrades_available:
print('No operating system upgrades available')
if (self._force_reboot or
(self._reboot and (self._couchdb_upgrades_available or self._os_upgrades_available))):
self.reboot(node)
if not self._rebooting:
# Start CouchDB service
print('- Starting CouchDB service')
if not self.start_service(node):
sys.stderr.write("Failed to start CouchDB service\n")
return False
self.wait_until_joined(node)
self.wait_until_status_stable(node)
return True
def upgrade(self):
print('Performing a rolling upgrade of the CouchDB cluster')
if self._verbose:
print('Cluster nodes: {}'.format(json.dumps(self._nodes)))
if self._version == 'latest':
print('Determining the latest version')
latest_version = self.get_latest_version(self._nodes[0])
if latest_version:
print('Using latest version {} as version to upgrade to'.format(latest_version))
self._version = latest_version
else:
sys.stderr.write("Failed to determine the latest version\n")
return False
for node in self._nodes:
if not self.upgrade_node(node):
sys.stderr.write("Failed to patch the CouchDB cluster\n")
return False
print('Successfully upgraded all nodes of the CouchDB cluster')
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Performs a rolling upgrade of a CouchDB cluster')
parser.add_argument('-n', '--nodes', help='Comma separated list of host names or IP addresses of nodes',
required=True)
parser.add_argument('-u', '--username', help="Username for authentication")
parser.add_argument('-P', '--password', help="Password for authentication")
parser.add_argument('-p', '--port', help='CouchDB HTTP port. Default 5984', type=int, default=5984)
parser.add_argument('-s', '--ssl', help='Connect with https', action='store_true')
parser.add_argument('--service-stop-command',
help="Shell command to stop the CouchDB service on a node. "
"Default 'sudo systemctl stop couchdb'",
default='sudo systemctl stop couchdb')
parser.add_argument('--service-start-command',
help="Shell command to start the CouchDB service on a node. "
"Default 'sudo systemctl start couchdb'",
default='sudo systemctl start couchdb')
parser.add_argument('--upgrade-command',
help="Command to upgrade CouchDB on a node. "
"Default 'sudo yum clean all && sudo yum install -y couchdb'",
default='sudo yum clean all && sudo yum install -y couchdb')
parser.add_argument('--latest-version-command',
help="Command to get the latest version in the repository. "
"Default \"sudo yum clean all >/dev/null 2>&1 && sudo yum list all couchdb |"
" grep couchdb | awk '{ print $2 }' | cut -d '-' -f1 | sort --version-sort -r |"
" head -n 1\"",
default="sudo yum clean all >/dev/null 2>&1 && sudo yum list all couchdb |"
" grep couchdb | awk '{ print $2 }' | cut -d '-' -f1 | sort --version-sort -r |"
" head -n 1")
parser.add_argument('--check-stable-command',
help="Command to check if the cluster status is stable again after a node that"
" rejoined the cluster. Default \"stable=$(grep 'publish cluster `stable` event'"
" /var/log/couchdb/couchdb.log | while read -r line; do timestamp=$(echo $line |"
" awk '{ print $2 }'); if [ \"$(date -d\"$timestamp\" +'%%Y%%m%%d%%H%%M%%S')\" -ge"
" \"{service_start_time}\" ]; then echo 'yes'; fi; done); if [ \"$stable\" != \"yes\" ];"
" then exit 1; fi\"",
default="stable=$(grep 'publish cluster `stable` event' /var/log/couchdb/couchdb.log |"
" while read -r line; do timestamp=$(echo $line | awk '{ print $2 }'); if ["
" \"$(date -d\"$timestamp\" +'%Y%m%d%H%M%S')\" -ge \"{service_start_time}\" ];"
" then echo 'yes'; fi; done); if [ \"$stable\" != \"yes\" ]; then exit 1; fi")
parser.add_argument('--version',
help="A specific version to upgrade to or 'latest'. If 'latest', then the highest"
" available version in the repository will be determined. Nodes with a version"
" equal or higher will be skipped. Default 'latest'",
default='latest')
parser.add_argument('--upgrade-system-command',
help="Command to upgrade operating system. Default 'sudo yum clean all && sudo yum update -y'",
default='sudo yum clean all && sudo yum update -y')
parser.add_argument('--upgrade-system', help='Upgrades the operating system also after upgrading CouchDB',
action='store_true')
parser.add_argument('--reboot', help='Reboots the server if an actual upgrade took place', action='store_true')
parser.add_argument('--force-reboot', help='Always reboots the server, even though no upgrade occurred because'
' the version was already the latest', action='store_true')
parser.add_argument('-v', '--verbose', help='Display of more information', action='store_true')
args = parser.parse_args()
# Create nodes list from comma separated string
nodes = args.nodes.replace(' ', '').split(',')
couchdb_upgrader = CouchDbUpgrader(nodes,
args.username,
args.password,
args.port,
args.ssl,
args.service_stop_command,
args.service_start_command,
args.upgrade_command,
args.latest_version_command,
args.check_stable_command,
args.version,
args.upgrade_system_command,
args.upgrade_system,
args.reboot,
args.force_reboot,
args.verbose)
if not couchdb_upgrader.upgrade():
exit(1) | en | 0.678701 | #!/usr/bin/env python # couchdb_upgrade.py # https://github.com/pietervogelaar/couchdb_upgrade # # Performs a rolling upgrade of a CouchDB cluster # # Installing dependencies: # pip install requests # # MIT License # # Copyright (c) 2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. Performs a rolling upgrade of a CouchDB cluster Constructor :param nodes: list Host names or IP addresses of nodes :param username: string :param password: <PASSWORD> :param port: int :param ssl: bool :param service_stop_command: string :param service_start_command: string :param upgrade_command: string :param latest_version_command: string :param check_stable_command: string :param version: string :param upgrade_system_command: string :param upgrade_system: string :param reboot: bool :param force_reboot: bool :param verbose: bool # Internal class attributes Checks if the current version of CouchDB on the node is lower than the version to upgrade to :param node: string :return: bool Stops the CouchDB service on the node :param node: string :return: bool Upgrades the CouchDB software on the node :param node: string :return: bool Upgrades the operating system :param node: string :return: bool Starts the CouchDB service on the node :param node: string :return: bool Waits until the node joined the cluster :param node: :return: bool Waits until the cluster status is stable :param node: :return: bool Gets the latest version available in the repository :param node: string :return: bool Gets a node URL :param node: string :return: string Executes a SSH command :param host: string :param command: string :return: dict # Remove clutter # Make a return code available # Only upgrade node if the current version is lower than the version to upgrade to # CouchDB already up to date # Stop CouchDB service # Upgrade the CouchDB software # Start CouchDB service # Create nodes list from comma separated string | 2.207471 | 2 |
startup/98-ramp.py | MikeHart85/IOS_profile_collection | 0 | 6633189 | <gh_stars>0
# this is already done in nslsii.configure_base but being explicit here
import bluesky.plans as bp
import bluesky.plan_stubs as bps
import bluesky.preprocessors as bpp
from collections import ChainMap
from ophyd import StatusBase
import time
#from bluesky.spec_api import inner_spec_decorator, setup_plot, setup_livetable, _figure_name
import bluesky as bs
import builtins
input = builtins.input
def change_epu_flt_link(new_target):
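# Rewrite only the first token of the epu1.flt.input_pv link string (the PV
# name) to point at new_target, leaving the trailing link flags (e.g. 'CP MS')
# untouched.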
v = (yield from bps.read(epu1.flt.input_pv))
if v is None:
return
n = epu1.flt.input_pv.name
cur_pv = v[n]['value']
pts = cur_pv.split(' ', maxsplit=1)
new_pv = ' '.join([new_target] + pts[1:])
yield from bps.abs_set(epu1.flt.input_pv, new_pv)
class NormPlot(bs.callbacks.LivePlot):
def event(self,doc):
doc = dict(doc)
doc['data'] = dict(doc['data'])
try:
doc['data']['norm_intensity'] = doc['data']['sclr_ch4']/doc['data']['sclr_ch3']
except KeyError:
pass
super().event(doc)
class norm_plot(bs.callbacks.LivePlot):
def __init__(self, *args, func, **kwargs):
super().__init__(*args, **kwargs)
self._doc_func = func
def event(self,doc):
doc = self._doc_func(doc)
super().event(doc)
def simple_norm(doc):
try:
doc.data['norm_intensity'] = doc.data['sclr_ch4']/doc.data['sclr_ch3']
except KeyError:
pass
return doc
#This is no longer supported. Define a LivePlot callback or use best effort callback
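# For example (hypothetical usage, not part of this profile), the NormPlot
# callback above could be subscribed to the RunEngine directly, e.g.:
#   RE.subscribe(NormPlot('norm_intensity', pgm.energy.name))
# or one can simply rely on the best effort callback set up by nslsii.configure_base.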
#def setup_norm_plot(*, motors, gs):
# """Setup a LivePlot by inspecting motors and gs.
# If motors is empty, use sequence number.
# """
# y_key = gs.PLOT_Y
# if motors:
# x_key = first_key_heuristic(list(motors)[0])
# fig_name = _figure_name('BlueSky {} v {}'.format(y_key, x_key))
# fig = plt.figure(fig_name)
# return NormPlot(y_key, x_key, fig=fig)
# else:
# fig_name = _figure_name('BlueSky: {} v sequence number'.format(y_key))
# fig = plt.figure(fig_name)
# return NormPlot(y_key, fig=fig)
def _run_E_ramp(dets, start, stop, velocity, deadband, *,
streamname='primary', md=None):
if md is None:
md = {}
md = ChainMap(md, {'plan_args': {'dets': list(map(repr, dets)),
'start': start,
'stop': stop,
'velocity': velocity,
'deadband': deadband},
'plan_name': 'E_ramp',
'motors': [pgm.energy.name]})
# put the energy at the starting value
yield from bps.abs_set(pgm.energy, start, wait=True)
yield from bps.abs_set(pgm.fly.start_sig, start, wait=True)
yield from bps.abs_set(pgm.fly.stop_sig, stop, wait=True)
yield from bps.abs_set(pgm.fly.velocity, velocity, wait=True)
if specs in dets:
specs.stage()
# TODO do this with stage
old_db = epu1.flt.output_deadband.get()
yield from bps.abs_set(epu1.flt.output_deadband, deadband)
# get the old value
v = (yield from bps.read(epu1.flt.input_pv))
if v is None:
old_link = ''
else:
n = epu1.flt.input_pv.name
old_link = v[n]['value']
# define a clean up plan
def clean_up():
# move the energy setpoint to where the energy really is
yield from bps.abs_set(pgm.energy, pgm.energy.position, wait=True)
# set the interpolator to look at what it was looking at before
# the scan. This should be the energy set point.
yield from bps.abs_set(epu1.flt.input_pv, old_link, wait=True)
yield from bps.abs_set(epu1.flt.output_deadband, old_db, wait=True)
if specs in dets:
specs.unstage()
# change to track the readout energy
yield from change_epu_flt_link(pgm_energy.readback.pvname)
def go_plan():
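# Arm the hardware fly scan via pgm.fly.fly_start and return a Status object
# that is marked finished once the IOC's scan_status record transitions back
# to 'Ready' (or immediately, in simulation mode, when the set returns None).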
ret = (yield from bps.abs_set(pgm.fly.fly_start, 1))
st = StatusBase()
enum_map = pgm.fly.scan_status.describe()[pgm.fly.scan_status.name]['enum_strs']
def _done_cb(value, old_value, **kwargs):
old_value = enum_map[int(old_value)]
value = enum_map[int(value)]
if old_value != value and value == 'Ready':
st._finished()
pgm.fly.scan_status.clear_sub(_done_cb)
if ret is not None:
pgm.fly.scan_status.subscribe(_done_cb, run=False)
else:
st._finished()
print('SIM MODE')
return st
def inner_plan():
yield from trigger_and_read(dets, name=streamname)
print(md)
rp = ramp_plan(go_plan(), pgm.energy,
inner_plan, period=None, md=md)
return (yield from bpp.finalize_wrapper(rp, clean_up()))
# NOTE : This function has been changed to take DETS as an argument
def E_ramp(dets, start, stop, velocity, time=None, *,
streamname='primary', deadband=8, md=None):
'''
dets: need to supply the detectors used
'''
motors = [pgm.energy]
# DEPRECATED
#inner = inner_spec_decorator('E_ramp', time, motors)(_run_E_ramp)
inner = _run_E_ramp
return (yield from inner(dets + [pgm.energy], start, stop, velocity,
streamname=streamname, deadband=deadband, md=md))
# THIS is no longer supported
#gs.SUB_FACTORIES['E_ramp'] = [setup_plot, setup_livetable]
#gs.SUB_FACTORIES['E_ramp'] = [setup_norm_plot, setup_livetable]
def _epu_ramp(dets, start, stop, md=None):  # md parameter added so the ramp_plan call below has it defined
def go_plan():
return (yield from bps.abs_set(epu1.gap, stop, wait=False))
def inner_plan():
yield from trigger_and_read(dets)
yield from bps.abs_set(epu1.gap, start, wait=True)
return (yield from (ramp_plan(go_plan(), pgm.energy,
inner_plan, period=None, md=md)))
def fix_epu():
# move the energy setpoint to where the energy really is
yield from bps.abs_set(pgm.energy, pgm.energy.position, wait=True)
# set the interpolator to look at what it was looking at before
# the scan. This should be the energy set point.
yield from bps.abs_set(epu1.flt.input_pv, 'XF:23ID2-OP{Mono}Enrgy-SP CP MS', wait=True)
yield from bps.abs_set(epu1.flt.output_deadband, 0, wait=True)
| # this is already done in nslsii.configure_base but being explicit here
import bluesky.plans as bp
import bluesky.plan_stubs as bps
import bluesky.preprocessors as bpp
from collections import ChainMap
from ophyd import StatusBase
import time
#from bluesky.spec_api import inner_spec_decorator, setup_plot, setup_livetable, _figure_name
import bluesky as bs
import builtins
input = builtins.input
def change_epu_flt_link(new_target):
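# Rewrite only the first token of the epu1.flt.input_pv link string (the PV
# name) to point at new_target, leaving the trailing link flags (e.g. 'CP MS')
# untouched.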
v = (yield from bps.read(epu1.flt.input_pv))
if v is None:
return
n = epu1.flt.input_pv.name
cur_pv = v[n]['value']
pts = cur_pv.split(' ', maxsplit=1)
new_pv = ' '.join([new_target] + pts[1:])
yield from bps.abs_set(epu1.flt.input_pv, new_pv)
class NormPlot(bs.callbacks.LivePlot):
def event(self,doc):
doc = dict(doc)
doc['data'] = dict(doc['data'])
try:
doc['data']['norm_intensity'] = doc['data']['sclr_ch4']/doc['data']['sclr_ch3']
except KeyError:
pass
super().event(doc)
class norm_plot(bs.callbacks.LivePlot):
def __init__(self, *args, func, **kwargs):
super().__init__(*args, **kwargs)
self._doc_func = func
def event(self,doc):
doc = self._doc_func(doc)
super().event(doc)
def simple_norm(doc):
try:
doc.data['norm_intensity'] = doc.data['sclr_ch4']/doc.data['sclr_ch3']
except KeyError:
pass
return doc
#This is no longer supported. Define a LivePlot callback or use best effort callback
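# For example (hypothetical usage, not part of this profile), the NormPlot
# callback above could be subscribed to the RunEngine directly, e.g.:
#   RE.subscribe(NormPlot('norm_intensity', pgm.energy.name))
# or one can simply rely on the best effort callback set up by nslsii.configure_base.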
#def setup_norm_plot(*, motors, gs):
# """Setup a LivePlot by inspecting motors and gs.
# If motors is empty, use sequence number.
# """
# y_key = gs.PLOT_Y
# if motors:
# x_key = first_key_heuristic(list(motors)[0])
# fig_name = _figure_name('BlueSky {} v {}'.format(y_key, x_key))
# fig = plt.figure(fig_name)
# return NormPlot(y_key, x_key, fig=fig)
# else:
# fig_name = _figure_name('BlueSky: {} v sequence number'.format(y_key))
# fig = plt.figure(fig_name)
# return NormPlot(y_key, fig=fig)
def _run_E_ramp(dets, start, stop, velocity, deadband, *,
streamname='primary', md=None):
if md is None:
md = {}
md = ChainMap(md, {'plan_args': {'dets': list(map(repr, dets)),
'start': start,
'stop': stop,
'velocity': velocity,
'deadband': deadband},
'plan_name': 'E_ramp',
'motors': [pgm.energy.name]})
# put the energy at the starting value
yield from bps.abs_set(pgm.energy, start, wait=True)
yield from bps.abs_set(pgm.fly.start_sig, start, wait=True)
yield from bps.abs_set(pgm.fly.stop_sig, stop, wait=True)
yield from bps.abs_set(pgm.fly.velocity, velocity, wait=True)
if specs in dets:
specs.stage()
# TODO do this with stage
old_db = epu1.flt.output_deadband.get()
yield from bps.abs_set(epu1.flt.output_deadband, deadband)
# get the old value
v = (yield from bps.read(epu1.flt.input_pv))
if v is None:
old_link = ''
else:
n = epu1.flt.input_pv.name
old_link = v[n]['value']
# define a clean up plan
def clean_up():
# move the energy setpoint to where the energy really is
yield from bps.abs_set(pgm.energy, pgm.energy.position, wait=True)
# set the interpolator to look at what it was looking at before
# the scan. This should be the energy set point.
yield from bps.abs_set(epu1.flt.input_pv, old_link, wait=True)
yield from bps.abs_set(epu1.flt.output_deadband, old_db, wait=True)
if specs in dets:
specs.unstage()
# change to track the readout energy
yield from change_epu_flt_link(pgm_energy.readback.pvname)
def go_plan():
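# Arm the hardware fly scan via pgm.fly.fly_start and return a Status object
# that is marked finished once the IOC's scan_status record transitions back
# to 'Ready' (or immediately, in simulation mode, when the set returns None).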
ret = (yield from bps.abs_set(pgm.fly.fly_start, 1))
st = StatusBase()
enum_map = pgm.fly.scan_status.describe()[pgm.fly.scan_status.name]['enum_strs']
def _done_cb(value, old_value, **kwargs):
old_value = enum_map[int(old_value)]
value = enum_map[int(value)]
if old_value != value and value == 'Ready':
st._finished()
pgm.fly.scan_status.clear_sub(_done_cb)
if ret is not None:
pgm.fly.scan_status.subscribe(_done_cb, run=False)
else:
st._finished()
print('SIM MODE')
return st
def inner_plan():
yield from trigger_and_read(dets, name=streamname)
print(md)
rp = ramp_plan(go_plan(), pgm.energy,
inner_plan, period=None, md=md)
return (yield from bpp.finalize_wrapper(rp, clean_up()))
# NOTE : This function has been changed to take DETS as an argument
def E_ramp(dets, start, stop, velocity, time=None, *,
streamname='primary', deadband=8, md=None):
'''
dets: need to supply the detectors used
'''
motors = [pgm.energy]
# DEPRECATED
#inner = inner_spec_decorator('E_ramp', time, motors)(_run_E_ramp)
inner = _run_E_ramp
return (yield from inner(dets + [pgm.energy], start, stop, velocity,
streamname=streamname, deadband=deadband, md=md))
# THIS is no longer supported
#gs.SUB_FACTORIES['E_ramp'] = [setup_plot, setup_livetable]
#gs.SUB_FACTORIES['E_ramp'] = [setup_norm_plot, setup_livetable]
def _epu_ramp(dets, start, stop, md=None):  # md parameter added so the ramp_plan call below has it defined
def go_plan():
return (yield from bps.abs_set(epu1.gap, stop, wait=False))
def inner_plan():
yield from trigger_and_read(dets)
yield from bps.abs_set(epu1.gap, start, wait=True)
return (yield from (ramp_plan(go_plan(), pgm.energy,
inner_plan, period=None, md=md)))
def fix_epu():
# move the energy setpoint to where the energy really is
yield from bps.abs_set(pgm.energy, pgm.energy.position, wait=True)
# set the interpolator to look at what it was looking at before
# the scan. This should be the energy set point.
yield from bps.abs_set(epu1.flt.input_pv, 'XF:23ID2-OP{Mono}Enrgy-SP CP MS', wait=True)
yield from bps.abs_set(epu1.flt.output_deadband, 0, wait=True) | en | 0.802668 | # this is already done in nslsii.configure_base but being explicit here #from bluesky.spec_api import inner_spec_decorator, setup_plot, setup_livetable, _figure_name #This is no longer supported. Define a LivePlot callback or use best effort callback #def setup_norm_plot(*, motors, gs): # """Setup a LivePlot by inspecting motors and gs. # If motors is empty, use sequence number. # """ # y_key = gs.PLOT_Y # if motors: # x_key = first_key_heuristic(list(motors)[0]) # fig_name = _figure_name('BlueSky {} v {}'.format(y_key, x_key)) # fig = plt.figure(fig_name) # return NormPlot(y_key, x_key, fig=fig) # else: # fig_name = _figure_name('BlueSky: {} v sequence number'.format(y_key)) # fig = plt.figure(fig_name) # return NormPlot(y_key, fig=fig) # put the energy at the starting value # TODO do this with stage # get the old vlaue # define a clean up plan # move the energy setpoint to where the energy really is # set the interpolator to look at what it was looking at before # the scan. This should be the energy set point. # change to track the readout energy # NOTE : This function has been changed to take DETS as an argument dets: need to supply the detectors used # DEPRECATED #inner = inner_spec_decorator('E_ramp', time, motors)(_run_E_ramp) # THIS is no longer supported #gs.SUB_FACTORIES['E_ramp'] = [setup_plot, setup_livetable] #gs.SUB_FACTORIES['E_ramp'] = [setup_norm_plot, setup_livetable] # move the energy setpoint to where the energy really is # set the interpolator to look at what it was looking at before # the scan. This should be the energy set point. | 2.172428 | 2 |
installer.py | cartologic/cartoview_geo_observation | 3 | 6633190 | <filename>installer.py
# -*- coding: utf-8 -*-
__author__ = "cartologic"
info = {
"title": "GeoObservation",
"description": """Users can submit new reports, review existing reports, and comment and vote on reports or observations submitted by other users. They can track the status of problems or observations they have reported.
Supports field observations and mobile data collection, whether by professional monitoring staff or by volunteers in e.g. citizen science and mobile crowdsourcing projects. The field observations app is also useful as a platform for building a variety of mobile-first websites and CRUD applications. It allows users to submit problems or observations.
The application has been optimized for smartphones but is responsively designed to be used on smartphones, tablets, and desktop computers.""",
"author": "Cartologic",
"home_page": "http://cartoview.org/apps/test",
"help_url": "http://cartoview.org/apps/test/help/",
"tags": ["app", "map", "collector"],
"licence": "BSD-2",
"author_website": "http://cartologic.com",
"single_instance": False
}
def install():
pass
def uninstall():
pass
| <filename>installer.py
# -*- coding: utf-8 -*-
__author__ = "cartologic"
info = {
"title": "GeoObservation",
"description": """Users can submit new reports, review existing reports, and comment and vote on reports or observations submitted by other users. They can track the status of problems or observations they have reported.
Supports field observations and mobile data collection, whether by professional monitoring staff or by volunteers in e.g. citizen science and mobile crowdsourcing projects. The field observations app is also useful as a platform for building a variety of mobile-first websites and CRUD applications. It allows users to submit problems or observations.
The application has been optimized for smartphones but is responsively designed to be used on smartphones, tablets, and desktop computers.""",
"author": "Cartologic",
"home_page": "http://cartoview.org/apps/test",
"help_url": "http://cartoview.org/apps/test/help/",
"tags": ["app", "map", "collector"],
"licence": "BSD-2",
"author_website": "http://cartologic.com",
"single_instance": False
}
def install():
pass
def uninstall():
pass
| en | 0.933465 | # -*- coding: utf-8 -*- Users can submit new reports, review existing reports, and comment and vote on reports or observations submitted by other users. They can track the status of problems or observations they have reported. Field observations and mobile data collection, whether by professional monitoring staff or by volunteers in e.g. citizen science and mobile crowdsourcing projects. Field observations app is also useful as a platform for building a variety of mobile-first websites and CRUD applications.Allows users to submit problems or observations. The application has been optimized for smartphones but is responsively designed to be used on smartphones, tablets, and desktop computers. | 2.256048 | 2 |
model-optimizer/extensions/front/tf/softplus_ext.py | monroid/openvino | 2,406 | 6633191 | <reponame>monroid/openvino
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from mo.front.extractor import FrontExtractorOp
from extensions.ops.activation_ops import SoftPlus
class SoftPlusExtractor(FrontExtractorOp):
op = 'Softplus'
enabled = True
@classmethod
def extract(cls, node):
SoftPlus.update_node_stat(node, {})
return cls.enabled
| # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from mo.front.extractor import FrontExtractorOp
from extensions.ops.activation_ops import SoftPlus
class SoftPlusExtractor(FrontExtractorOp):
op = 'Softplus'
enabled = True
@classmethod
def extract(cls, node):
SoftPlus.update_node_stat(node, {})
return cls.enabled | de | 0.248959 | # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 | 1.70479 | 2 |
run_saccader103.py | PopGalacticHistory/imagewalker | 2 | 6633192 |
#from image_env_mnist1 import Image_env1
from RL_saccader_x1 import DeepQNetwork
from RL_networks import Stand_alone_net
import numpy as np
import time
import pickle
import copy
import SYCLOP_env as syc
from misc import *
import sys
import os
import tensorflow as tf
import cv2
# cv2.ocl.setUseOpenCL(True)
hp=HP()
# if not os.path.exists(hp.this_run_path):
# os.makedirs(hp.this_run_path)
# else:
# error('run name already exists!')
lsbjob=os.getenv('LSB_JOBID')
lsbjob = '' if lsbjob is None else lsbjob
hp.save_path = 'saved_runs'
hp.this_run_name = sys.argv[0] + '_noname_' + sys.argv[-1] + '_' + str(int(time.time())) +'_' + lsbjob
# hp.description = "only 2nd image from videos 1st frame, penalty for speed, soft q learning"
hp.description = "padding+fishyey + drift network pre-trained on Stanford dataset patches, on Stanford dataset"
hp.mem_depth = 1
hp.padding=[32,32]
hp.max_episode = 10000
hp.steps_per_episode = 100
hp.steps_between_learnings = 100
hp.steps_before_learning_begins = 100
hp.saccade_observation_scale = 100
hp.fading_mem = 0.0
hp.drift_signal_to_saccade_en = 1
hp.fisheye_file = 'fisheye_101.pkl'
hp.tau_int = 20
hp.image_path = os.getenv('HOME')+'/datasets/Stanford40/JPEGImages/*'
hp.num_images = 200
hp.images_per_scene=20
recorder_file = 'records.pkl'
hp_file = 'hp.pkl'
hp.contrast_range = [1.0,1.1]
hp.dqn_initial_network = None # 'saved_runs/run_syclop_generic_cnn_vfb_neu.py_noname_1590422112_0/tempX_1.nwk' # None #'saved_runs/run_syclop_generic_cnn_vfb_neu.py_noname_1589383850_0/tempX_1.nwk'
hp.drift_initial_network='ref_nets/drift_net_sf40_ae2//trained.nwk' #'ref_nets/drift_net1/trained.nwk'
hp.drift_net_abs_en = False
if hp.fisheye_file is None:
fy_dict = None
else:
with open(hp.fisheye_file,'rb') as f:
fy_dict=pickle.load(f)
def deploy_logs():
if not os.path.exists(hp.save_path):
os.makedirs(hp.save_path)
dir_success = False
for sfx in range(1): # todo legacy
candidate_path = hp.save_path + '/' + hp.this_run_name + '_' + str(os.getpid()) + '/'
if not os.path.exists(candidate_path):
hp.this_run_path = candidate_path
os.makedirs(hp.this_run_path)
dir_success = True
break
if not dir_success:
error('run name already exists!')
sys.stdout = Logger(hp.this_run_path+'log.log')
print('results are in:', hp.this_run_path)
print('description: ', hp.description)
print('hyper-parameters (partial):', hp)
def local_observer(sensor,agent,integrator):
normfactor=1.0/256.0
return np.concatenate([normfactor*sensor.dvs_view.reshape([-1]), agent.qdot, integrator.reshape([-1])],axis=0)
def saccade_observer(sensor,drift_observation):
normfactor=1.0/256.0
return np.concatenate([normfactor*sensor.frame_view.reshape([-1]), hp.drift_signal_to_saccade_en*drift_observation.reshape([-1])],axis=0)
def drift_state_for_saccade_observation(current_state, integrator_state):
return integrator_state
def drift_state_for_integrator(drift_net_state, abs_en=False,layer=2):
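# Pick out the activations of a single hidden layer of the drift network
# (layer 2 by default, presumably the 16-unit bottleneck of the autoencoder
# defined below), optionally rectified with abs(), to feed the leaky integrator.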
if abs_en:
return np.abs(drift_net_state[layer])
else:
return drift_net_state[layer]
def index_to_coord(i,xymax,offset=[0,0]):
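# Map a flat action index onto 2D coordinates of a grid of shape xymax:
# x = i % width + offset[0], y = -(i // width + offset[1]), with width = xymax[1].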
return [i%xymax[1]+offset[0],-(i//xymax[1]+offset[1])]
def run_env():
scene = None
old_policy_map=0
step = 0
best_thus_far = -1e10
running_ave_reward = 0
running_timing = 0
tlist=[0]*13
for episode in range(hp.max_episode):
saccade_observation = 0*np.random.uniform(0,1,size=[hp.mem_depth, saccade_observation_size])
saccade_observation_ = 0*np.random.uniform(0,1,size=[hp.mem_depth, saccade_observation_size])
image, _ = read_random_image_from_path(hp.image_path, grayscale=True, padding=hp.padding)
del scene
# print('---------------------------------------------------')
scene = syc.Scene(image_matrix=image)
saccade_agent.reset(
q_reset=np.array([100, 100]),
max_q=[scene.maxx - sensor.hp.winx, scene.maxy - sensor.hp.winy])
integrator_state = 0
saccade_agent.reset()
# print('debug scene: ', scene.maxx, scene.maxy )
# print('debug scene2: ', scene.image.shape )
# print('debug agent: ', saccade_agent.q )
# sensor.reset()
# print('debug, get_view',sensor.get_view( scene, saccade_agent).shape)
# print(scene.maxy - saccade_agent.q[1] - sensor.hp.winy, scene.maxy - saccade_agent.q[1], saccade_agent.q[0], saccade_agent.q[0] + sensor.hp.winx)
# print(scene.maxy , saccade_agent.q[1] , sensor.hp.winy, scene.maxy , saccade_agent.q[1], saccade_agent.q[0], saccade_agent.q[0] , sensor.hp.winx)
sensor.update(scene, saccade_agent)
sensor.update(scene, saccade_agent)
saccade_action = 64 * 32 + 32
for step_prime in range(hp.steps_per_episode):
# print('debug saccade_agent.q:', saccade_agent.q)
tlist[0] = time.time()
drift_net_state = drift_net.eval_incl_layers(1. / 256 * sensor.central_frame_view.reshape(
[1, -1])) # todo - change to some integrated version of the DVS view when adding the drift loop
tlist[1] = time.time()
high_pass_layer = drift_state_for_integrator(drift_net_state, abs_en=hp.drift_net_abs_en) - integrator_state
tlist[2] = time.time()
integrator_state = (1 - 1. / hp.tau_int) * integrator_state + (
1. / hp.tau_int) * drift_state_for_integrator(drift_net_state)
tlist[3] = time.time()
reward.update_rewards(sensor=sensor, agent=saccade_agent, network=high_pass_layer)
tlist[4] = time.time()
saccade_RL.store_transition(saccade_observation.reshape([-1]), saccade_action, reward.reward)
tlist[5] = time.time()
saccade_observation = hp.saccade_observation_scale * saccade_observer(sensor,
drift_state_for_saccade_observation(
drift_net_state,
integrator_state))
tlist[6] = time.time()
saccade_action, ent = saccade_RL.choose_action(saccade_observation.reshape([-1]),
discard_black_areas=True,
black_area=(sensor.frame_view>1e-9))
running_ave_reward = 0.999 * running_ave_reward + 0.001 * np.array(
[reward.reward] + reward.rewards.tolist())
tlist[7] = time.time()
dq = index_to_coord(saccade_action,sensor.frame_view.shape,offset=[-31,-31])
tlist[8] = time.time()
dq_rescaled = dq if hp.fisheye_file is None else undistort_q_poly(dq,fy_dict['w']).squeeze().astype(np.int64)
tlist[9] = time.time()
saccade_agent.act(dq_rescaled)
tlist[10] = time.time()
sensor.update(scene, saccade_agent)
tlist[11] = time.time()
running_timing = 0.999 * running_timing + 0.001 * np.concatenate((np.diff(np.array(tlist[:-1])),[tlist[-1]]))
# scene.update()
# observation_ *= hp.fading_mem
# observation_ += local_observer(sensor, agent,-integrator_state) # todo: generalize
# observation = copy.copy(observation_)
step += 1
if (step > hp.steps_before_learning_begins) and (step % hp.steps_between_learnings == 0):
t0=time.time()
saccade_RL.learn()
tlist[12]=time.time()-t0
if step % 10000 < 1000:
pass
# recorder.record(
# [saccade_agent.q[0], saccade_agent.q[1], reward.reward] + reward.rewards.tolist() + [saccade_RL.epsilon])
# [agent.q_ana[0], agent.q_ana[1], reward.reward] + reward.rewards.tolist() + [RL.epsilon])
if step%1000 ==0:
print(episode,step,' running reward ',running_ave_reward)
print(' entropy:', ent)
print('timing = ', running_timing)
if running_ave_reward[0] > best_thus_far:
best_thus_far = running_ave_reward[0]
saccade_RL.dqn.save_nwk_param(hp.this_run_path+'best_liron.nwk')
print('saved best network, mean reward: ', best_thus_far)
if step%10000 ==0:
# recorder.plot()
saccade_RL.dqn.save_nwk_param(hp.this_run_path+'tempX_saccade.nwk')
drift_net.save_nwk_param(hp.this_run_path+'tempX_drift.nwk')
# debug_policy_plot()
if step % 100000 == 0:
recorder.save(hp.this_run_path+recorder_file)
# recorder.save(hp.this_run_path+recorder_file)
if __name__ == "__main__":
recorder = Recorder(n=6)
sensor = syc.Sensor( fisheye=fy_dict)
saccade_agent = syc.Saccadic_Agent()
reward = syc.Rewards(reward_types=['network'],relative_weights=[100.0])
# observation_size = sensor.hp.winx*sensor.hp.winy*2
saccade_observation_size = 64*64+16
# saccade_RL = DeepQNetwork(np.prod(saccade_agent.max_q), saccade_observation_size,
saccade_RL=DeepQNetwork(64*64, saccade_observation_size,
n_features_shaped=list(np.shape(sensor.dvs_view))+[1],
shape_fun= None,
reward_decay=0.99,
replace_target_iter=10,
memory_size=100000,
e_greedy_increment=0.0001,
learning_rate=0.0025,
double_q=True,
dqn_mode=True,
soft_q_type='boltzmann',
beta_schedule=[[400000//hp.steps_between_learnings, 1], [700000//hp.steps_between_learnings, 10]],
arch='conv_saccades_v1',
n_modulating_features=16
)
# at this point drift network is a standalone network taken from some external source (e.g. pretrained)
# in future it will be an action generating network from the drift loop
# drift_net = Stand_alone_net(16*16,10,arch='mlp', layer_size = [None]+[100]+[100]+[ None])
drift_net = Stand_alone_net(16*16,16*16,arch='mlp', layer_size = [None]+[100,16,100]+[ None]) #ae
drift_net.assign_session_to_nwk(saccade_RL.dqn.sess)
saccade_RL.dqn.sess.run(tf.global_variables_initializer())
saccade_RL.dqn.reset()
drift_net.load_nwk_param(hp.drift_initial_network)
if not(hp.dqn_initial_network is None):
saccade_RL.dqn.load_nwk_param(hp.dqn_initial_network)
# hp.scene = scene.hp
hp.sensor = sensor.hp
hp.saccade_agent = saccade_agent.hp
hp.reward = reward.hp
hp.saccade_RL = saccade_RL.hp
deploy_logs()
with open(hp.this_run_path+hp_file, 'wb') as f:
pickle.dump(hp, f)
run_env()
print('results are in:', hp.this_run_path)
|
#from image_env_mnist1 import Image_env1
from RL_saccader_x1 import DeepQNetwork
from RL_networks import Stand_alone_net
import numpy as np
import time
import pickle
import copy
import SYCLOP_env as syc
from misc import *
import sys
import os
import tensorflow as tf
import cv2
# cv2.ocl.setUseOpenCL(True)
hp=HP()
# if not os.path.exists(hp.this_run_path):
# os.makedirs(hp.this_run_path)
# else:
# error('run name already exists!')
lsbjob=os.getenv('LSB_JOBID')
lsbjob = '' if lsbjob is None else lsbjob
hp.save_path = 'saved_runs'
hp.this_run_name = sys.argv[0] + '_noname_' + sys.argv[-1] + '_' + str(int(time.time())) +'_' + lsbjob
# hp.description = "only 2nd image from videos 1st frame, penalty for speed, soft q learning"
hp.description = "padding+fishyey + drift network pre-trained on Stanford dataset patches, on Stanford dataset"
hp.mem_depth = 1
hp.padding=[32,32]
hp.max_episode = 10000
hp.steps_per_episode = 100
hp.steps_between_learnings = 100
hp.steps_before_learning_begins = 100
hp.saccade_observation_scale = 100
hp.fading_mem = 0.0
hp.drift_signal_to_saccade_en = 1
hp.fisheye_file = 'fisheye_101.pkl'
hp.tau_int = 20
hp.image_path = os.getenv('HOME')+'/datasets/Stanford40/JPEGImages/*'
hp.num_images = 200
hp.images_per_scene=20
recorder_file = 'records.pkl'
hp_file = 'hp.pkl'
hp.contrast_range = [1.0,1.1]
hp.dqn_initial_network = None # 'saved_runs/run_syclop_generic_cnn_vfb_neu.py_noname_1590422112_0/tempX_1.nwk' # None #'saved_runs/run_syclop_generic_cnn_vfb_neu.py_noname_1589383850_0/tempX_1.nwk'
hp.drift_initial_network='ref_nets/drift_net_sf40_ae2//trained.nwk' #'ref_nets/drift_net1/trained.nwk'
hp.drift_net_abs_en = False
if hp.fisheye_file is None:
fy_dict = None
else:
with open(hp.fisheye_file,'rb') as f:
fy_dict=pickle.load(f)
def deploy_logs():
if not os.path.exists(hp.save_path):
os.makedirs(hp.save_path)
dir_success = False
for sfx in range(1): # todo legacy
candidate_path = hp.save_path + '/' + hp.this_run_name + '_' + str(os.getpid()) + '/'
if not os.path.exists(candidate_path):
hp.this_run_path = candidate_path
os.makedirs(hp.this_run_path)
dir_success = True
break
if not dir_success:
error('run name already exists!')
sys.stdout = Logger(hp.this_run_path+'log.log')
print('results are in:', hp.this_run_path)
print('description: ', hp.description)
print('hyper-parameters (partial):', hp)
def local_observer(sensor,agent,integrator):
normfactor=1.0/256.0
return np.concatenate([normfactor*sensor.dvs_view.reshape([-1]), agent.qdot, integrator.reshape([-1])],axis=0)
def saccade_observer(sensor,drift_observation):
normfactor=1.0/256.0
return np.concatenate([normfactor*sensor.frame_view.reshape([-1]), hp.drift_signal_to_saccade_en*drift_observation.reshape([-1])],axis=0)
def drift_state_for_saccade_observation(current_state, integrator_state):
return integrator_state
def drift_state_for_integrator(drift_net_state, abs_en=False,layer=2):
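# Pick out the activations of a single hidden layer of the drift network
# (layer 2 by default, presumably the 16-unit bottleneck of the autoencoder
# defined below), optionally rectified with abs(), to feed the leaky integrator.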
if abs_en:
return np.abs(drift_net_state[layer])
else:
return drift_net_state[layer]
def index_to_coord(i,xymax,offset=[0,0]):
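# Map a flat action index onto 2D coordinates of a grid of shape xymax:
# x = i % width + offset[0], y = -(i // width + offset[1]), with width = xymax[1].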
return [i%xymax[1]+offset[0],-(i//xymax[1]+offset[1])]
def run_env():
scene = None
old_policy_map=0
step = 0
best_thus_far = -1e10
running_ave_reward = 0
running_timing = 0
tlist=[0]*13
for episode in range(hp.max_episode):
saccade_observation = 0*np.random.uniform(0,1,size=[hp.mem_depth, saccade_observation_size])
saccade_observation_ = 0*np.random.uniform(0,1,size=[hp.mem_depth, saccade_observation_size])
image, _ = read_random_image_from_path(hp.image_path, grayscale=True, padding=hp.padding)
del scene
# print('---------------------------------------------------')
scene = syc.Scene(image_matrix=image)
saccade_agent.reset(
q_reset=np.array([100, 100]),
max_q=[scene.maxx - sensor.hp.winx, scene.maxy - sensor.hp.winy])
integrator_state = 0
saccade_agent.reset()
# print('debug scene: ', scene.maxx, scene.maxy )
# print('debug scene2: ', scene.image.shape )
# print('debug agent: ', saccade_agent.q )
# sensor.reset()
# print('debug, get_view',sensor.get_view( scene, saccade_agent).shape)
# print(scene.maxy - saccade_agent.q[1] - sensor.hp.winy, scene.maxy - saccade_agent.q[1], saccade_agent.q[0], saccade_agent.q[0] + sensor.hp.winx)
# print(scene.maxy , saccade_agent.q[1] , sensor.hp.winy, scene.maxy , saccade_agent.q[1], saccade_agent.q[0], saccade_agent.q[0] , sensor.hp.winx)
sensor.update(scene, saccade_agent)
sensor.update(scene, saccade_agent)
saccade_action = 64 * 32 + 32
for step_prime in range(hp.steps_per_episode):
# print('debug saccade_agent.q:', saccade_agent.q)
tlist[0] = time.time()
drift_net_state = drift_net.eval_incl_layers(1. / 256 * sensor.central_frame_view.reshape(
[1, -1])) # todo - change to some integrated version of the DVS view when adding the drift loop
tlist[1] = time.time()
high_pass_layer = drift_state_for_integrator(drift_net_state, abs_en=hp.drift_net_abs_en) - integrator_state
tlist[2] = time.time()
integrator_state = (1 - 1. / hp.tau_int) * integrator_state + (
1. / hp.tau_int) * drift_state_for_integrator(drift_net_state)
tlist[3] = time.time()
reward.update_rewards(sensor=sensor, agent=saccade_agent, network=high_pass_layer)
tlist[4] = time.time()
saccade_RL.store_transition(saccade_observation.reshape([-1]), saccade_action, reward.reward)
tlist[5] = time.time()
saccade_observation = hp.saccade_observation_scale * saccade_observer(sensor,
drift_state_for_saccade_observation(
drift_net_state,
integrator_state))
tlist[6] = time.time()
saccade_action, ent = saccade_RL.choose_action(saccade_observation.reshape([-1]),
discard_black_areas=True,
black_area=(sensor.frame_view>1e-9))
running_ave_reward = 0.999 * running_ave_reward + 0.001 * np.array(
[reward.reward] + reward.rewards.tolist())
tlist[7] = time.time()
dq = index_to_coord(saccade_action,sensor.frame_view.shape,offset=[-31,-31])
tlist[8] = time.time()
dq_rescaled = dq if hp.fisheye_file is None else undistort_q_poly(dq,fy_dict['w']).squeeze().astype(np.int64)
tlist[9] = time.time()
saccade_agent.act(dq_rescaled)
tlist[10] = time.time()
sensor.update(scene, saccade_agent)
tlist[11] = time.time()
running_timing = 0.999 * running_timing + 0.001 * np.concatenate((np.diff(np.array(tlist[:-1])),[tlist[-1]]))
# scene.update()
# observation_ *= hp.fading_mem
# observation_ += local_observer(sensor, agent,-integrator_state) # todo: generalize
# observation = copy.copy(observation_)
step += 1
if (step > hp.steps_before_learning_begins) and (step % hp.steps_between_learnings == 0):
t0=time.time()
saccade_RL.learn()
tlist[12]=time.time()-t0
if step % 10000 < 1000:
pass
# recorder.record(
# [saccade_agent.q[0], saccade_agent.q[1], reward.reward] + reward.rewards.tolist() + [saccade_RL.epsilon])
# [agent.q_ana[0], agent.q_ana[1], reward.reward] + reward.rewards.tolist() + [RL.epsilon])
if step%1000 ==0:
print(episode,step,' running reward ',running_ave_reward)
print(' entropy:', ent)
print('timing = ', running_timing)
if running_ave_reward[0] > best_thus_far:
best_thus_far = running_ave_reward[0]
saccade_RL.dqn.save_nwk_param(hp.this_run_path+'best_liron.nwk')
print('saved best network, mean reward: ', best_thus_far)
if step%10000 ==0:
# recorder.plot()
saccade_RL.dqn.save_nwk_param(hp.this_run_path+'tempX_saccade.nwk')
drift_net.save_nwk_param(hp.this_run_path+'tempX_drift.nwk')
# debug_policy_plot()
if step % 100000 == 0:
recorder.save(hp.this_run_path+recorder_file)
# recorder.save(hp.this_run_path+recorder_file)
if __name__ == "__main__":
recorder = Recorder(n=6)
sensor = syc.Sensor( fisheye=fy_dict)
saccade_agent = syc.Saccadic_Agent()
reward = syc.Rewards(reward_types=['network'],relative_weights=[100.0])
# observation_size = sensor.hp.winx*sensor.hp.winy*2
saccade_observation_size = 64*64+16
# saccade_RL = DeepQNetwork(np.prod(saccade_agent.max_q), saccade_observation_size,
saccade_RL=DeepQNetwork(64*64, saccade_observation_size,
n_features_shaped=list(np.shape(sensor.dvs_view))+[1],
shape_fun= None,
reward_decay=0.99,
replace_target_iter=10,
memory_size=100000,
e_greedy_increment=0.0001,
learning_rate=0.0025,
double_q=True,
dqn_mode=True,
soft_q_type='boltzmann',
beta_schedule=[[400000//hp.steps_between_learnings, 1], [700000//hp.steps_between_learnings, 10]],
arch='conv_saccades_v1',
n_modulating_features=16
)
# at this point drift network is a standalone network taken from some external source (e.g. pretrained)
# in future it will be an action generating network from the drift loop
# drift_net = Stand_alone_net(16*16,10,arch='mlp', layer_size = [None]+[100]+[100]+[ None])
drift_net = Stand_alone_net(16*16,16*16,arch='mlp', layer_size = [None]+[100,16,100]+[ None]) #ae
drift_net.assign_session_to_nwk(saccade_RL.dqn.sess)
saccade_RL.dqn.sess.run(tf.global_variables_initializer())
saccade_RL.dqn.reset()
drift_net.load_nwk_param(hp.drift_initial_network)
if not(hp.dqn_initial_network is None):
saccade_RL.dqn.load_nwk_param(hp.dqn_initial_network)
# hp.scene = scene.hp
hp.sensor = sensor.hp
hp.saccade_agent = saccade_agent.hp
hp.reward = reward.hp
hp.saccade_RL = saccade_RL.hp
deploy_logs()
with open(hp.this_run_path+hp_file, 'wb') as f:
pickle.dump(hp, f)
run_env()
print('results are in:', hp.this_run_path)
| en | 0.277044 | #from image_env_mnist1 import Image_env1 # cv2.ocl.setUseOpenCL(True) # if not os.path.exists(hp.this_run_path): # os.makedirs(hp.this_run_path) # else: # error('run name already exists!') # hp.description = "only 2nd image from videos 1st frame, penalty for speed, soft q learning" # 'saved_runs/run_syclop_generic_cnn_vfb_neu.py_noname_1590422112_0/tempX_1.nwk' # None #'saved_runs/run_syclop_generic_cnn_vfb_neu.py_noname_1589383850_0/tempX_1.nwk' #'ref_nets/drift_net1/trained.nwk' # todo legacy # print('---------------------------------------------------') # print('debug scene: ', scene.maxx, scene.maxy ) # print('debug scene2: ', scene.image.shape ) # print('debug agent: ', saccade_agent.q ) # sensor.reset() # print('debug, get_view',sensor.get_view( scene, saccade_agent).shape) # print(scene.maxy - saccade_agent.q[1] - sensor.hp.winy, scene.maxy - saccade_agent.q[1], saccade_agent.q[0], saccade_agent.q[0] + sensor.hp.winx) # print(scene.maxy , saccade_agent.q[1] , sensor.hp.winy, scene.maxy , saccade_agent.q[1], saccade_agent.q[0], saccade_agent.q[0] , sensor.hp.winx) # print('debug saccade_agent.q:', saccade_agent.q) # todo - change to some integrated version of the DVS view when adding the drift loop # scene.update() # observation_ *= hp.fading_mem # observation_ += local_observer(sensor, agent,-integrator_state) # todo: generalize # observation = copy.copy(observation_) # recorder.record( # [saccade_agent.q[0], saccade_agent.q[1], reward.reward] + reward.rewards.tolist() + [saccade_RL.epsilon]) # [agent.q_ana[0], agent.q_ana[1], reward.reward] + reward.rewards.tolist() + [RL.epsilon]) # recorder.plot() # debug_policy_plot() # recorder.save(hp.this_run_path+recorder_file) # observation_size = sensor.hp.winx*sensor.hp.winy*2 # saccade_RL = DeepQNetwork(np.prod(saccade_agent.max_q), saccade_observation_size, # at this point drift network is a standalone network taken from some external source (e.g. pretrained) # in future it will be an action generating network from the drift loop # drift_net = Stand_alone_net(16*16,10,arch='mlp', layer_size = [None]+[100]+[100]+[ None]) #ae # hp.scene = scene.hp | 2.075562 | 2 |
django_pg/models/fields/__init__.py | OlgaBorisova/django-pgfields | 1 | 6633193 | from __future__ import absolute_import, unicode_literals
from django_pg.models.fields.array import ArrayField
from django_pg.models.fields.composite import CompositeField
from django_pg.models.fields.datetime_ import DateTimeField
from django_pg.models.fields.json import JSONField
from django_pg.models.fields.uuid import UUIDField
| from __future__ import absolute_import, unicode_literals
from django_pg.models.fields.array import ArrayField
from django_pg.models.fields.composite import CompositeField
from django_pg.models.fields.datetime_ import DateTimeField
from django_pg.models.fields.json import JSONField
from django_pg.models.fields.uuid import UUIDField
| none | 1 | 1.243742 | 1 |
|
aio_pika/robust_connection.py | nhumrich/aio-pika | 0 | 6633194 | <reponame>nhumrich/aio-pika
import asyncio
from functools import wraps
from logging import getLogger
from typing import Callable, Type
from weakref import WeakSet
from aiormq.connection import parse_bool, parse_int
from .connection import Connection, ConnectionType, connect
from .exceptions import CONNECTION_EXCEPTIONS
from .robust_channel import RobustChannel
from .tools import CallbackCollection
from .types import TimeoutType
log = getLogger(__name__)
def _ensure_connection(func):
@wraps(func)
def wrap(self, *args, **kwargs):
if self.is_closed:
raise RuntimeError("Connection closed")
return func(self, *args, **kwargs)
return wrap
class RobustConnection(Connection):
""" Robust connection """
CHANNEL_CLASS = RobustChannel
KWARGS_TYPES = (
("reconnect_interval", parse_int, "5"),
("fail_fast", parse_bool, "1"),
)
def __init__(self, url, loop=None, **kwargs):
super().__init__(url=url, loop=loop, **kwargs)
self.connect_kwargs = {}
self.reconnect_interval = self.kwargs["reconnect_interval"]
self.fail_fast = self.kwargs["fail_fast"]
self.__channels = WeakSet()
self._reconnect_callbacks = CallbackCollection(self)
self._connect_lock = asyncio.Lock()
self._closed = False
self.connected = asyncio.Event()
@property
def reconnecting(self) -> bool:
return self._connect_lock.locked()
@property
def reconnect_callbacks(self) -> CallbackCollection:
return self._reconnect_callbacks
@property
def _channels(self) -> dict:
return {ch.number: ch for ch in self.__channels}
def __repr__(self):
return '<{0}: "{1}" {2} channels>'.format(
self.__class__.__name__, str(self), len(self.__channels),
)
def _on_connection_close(self, connection, closing, *args, **kwargs):
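# Invoked when the underlying AMQP connection is lost. Unless a reconnect is
# already in progress, clear the `connected` event and schedule a fresh
# reconnect attempt after `reconnect_interval` seconds.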
if self.reconnecting:
return
self.connected.clear()
self.connection = None
super()._on_connection_close(connection, closing)
log.info(
"Connection to %s closed. Reconnecting after %r seconds.",
self,
self.reconnect_interval,
)
self.loop.call_later(
self.reconnect_interval,
lambda: self.loop.create_task(self.reconnect()),
)
def add_reconnect_callback(self, callback: Callable[[], None]):
""" Add callback which will be called after reconnect.
:return: None
"""
self._reconnect_callbacks.add(callback)
async def __cleanup_connection(self, exc):
if self.connection is None:
return
await asyncio.gather(
self.connection.close(exc), return_exceptions=True,
)
self.connection = None
async def connect(self, timeout: TimeoutType = None, **kwargs):
if self.is_closed:
raise RuntimeError("{!r} connection closed".format(self))
if kwargs:
# Store connect kwargs for reconnects
self.connect_kwargs = kwargs
if self.reconnecting:
log.warning(
"Connect method called but connection %r is "
"reconnecting right now.",
self,
)
async with self._connect_lock:
while True:
try:
result = await super().connect(
timeout=timeout, **self.connect_kwargs
)
for channel in self._channels.values():
await channel.reopen()
self.fail_fast = False
self.connected.set()
return result
except CONNECTION_EXCEPTIONS as e:
if self.fail_fast:
raise
await self.__cleanup_connection(e)
log.warning(
'Connection attempt to "%s" failed. '
"Reconnecting after %r seconds.",
self,
self.reconnect_interval,
exc_info=True,
)
except asyncio.CancelledError as e:
await self.__cleanup_connection(e)
raise
await asyncio.sleep(self.reconnect_interval)
async def reconnect(self):
await self.connect()
self._reconnect_callbacks(self)
def channel(
self,
channel_number: int = None,
publisher_confirms: bool = True,
on_return_raises=False,
):
channel = super().channel(
channel_number=channel_number,
publisher_confirms=publisher_confirms,
on_return_raises=on_return_raises,
)
self.__channels.add(channel)
return channel
@property
def is_closed(self):
""" Is this connection is closed """
return self._closed or super().is_closed
async def close(self, exc=asyncio.CancelledError):
if self.is_closed:
return
self._closed = True
if self.connection is None:
return
return await super().close(exc)
async def connect_robust(
url: str = None,
*,
host: str = "localhost",
port: int = 5672,
login: str = "guest",
password: str = "<PASSWORD>",
virtualhost: str = "/",
ssl: bool = False,
loop: asyncio.AbstractEventLoop = None,
ssl_options: dict = None,
timeout: TimeoutType = None,
connection_class: Type[ConnectionType] = RobustConnection,
client_properties: dict = None,
**kwargs
) -> ConnectionType:
""" Make robust connection to the broker.
That means that connection state will be restored after reconnect.
After connection has been established the channels, the queues and the
exchanges with their bindings will be restored.
Example:
.. code-block:: python
import aio_pika
async def main():
connection = await aio_pika.connect_robust(
"amqp://guest:guest@127.0.0.1/"
)
Connect to localhost with default credentials:
.. code-block:: python
import aio_pika
async def main():
connection = await aio_pika.connect_robust()
.. note::
The available keys for ssl_options parameter are:
* cert_reqs
* certfile
* keyfile
* ssl_version
For an information on what the ssl_options can be set to reference the
`official Python documentation`_ .
The URL string might contain ssl parameters, e.g.
`amqps://user:pass@host//?ca_certs=ca.pem&certfile=crt.pem&keyfile=key.pem`
:param url:
RFC3986_ formatted broker address. When :class:`None`,
keyword arguments will be used.
:param host: hostname of the broker
:param port: broker port 5672 by default
:param login: username string. `'guest'` by default.
:param password: password string. `'<PASSWORD>'` by default.
:param virtualhost: virtualhost parameter. `'/'` by default
:param ssl: use SSL for connection. Should be used with addition kwargs.
:param ssl_options: A dict of values for the SSL connection.
:param timeout: connection timeout in seconds
:param loop:
Event loop (:func:`asyncio.get_event_loop()` when :class:`None`)
:param connection_class: Factory of a new connection
:param kwargs: addition parameters which will be passed to the connection.
:return: :class:`aio_pika.connection.Connection`
.. _RFC3986: https://goo.gl/MzgYAs
.. _official Python documentation: https://goo.gl/pty9xA
"""
return await connect(
url=url,
host=host,
port=port,
login=login,
password=password,
virtualhost=virtualhost,
ssl=ssl,
loop=loop,
connection_class=connection_class,
ssl_options=ssl_options,
timeout=timeout,
client_properties=client_properties,
**kwargs
)
__all__ = (
"RobustConnection",
"connect_robust",
)
| import asyncio
from functools import wraps
from logging import getLogger
from typing import Callable, Type
from weakref import WeakSet
from aiormq.connection import parse_bool, parse_int
from .connection import Connection, ConnectionType, connect
from .exceptions import CONNECTION_EXCEPTIONS
from .robust_channel import RobustChannel
from .tools import CallbackCollection
from .types import TimeoutType
log = getLogger(__name__)
def _ensure_connection(func):
@wraps(func)
def wrap(self, *args, **kwargs):
if self.is_closed:
raise RuntimeError("Connection closed")
return func(self, *args, **kwargs)
return wrap
class RobustConnection(Connection):
""" Robust connection """
CHANNEL_CLASS = RobustChannel
KWARGS_TYPES = (
("reconnect_interval", parse_int, "5"),
("fail_fast", parse_bool, "1"),
)
def __init__(self, url, loop=None, **kwargs):
super().__init__(url=url, loop=loop, **kwargs)
self.connect_kwargs = {}
self.reconnect_interval = self.kwargs["reconnect_interval"]
self.fail_fast = self.kwargs["fail_fast"]
self.__channels = WeakSet()
self._reconnect_callbacks = CallbackCollection(self)
self._connect_lock = asyncio.Lock()
self._closed = False
self.connected = asyncio.Event()
@property
def reconnecting(self) -> bool:
return self._connect_lock.locked()
@property
def reconnect_callbacks(self) -> CallbackCollection:
return self._reconnect_callbacks
@property
def _channels(self) -> dict:
return {ch.number: ch for ch in self.__channels}
def __repr__(self):
return '<{0}: "{1}" {2} channels>'.format(
self.__class__.__name__, str(self), len(self.__channels),
)
def _on_connection_close(self, connection, closing, *args, **kwargs):
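# Invoked when the underlying AMQP connection is lost. Unless a reconnect is
# already in progress, clear the `connected` event and schedule a fresh
# reconnect attempt after `reconnect_interval` seconds.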
if self.reconnecting:
return
self.connected.clear()
self.connection = None
super()._on_connection_close(connection, closing)
log.info(
"Connection to %s closed. Reconnecting after %r seconds.",
self,
self.reconnect_interval,
)
self.loop.call_later(
self.reconnect_interval,
lambda: self.loop.create_task(self.reconnect()),
)
def add_reconnect_callback(self, callback: Callable[[], None]):
""" Add callback which will be called after reconnect.
:return: None
"""
self._reconnect_callbacks.add(callback)
async def __cleanup_connection(self, exc):
if self.connection is None:
return
await asyncio.gather(
self.connection.close(exc), return_exceptions=True,
)
self.connection = None
async def connect(self, timeout: TimeoutType = None, **kwargs):
if self.is_closed:
raise RuntimeError("{!r} connection closed".format(self))
if kwargs:
# Store connect kwargs for reconnects
self.connect_kwargs = kwargs
if self.reconnecting:
log.warning(
"Connect method called but connection %r is "
"reconnecting right now.",
self,
)
async with self._connect_lock:
while True:
try:
result = await super().connect(
timeout=timeout, **self.connect_kwargs
)
for channel in self._channels.values():
await channel.reopen()
self.fail_fast = False
self.connected.set()
return result
except CONNECTION_EXCEPTIONS as e:
if self.fail_fast:
raise
await self.__cleanup_connection(e)
log.warning(
'Connection attempt to "%s" failed. '
"Reconnecting after %r seconds.",
self,
self.reconnect_interval,
exc_info=True,
)
except asyncio.CancelledError as e:
await self.__cleanup_connection(e)
raise
await asyncio.sleep(self.reconnect_interval)
async def reconnect(self):
await self.connect()
self._reconnect_callbacks(self)
def channel(
self,
channel_number: int = None,
publisher_confirms: bool = True,
on_return_raises=False,
):
channel = super().channel(
channel_number=channel_number,
publisher_confirms=publisher_confirms,
on_return_raises=on_return_raises,
)
self.__channels.add(channel)
return channel
@property
def is_closed(self):
""" Is this connection is closed """
return self._closed or super().is_closed
async def close(self, exc=asyncio.CancelledError):
if self.is_closed:
return
self._closed = True
if self.connection is None:
return
return await super().close(exc)
async def connect_robust(
url: str = None,
*,
host: str = "localhost",
port: int = 5672,
login: str = "guest",
password: str = "<PASSWORD>",
virtualhost: str = "/",
ssl: bool = False,
loop: asyncio.AbstractEventLoop = None,
ssl_options: dict = None,
timeout: TimeoutType = None,
connection_class: Type[ConnectionType] = RobustConnection,
client_properties: dict = None,
**kwargs
) -> ConnectionType:
""" Make robust connection to the broker.
That means that connection state will be restored after reconnect.
After connection has been established the channels, the queues and the
exchanges with their bindings will be restored.
Example:
.. code-block:: python
import aio_pika
async def main():
connection = await aio_pika.connect_robust(
"amqp://guest:guest@127.0.0.1/"
)
Connect to localhost with default credentials:
.. code-block:: python
import aio_pika
async def main():
connection = await aio_pika.connect_robust()
.. note::
The available keys for ssl_options parameter are:
* cert_reqs
* certfile
* keyfile
* ssl_version
    For information on what the ssl_options can be set to, refer to the
    `official Python documentation`_ .
    The URL string may contain ssl parameters, e.g.
    `amqps://user:pass@host//?ca_certs=ca.pem&certfile=crt.pem&keyfile=key.pem`
    :param url:
        RFC3986_ formatted broker address. When :class:`None`,
        keyword arguments will be used instead.
:param host: hostname of the broker
:param port: broker port 5672 by default
:param login: username string. `'guest'` by default.
:param password: password string. `'<PASSWORD>'` by default.
:param virtualhost: virtualhost parameter. `'/'` by default
    :param ssl: use SSL for the connection. Should be used with additional kwargs.
:param ssl_options: A dict of values for the SSL connection.
:param timeout: connection timeout in seconds
:param loop:
Event loop (:func:`asyncio.get_event_loop()` when :class:`None`)
:param connection_class: Factory of a new connection
    :param kwargs: additional parameters which will be passed to the connection.
:return: :class:`aio_pika.connection.Connection`
.. _RFC3986: https://goo.gl/MzgYAs
.. _official Python documentation: https://goo.gl/pty9xA
"""
return await connect(
url=url,
host=host,
port=port,
login=login,
password=password,
virtualhost=virtualhost,
ssl=ssl,
loop=loop,
connection_class=connection_class,
ssl_options=ssl_options,
timeout=timeout,
client_properties=client_properties,
**kwargs
)
__all__ = (
"RobustConnection",
"connect_robust",
) | en | 0.67387 | Robust connection Add callback which will be called after reconnect. :return: None # Store connect kwargs for reconnects Is this connection is closed Make robust connection to the broker. That means that connection state will be restored after reconnect. After connection has been established the channels, the queues and the exchanges with their bindings will be restored. Example: .. code-block:: python import aio_pika async def main(): connection = await aio_pika.connect_robust( "amqp://guest:guest@127.0.0.1/" ) Connect to localhost with default credentials: .. code-block:: python import aio_pika async def main(): connection = await aio_pika.connect_robust() .. note:: The available keys for ssl_options parameter are: * cert_reqs * certfile * keyfile * ssl_version For an information on what the ssl_options can be set to reference the `official Python documentation`_ . URL string might be contain ssl parameters e.g. `amqps://user:pass@host//?ca_certs=ca.pem&certfile=crt.pem&keyfile=key.pem` :param url: RFC3986_ formatted broker address. When :class:`None` will be used keyword arguments. :param host: hostname of the broker :param port: broker port 5672 by default :param login: username string. `'guest'` by default. :param password: password string. `'<PASSWORD>'` by default. :param virtualhost: virtualhost parameter. `'/'` by default :param ssl: use SSL for connection. Should be used with addition kwargs. :param ssl_options: A dict of values for the SSL connection. :param timeout: connection timeout in seconds :param loop: Event loop (:func:`asyncio.get_event_loop()` when :class:`None`) :param connection_class: Factory of a new connection :param kwargs: addition parameters which will be passed to the connection. :return: :class:`aio_pika.connection.Connection` .. _RFC3986: https://goo.gl/MzgYAs .. _official Python documentation: https://goo.gl/pty9xA | 2.088634 | 2 |
example/0_Basic_usage_of_the_library/openCV/18-对象测量.py | RecluseXU/learning_spider | 38 | 6633195 | # -*- encoding: utf-8 -*-
'''
@Time    :   2018-3-27
@Author  :   EvilRecluse
@Contact :   https://github.com/RecluseXU
@Desc    :   Object measurement
    Arc length and area
        The unit of measurement is the pixel
        Contour detection must be performed before arc length and area can be computed
    Polygon fitting
        Gets a polygon approximation of a contour
        approxPolyDP
            contour
            epsilon: the smaller it is, the closer the polyline follows the true shape
            close: whether the curve is a closed region
    Geometric moments
        See 18-几何矩计算 (the geometric-moments example)
'''
# here put the import lib
import cv2 as cv
import numpy as np
def measure_object(image):
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)  # convert to grayscale
    ret, binary = cv.threshold(
        gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)  # binarize
    # Note that cv.THRESH_BINARY_INV is used here, because only white regions
    # are detected -- compare with the grayscale image and you will see why.
    print('threshold value %s' % ret)
    cv.imshow('binary image', binary)
    contours, hireachy = cv.findContours(
        binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)  # contour detection
    print(str(contours).replace(' ', '').replace('\n', ' '))
    for i, contour in enumerate(contours):  # iterate over the detected contours
        area = cv.contourArea(contour)
        x, y, w, h = cv.boundingRect(contour)
        print('rectangle rate:', min(w, h)/max(w, h))
        mm = cv.moments(contour)  # get the geometric moments
        if(mm['m00'] != 0):
            # m00 may be 0 and it is used as a divisor below, so guard against that
            cx = mm['m10']/mm['m00']
            cy = mm['m01']/mm['m00']
            cv.circle(image, (np.int(cx), np.int(cy)),
                      3, (0, 0, 255), -1)  # draw the centroid as a small circle
        cv.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 2)  # draw the bounding rectangle
        approxCurve = cv.approxPolyDP(contour, 4, True)
        print(approxCurve.shape)
        # try to draw the contour according to its number of polygon edges
        if (approxCurve.shape[0] > 10):  # when the shape has more than 10 edges
            cv.drawContours(image, contours, i, (255, 0, 0), 2)
        if(approxCurve.shape[0] == 4):  # when the shape is a quadrilateral
            cv.drawContours(image, contours, i, (255, 255, 0), 2)
    cv.imshow('measure-contours', image)
src = cv.imread(
'example/0_Basic_usage_of_the_library/openCV/picture/goodmancard.jpg')
cv.imshow('src', src)
measure_object(src)
cv.waitKey(0)
cv.destroyAllWindows()
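# Side note on the epsilon argument: the script above passes a fixed epsilon=4
# to cv.approxPolyDP, which happens to suit this sample image.  A common OpenCV
# idiom (an alternative, not what the original author used) is to scale epsilon
# with the contour perimeter:
#
#     perimeter = cv.arcLength(contour, True)
#     approxCurve = cv.approxPolyDP(contour, 0.01 * perimeter, True)
#
# Smaller factors make the polyline follow the true shape more closely.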
| # -*- encoding: utf-8 -*-
'''
@Time    :   2018-3-27
@Author  :   EvilRecluse
@Contact :   https://github.com/RecluseXU
@Desc    :   Object measurement
    Arc length and area
        The unit of measurement is the pixel
        Contour detection must be performed before arc length and area can be computed
    Polygon fitting
        Gets a polygon approximation of a contour
        approxPolyDP
            contour
            epsilon: the smaller it is, the closer the polyline follows the true shape
            close: whether the curve is a closed region
    Geometric moments
        See 18-几何矩计算 (the geometric-moments example)
'''
# here put the import lib
import cv2 as cv
import numpy as np
def measure_object(image):
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)  # convert to grayscale
    ret, binary = cv.threshold(
        gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)  # binarize
    # Note that cv.THRESH_BINARY_INV is used here, because only white regions
    # are detected -- compare with the grayscale image and you will see why.
    print('threshold value %s' % ret)
    cv.imshow('binary image', binary)
    contours, hireachy = cv.findContours(
        binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)  # contour detection
    print(str(contours).replace(' ', '').replace('\n', ' '))
    for i, contour in enumerate(contours):  # iterate over the detected contours
        area = cv.contourArea(contour)
        x, y, w, h = cv.boundingRect(contour)
        print('rectangle rate:', min(w, h)/max(w, h))
        mm = cv.moments(contour)  # get the geometric moments
        if(mm['m00'] != 0):
            # m00 may be 0 and it is used as a divisor below, so guard against that
            cx = mm['m10']/mm['m00']
            cy = mm['m01']/mm['m00']
            cv.circle(image, (np.int(cx), np.int(cy)),
                      3, (0, 0, 255), -1)  # draw the centroid as a small circle
        cv.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 2)  # draw the bounding rectangle
        approxCurve = cv.approxPolyDP(contour, 4, True)
        print(approxCurve.shape)
        # try to draw the contour according to its number of polygon edges
        if (approxCurve.shape[0] > 10):  # when the shape has more than 10 edges
            cv.drawContours(image, contours, i, (255, 0, 0), 2)
        if(approxCurve.shape[0] == 4):  # when the shape is a quadrilateral
            cv.drawContours(image, contours, i, (255, 255, 0), 2)
    cv.imshow('measure-contours', image)
src = cv.imread(
'example/0_Basic_usage_of_the_library/openCV/picture/goodmancard.jpg')
cv.imshow('src', src)
measure_object(src)
cv.waitKey(0)
cv.destroyAllWindows()
| zh | 0.882079 | # -*- encoding: utf-8 -*- @Time : 2018-3-27 @Author : EvilRecluse @Contact : https://github.com/RecluseXU @Desc : 对象测量 弧长与面积 计算单位是像素 计算弧长和面积的前提条件是进行 轮廓发现 多边形拟合 获取轮廓的多边形拟合效果 approxPolyDP contour epsilon 越小折线越逼近真实形状 close 是否为封闭区域 几何矩计算 见 18-几何矩计算 # here put the import lib # 灰度 # 二分化 # 需要注意的是这里用的是 cv.THRESH_BINARY_INV # 因为白的东西才能被识别,你看看灰度图就懂了。 # 轮廓发现 # 遍历识别结果 # 获得几何矩 # 由于m00这个东西可能为0,下面又用它来除东西,所以做个判断 # 圆形画重心 # 正方形画外形 # 尝试根据识别外形的边数来画图 # 当判断的图像的边数>10 # 当是四边形 | 2.825117 | 3 |
generate_test_src.py | rysiof/makefile-template | 1 | 6633196 | #! /usr/bin/python
# MIT License
#
# Copyright (c) 2020 <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
seed = int(sys.argv[1]) # how many directories/files to generate
target = sys.argv[2]
os.system("mkdir src")
os.system("mkdir " + os.path.join("src", target))
template_h = """#ifndef DIR_{idx}_H
#define DIR_{idx}_H
#include "common.h"
void cc_{idx}();
void cpp_{idx}();
void cxx_{idx}();
#ifdef __cplusplus
extern "C" {{
#endif // __cplusplus
void c_{idx}();
#ifdef __cplusplus
}}
#endif // __cplusplus
#endif // DIR_{idx}_H
"""
template_cc = """#include <stdio.h>
#include "f_{idx}.h"
void {type}_{idx}()
{{
printf("{type}_{idx}();\\n");
}}
"""
template_c = """#include <stdio.h>
#include "f_{idx}.h"
void c_{idx}()
{{
printf("c_{idx}();\\n");
}}
"""
main_headers = ""
main_calls = "int main(){{\n"
main_ends = "}}"
for i in range(seed):
dir = os.path.join("src", target, "dir_{idx}".format(idx=i))
os.system("mkdir " + dir)
f = open(os.path.join(dir, "f_{idx}.h".format(idx=i)), "w")
f.write(template_h.format(idx=i));
f.close()
for t in ["cc", "cxx", "cpp"]:
f = open(os.path.join(dir, "{type}_{idx}.{type}".format(type=t,idx=i)), "w")
f.write(template_cc.format(idx=i, type=t));
f.close()
    f = open(os.path.join(dir, "c_{idx}.c".format(idx=i)), "w")
f.write(template_c.format(idx=i));
f.close()
main_headers += "#include \"dir_{idx}/f_{idx}.h\"\n".format(idx=i)
for t in ["cc", "cxx", "cpp"]:
main_calls += "\t{type}_{idx}();\n".format(idx=i, type=t)
main_calls += "\tc_{idx}();\n".format(idx=i)
f = open(os.path.join("src", target, "main.cc"), "w")
f.write(main_headers + main_calls + main_ends)
f.close()
f = open(os.path.join("src", target, "common.h"), "w")
f.write("")
f.close()
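# Usage sketch (inferred from the sys.argv handling above; the count and target
# name below are arbitrary examples):
#
#     python generate_test_src.py 3 demo
#
# creates src/demo/dir_0 .. src/demo/dir_2 with one .h/.c/.cc/.cxx/.cpp group
# each, plus src/demo/main.cc and src/demo/common.h, for exercising the
# makefile template.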
| #! /usr/bin/python
# MIT License
#
# Copyright (c) 2020 <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
seed = int(sys.argv[1]) # how many directories/files to generate
target = sys.argv[2]
os.system("mkdir src")
os.system("mkdir " + os.path.join("src", target))
template_h = """#ifndef DIR_{idx}_H
#define DIR_{idx}_H
#include "common.h"
void cc_{idx}();
void cpp_{idx}();
void cxx_{idx}();
#ifdef __cplusplus
extern "C" {{
#endif // __cplusplus
void c_{idx}();
#ifdef __cplusplus
}}
#endif // __cplusplus
#endif // DIR_{idx}_H
"""
template_cc = """#include <stdio.h>
#include "f_{idx}.h"
void {type}_{idx}()
{{
printf("{type}_{idx}();\\n");
}}
"""
template_c = """#include <stdio.h>
#include "f_{idx}.h"
void c_{idx}()
{{
printf("c_{idx}();\\n");
}}
"""
main_headers = ""
main_calls = "int main(){{\n"
main_ends = "}}"
for i in range(seed):
dir = os.path.join("src", target, "dir_{idx}".format(idx=i))
os.system("mkdir " + dir)
f = open(os.path.join(dir, "f_{idx}.h".format(idx=i)), "w")
f.write(template_h.format(idx=i));
f.close()
for t in ["cc", "cxx", "cpp"]:
f = open(os.path.join(dir, "{type}_{idx}.{type}".format(type=t,idx=i)), "w")
f.write(template_cc.format(idx=i, type=t));
f.close()
    f = open(os.path.join(dir, "c_{idx}.c".format(idx=i)), "w")
f.write(template_c.format(idx=i));
f.close()
main_headers += "#include \"dir_{idx}/f_{idx}.h\"\n".format(idx=i)
for t in ["cc", "cxx", "cpp"]:
main_calls += "\t{type}_{idx}();\n".format(idx=i, type=t)
main_calls += "\tc_{idx}();\n".format(idx=i)
f = open(os.path.join("src", target, "main.cc"), "w")
f.write(main_headers + main_calls + main_ends)
f.close()
f = open(os.path.join("src", target, "common.h"), "w")
f.write("")
f.close()
| en | 0.552536 | #! /usr/bin/python # MIT License # # Copyright (c) 2020 <EMAIL> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # how many file #ifndef DIR_{idx}_H #define DIR_{idx}_H #include "common.h" void cc_{idx}(); void cpp_{idx}(); void cxx_{idx}(); #ifdef __cplusplus extern "C" {{ #endif // __cplusplus void c_{idx}(); #ifdef __cplusplus }} #endif // __cplusplus #endif // DIR_{idx}_H #include <stdio.h> #include "f_{idx}.h" void {type}_{idx}() {{ printf("{type}_{idx}();\\n"); }} #include <stdio.h> #include "f_{idx}.h" void c_{idx}() {{ printf("c_{idx}();\\n"); }} | 1.881361 | 2 |
PyBS/rpcclient.py | thusser/pybs | 0 | 6633197 | import asyncio
import json
class RpcError(Exception):
"""Exception for all RPC errors."""
pass
class RpcClient:
"""Client for remote procedure calls."""
def __init__(self, host: str = 'localhost', port: int = 16219):
"""Create a new RPC client.
Args:
host: Hostname of server.
port: Port on server to connect to.
"""
self._cur_id = 1
self._host = host
self._port = port
def __call__(self, command: str, **kwargs):
"""Calls a command on the server.
Args:
command: Name of command to run.
**kwargs: Parameters for command
Returns:
Result of command.
"""
# get event loop, run command and return result
loop = asyncio.get_event_loop()
result = loop.run_until_complete(self._send_command(command, **kwargs))
return result
async def _send_command(self, command: str, **kwargs):
"""Actually send a command to the server and wait for results.
Args:
command: Name of command to run.
**kwargs: Parameters for command
Returns:
Result of command.
"""
# open connection
reader, writer = await asyncio.open_connection(self._host, self._port)
# build message
message = {
'jsonrpc': '2.0',
'method': command,
'params': kwargs,
'id': self._cur_id
}
self._cur_id += 1
# send command
writer.write((json.dumps(message) + '\n').encode())
# wait for reply
data = await reader.readline()
# close socket
writer.close()
# decode data
rpc = json.loads(data.decode())
# got an error?
if 'error' in rpc:
# with a message?
if 'message' in rpc['error']:
raise RpcError(rpc['error']['message'])
raise RpcError('Unknown error')
# return result
return rpc['result']
__all__ = ['RpcClient', 'RpcError']
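# Minimal usage sketch.  The command name and keyword arguments are
# hypothetical -- the real commands depend on the PyBS server implementation:
#
#     client = RpcClient(host="localhost", port=16219)
#     try:
#         jobs = client("list", state="waiting")
#     except RpcError as exc:
#         print("server reported an error:", exc)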
| import asyncio
import json
class RpcError(Exception):
"""Exception for all RPC errors."""
pass
class RpcClient:
"""Client for remote procedure calls."""
def __init__(self, host: str = 'localhost', port: int = 16219):
"""Create a new RPC client.
Args:
host: Hostname of server.
port: Port on server to connect to.
"""
self._cur_id = 1
self._host = host
self._port = port
def __call__(self, command: str, **kwargs):
"""Calls a command on the server.
Args:
command: Name of command to run.
**kwargs: Parameters for command
Returns:
Result of command.
"""
# get event loop, run command and return result
loop = asyncio.get_event_loop()
result = loop.run_until_complete(self._send_command(command, **kwargs))
return result
async def _send_command(self, command: str, **kwargs):
"""Actually send a command to the server and wait for results.
Args:
command: Name of command to run.
**kwargs: Parameters for command
Returns:
Result of command.
"""
# open connection
reader, writer = await asyncio.open_connection(self._host, self._port)
# build message
message = {
'jsonrpc': '2.0',
'method': command,
'params': kwargs,
'id': self._cur_id
}
self._cur_id += 1
# send command
writer.write((json.dumps(message) + '\n').encode())
# wait for reply
data = await reader.readline()
# close socket
writer.close()
# decode data
rpc = json.loads(data.decode())
# got an error?
if 'error' in rpc:
# with a message?
if 'message' in rpc['error']:
raise RpcError(rpc['error']['message'])
raise RpcError('Unknown error')
# return result
return rpc['result']
__all__ = ['RpcClient', 'RpcError']
| en | 0.73724 | Exception for all RPC errors. Client for remote procedure calls. Create a new RPC client. Args: host: Hostname of server. port: Port on server to connect to. Calls a command on the server. Args: command: Name of command to run. **kwargs: Parameters for command Returns: Result of command. # get event loop, run command and return result Actually send a command to the server and wait for results. Args: command: Name of command to run. **kwargs: Parameters for command Returns: Result of command. # open connection # build message # send command # wait for reply # close socket # decode data # got an error? # with a message? # return result | 3.110602 | 3 |
examples/sequencer_scripting422.py | DreamingPoet/UnrealEnginePython | 14 | 6633198 | # the Sequencer API support has been sponsored by <NAME> (http://www.mattwhelan.com/)
import unreal_engine as ue
from unreal_engine.classes import MovieSceneAudioTrack, LevelSequenceFactoryNew, MovieSceneSkeletalAnimationTrack, Character, SkeletalMesh, MovieScene3DTransformTrack, CineCameraActor, AnimSequence
import time
from unreal_engine.structs import FloatRange, FloatRangeBound, MovieSceneObjectBindingID
from unreal_engine import FTransform, FVector
from unreal_engine.enums import EMovieSceneObjectBindingSpace
# create a new level sequence asset
factory = LevelSequenceFactoryNew()
seq = factory.factory_create_new('/Game/MovieMaster' + str(int(time.time())))
if ue.ENGINE_MINOR_VERSION >= 20:
print(seq.MovieScene.TickResolution.Numerator)
seq.sequencer_set_playback_range(0, 30)
# add an audio track (without sound section ;) to the sequence
audio = seq.sequencer_add_master_track(MovieSceneAudioTrack)
# get a reference to the editor world (to spawn actors)
world = ue.get_editor_world()
# spawn a new character and modify it (post_edit_change will allow the editor/sequencer to be notified of actor updates)
character = world.actor_spawn(Character)
# notify modifications are about to happen...
character.modify()
character.Mesh.SkeletalMesh = ue.load_object(SkeletalMesh, '/Game/Mannequin/Character/Mesh/SK_Mannequin.SK_Mannequin')
# finalize the actor
character.post_edit_change()
# add to the sequencer as a possessable (shortcut method returning the guid as string)
guid = seq.sequencer_add_actor(character)
# add an animation track mapped to the just added actor
anim = seq.sequencer_add_track(MovieSceneSkeletalAnimationTrack, guid)
# create 3 animations sections (assign AnimSequence field to set the animation to play)
anim_sequence = anim.sequencer_track_add_section()
anim_sequence.sequencer_set_section_range(1, 3)
anim_sequence.Params.Animation = ue.load_object(AnimSequence, '/Game/Mannequin/Animations/ThirdPersonRun.ThirdPersonRun')
anim_sequence.RowIndex = 0
anim_sequence2 = anim.sequencer_track_add_section()
anim_sequence2.RowIndex = 1
anim_sequence2.sequencer_set_section_range(2, 5)
anim_sequence3 = anim.sequencer_track_add_section()
anim_sequence3.RowIndex = 1
anim_sequence3.SlotName = 'Hello'
anim_sequence3.sequencer_set_section_range(0, 30)
# add a transform track/section in one shot to the actor
transform = seq.sequencer_add_track(MovieScene3DTransformTrack, guid).sequencer_track_add_section()
transform.sequencer_set_section_range(0, 50)
# add keyframes to the transform section (from 4.20 you can directly use the reflection api, and the methods return the frame numbers)
print(transform.sequencer_section_add_key(0, FTransform(FVector(0, 0, 17 * 100))))
print(transform.sequencer_section_add_key(1.1, FTransform(FVector(0, 0, 22 * 100))))
print(transform.sequencer_section_add_key(2.2, FTransform(FVector(0, 0, 26 * 100))))
print(transform.sequencer_section_add_key(3.3, FTransform(FVector(0, 0, 30 * 100))))
# add camera cut track (can be only one)
camera_cut_track = seq.sequencer_add_camera_cut_track()
# add two camera views
camera1 = camera_cut_track.sequencer_track_add_section()
camera2 = camera_cut_track.sequencer_track_add_section()
# spawn 2 cine cameras in the stage and possess them with the sequencer
(cine_camera, camera_guid) = seq.sequencer_add_camera()
(cine_camera2, camera2_guid) = seq.sequencer_add_camera()
# assign the two cameras to the camera cut sections (via binding id)
camera1.CameraBindingID = MovieSceneObjectBindingID( Guid=ue.string_to_guid( camera_guid ), Space=EMovieSceneObjectBindingSpace.Local )
camera2.CameraBindingID = MovieSceneObjectBindingID( Guid=ue.string_to_guid( camera2_guid ), Space=EMovieSceneObjectBindingSpace.Local )
# set cameras ranges
camera2.sequencer_set_section_range(0.5, 17)
camera1.sequencer_set_section_range(3.5, 5)
# notify the sequence editor that something heavily changed (True will focus to the sequence editor)
seq.sequencer_changed(True)
| # the Sequencer API support has been sponsored by <NAME> (http://www.mattwhelan.com/)
import unreal_engine as ue
from unreal_engine.classes import MovieSceneAudioTrack, LevelSequenceFactoryNew, MovieSceneSkeletalAnimationTrack, Character, SkeletalMesh, MovieScene3DTransformTrack, CineCameraActor, AnimSequence
import time
from unreal_engine.structs import FloatRange, FloatRangeBound, MovieSceneObjectBindingID
from unreal_engine import FTransform, FVector
from unreal_engine.enums import EMovieSceneObjectBindingSpace
# create a new level sequence asset
factory = LevelSequenceFactoryNew()
seq = factory.factory_create_new('/Game/MovieMaster' + str(int(time.time())))
if ue.ENGINE_MINOR_VERSION >= 20:
print(seq.MovieScene.TickResolution.Numerator)
seq.sequencer_set_playback_range(0, 30)
# add an audio track (without sound section ;) to the sequence
audio = seq.sequencer_add_master_track(MovieSceneAudioTrack)
# get a reference to the editor world (to spawn actors)
world = ue.get_editor_world()
# spawn a new character and modify it (post_edit_change will allow the editor/sequencer to be notified of actor updates)
character = world.actor_spawn(Character)
# notify modifications are about to happen...
character.modify()
character.Mesh.SkeletalMesh = ue.load_object(SkeletalMesh, '/Game/Mannequin/Character/Mesh/SK_Mannequin.SK_Mannequin')
# finalize the actor
character.post_edit_change()
# add to the sequencer as a possessable (shortcut method returning the guid as string)
guid = seq.sequencer_add_actor(character)
# add an animation track mapped to the just added actor
anim = seq.sequencer_add_track(MovieSceneSkeletalAnimationTrack, guid)
# create 3 animations sections (assign AnimSequence field to set the animation to play)
anim_sequence = anim.sequencer_track_add_section()
anim_sequence.sequencer_set_section_range(1, 3)
anim_sequence.Params.Animation = ue.load_object(AnimSequence, '/Game/Mannequin/Animations/ThirdPersonRun.ThirdPersonRun')
anim_sequence.RowIndex = 0
anim_sequence2 = anim.sequencer_track_add_section()
anim_sequence2.RowIndex = 1
anim_sequence2.sequencer_set_section_range(2, 5)
anim_sequence3 = anim.sequencer_track_add_section()
anim_sequence3.RowIndex = 1
anim_sequence3.SlotName = 'Hello'
anim_sequence3.sequencer_set_section_range(0, 30)
# add a transform track/section in one shot to the actor
transform = seq.sequencer_add_track(MovieScene3DTransformTrack, guid).sequencer_track_add_section()
transform.sequencer_set_section_range(0, 50)
# add keyframes to the transform section (from 4.20 you can directly use the reflection api, and the methods return the frame numbers)
print(transform.sequencer_section_add_key(0, FTransform(FVector(0, 0, 17 * 100))))
print(transform.sequencer_section_add_key(1.1, FTransform(FVector(0, 0, 22 * 100))))
print(transform.sequencer_section_add_key(2.2, FTransform(FVector(0, 0, 26 * 100))))
print(transform.sequencer_section_add_key(3.3, FTransform(FVector(0, 0, 30 * 100))))
# add camera cut track (can be only one)
camera_cut_track = seq.sequencer_add_camera_cut_track()
# add two camera views
camera1 = camera_cut_track.sequencer_track_add_section()
camera2 = camera_cut_track.sequencer_track_add_section()
# spawn 2 cine cameras in the stage and possess them with the sequencer
(cine_camera, camera_guid) = seq.sequencer_add_camera()
(cine_camera2, camera2_guid) = seq.sequencer_add_camera()
# assign the two cameras to the camera cut sections (via binding id)
camera1.CameraBindingID = MovieSceneObjectBindingID( Guid=ue.string_to_guid( camera_guid ), Space=EMovieSceneObjectBindingSpace.Local )
camera2.CameraBindingID = MovieSceneObjectBindingID( Guid=ue.string_to_guid( camera2_guid ), Space=EMovieSceneObjectBindingSpace.Local )
# set cameras ranges
camera2.sequencer_set_section_range(0.5, 17)
camera1.sequencer_set_section_range(3.5, 5)
# notify the sequence editor that something heavily changed (True will focus to the sequence editor)
seq.sequencer_changed(True)
| en | 0.899719 | # the Sequencer API support has been sponsored by <NAME> (http://www.mattwhelan.com/) # create a new level sequence asset # add an audio track (without sound section ;) to the sequence # get a reference to the editor world (to spawn actors) # spawn a new character and modify it (post_edit_change will allow the editor/sequencer to be notified of actor updates) # notify modifications are about to happen... # finalize the actor # add to the sequencer as a possessable (shortcut method returning the guid as string) # add an animation track mapped to the just added actor # create 3 animations sections (assign AnimSequence field to set the animation to play) # add a transform track/section in one shot to the actor # add keyframes to the transform section (from 4.20 you can directly use teh reflection api, and the methods returns the frame numbers) # add camera cut track (can be only one) # add two camera views # spawn 2 cine cameras in the stage and posses them with the sequencer # assign the two cameras to the camera cut sections (via binding id) # set cameras ranges # notify the sequence editor that something heavily changed (True will focus to the sequence editor) | 2.209326 | 2 |
job/src/scripts/job_distributor_nuvlabox_state_check.py | slipstream/SlipStreamJobEngine | 0 | 6633199 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
from slipstream.job.base import main
from slipstream.job.distributor import Distributor
from slipstream.job.util import override
class NuvlaBoxStateCheckDistributor(Distributor):
ACTION_NAME = 'nuvlabox_state_check'
def __init__(self):
super(NuvlaBoxStateCheckDistributor, self).__init__()
self.distribute_interval = 600.0 # 10 minutes
@override
def job_generator(self):
while True:
job = {'action': NuvlaBoxStateCheckDistributor.ACTION_NAME,
'targetResource': {'href': 'job'}}
yield job
time.sleep(self.distribute_interval)
@override
def _get_jobs_type(self):
return 'nuvlabox_state_check'
if __name__ == '__main__':
main(NuvlaBoxStateCheckDistributor)
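# Shape of the work produced above: job_generator yields, once every
# distribute_interval (600 s), a dict of the form
#     {'action': 'nuvlabox_state_check', 'targetResource': {'href': 'job'}}
# which the Distributor base class (not shown in this file) presumably turns
# into a queued job.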
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
from slipstream.job.base import main
from slipstream.job.distributor import Distributor
from slipstream.job.util import override
class NuvlaBoxStateCheckDistributor(Distributor):
ACTION_NAME = 'nuvlabox_state_check'
def __init__(self):
super(NuvlaBoxStateCheckDistributor, self).__init__()
self.distribute_interval = 600.0 # 10 minutes
@override
def job_generator(self):
while True:
job = {'action': NuvlaBoxStateCheckDistributor.ACTION_NAME,
'targetResource': {'href': 'job'}}
yield job
time.sleep(self.distribute_interval)
@override
def _get_jobs_type(self):
return 'nuvlabox_state_check'
if __name__ == '__main__':
main(NuvlaBoxStateCheckDistributor)
| en | 0.411553 | #!/usr/bin/env python # -*- coding: utf-8 -*- # 10 minutes | 2.09704 | 2 |
11-things/tf-11-one.py | BestByte/exercises-in-programming-style | 0 | 6633200 | import sys,re,operator,string
from abc import ABCMeta
class InfoAbstaract(metaclass=ABCMeta):
def info(self):
return self.__class__.__name__
class DataStore(InfoAbstaract):
'''
'''
def __init__(self,path_to_file):
with open(path_to_file) as f:
self._data=f.read()
        pattern=re.compile('[\W_]+')
        self._data=pattern.sub(' ',self._data).lower()  # use a space so adjacent words stay separated
def words(self):
'''
'''
return self._data.split()
def info(self):
return super(DataStore,self).info()+"My major data is a "+self._data.__class__.__name__
class StopWordManager(InfoAbstaract):
def __init__(self):
with open("../stop_words.txt") as f:
self._stop_words=f.read().split(',')
self._stop_words.extend(list(string.ascii_letters))
def is_stop_word(self,word):
return word in self._stop_words
def info(self):
        return super(StopWordManager,self).info()+" my major is a "+self._stop_words.__class__.__name__
class WordFreqManager(InfoAbstaract):
'''
    Word frequency statistics.
'''
def __init__(self):
        self._word_freqs={}
    def increment_count(self, word):
        if word in self._word_freqs:
            self._word_freqs[word] += 1
        else:
            self._word_freqs[word] = 1
    def sorted(self):
        return sorted(self._word_freqs.items(),key=operator.itemgetter(1),reverse=True)
    def info(self):
        return super(WordFreqManager,self).info()+" my major is a "+self._word_freqs.__class__.__name__
class WordFrequencyController(InfoAbstaract):
def __init__(self, path_to_file):
self._storage_manager = DataStore(path_to_file)
self._stop_word_manager = StopWordManager()
self._word_freq_manager = WordFreqManager()
def run(self):
for w in self._storage_manager.words():
if not self._stop_word_manager.is_stop_word(w):
self._word_freq_manager.increment_count(w)
word_freqs = self._word_freq_manager.sorted()
for (w, c) in word_freqs[0:25]:
print(w, ' - ', c)
#
# The main function
#
WordFrequencyController(sys.argv[1]).run()
| import sys,re,operator,string
from abc import ABCMeta
class InfoAbstaract(metaclass=ABCMeta):
def info(self):
return self.__class__.__name__
class DataStore(InfoAbstaract):
'''
'''
def __init__(self,path_to_file):
with open(path_to_file) as f:
self._data=f.read()
        pattern=re.compile('[\W_]+')
        self._data=pattern.sub(' ',self._data).lower()  # use a space so adjacent words stay separated
def words(self):
'''
'''
return self._data.split()
def info(self):
return super(DataStore,self).info()+"My major data is a "+self._data.__class__.__name__
class StopWordManager(InfoAbstaract):
def __init__(self):
with open("../stop_words.txt") as f:
self._stop_words=f.read().split(',')
self._stop_words.extend(list(string.ascii_letters))
def is_stop_word(self,word):
return word in self._stop_words
def info(self):
        return super(StopWordManager,self).info()+" my major is a "+self._stop_words.__class__.__name__
class WordFreqManager(InfoAbstaract):
'''
    Word frequency statistics.
'''
def __init__(self):
        self._word_freqs={}
    def increment_count(self, word):
        if word in self._word_freqs:
            self._word_freqs[word] += 1
        else:
            self._word_freqs[word] = 1
    def sorted(self):
        return sorted(self._word_freqs.items(),key=operator.itemgetter(1),reverse=True)
    def info(self):
        return super(WordFreqManager,self).info()+" my major is a "+self._word_freqs.__class__.__name__
class WordFrequencyController(InfoAbstaract):
def __init__(self, path_to_file):
self._storage_manager = DataStore(path_to_file)
self._stop_word_manager = StopWordManager()
self._word_freq_manager = WordFreqManager()
def run(self):
for w in self._storage_manager.words():
if not self._stop_word_manager.is_stop_word(w):
self._word_freq_manager.increment_count(w)
word_freqs = self._word_freq_manager.sorted()
for (w, c) in word_freqs[0:25]:
print(w, ' - ', c)
#
# The main function
#
WordFrequencyController(sys.argv[1]).run()
| en | 0.22195 | 词汇统计 # # The main function # | 3.04124 | 3 |
texar/tf/data/data/data_iterators_test.py | jiajunhua/asyml-texar | 1 | 6633201 | # -*- coding: utf-8 -*-
#
"""
Unit tests for data iterator related operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# pylint: disable=no-member, invalid-name
import tempfile
import numpy as np
import tensorflow as tf
import texar.tf as tx
class DataIteratorTest(tf.test.TestCase):
"""Tests data iterators.
"""
def setUp(self):
tf.test.TestCase.setUp(self)
# Create data
train_text = list(np.linspace(1, 1000, num=1000, dtype=np.int64))
train_text = [str(x) for x in train_text]
train_text_file = tempfile.NamedTemporaryFile()
train_text_file.write('\n'.join(train_text).encode("utf-8"))
train_text_file.flush()
self._train_text_file = train_text_file
test_text = list(np.linspace(1001, 2000, num=1000, dtype=np.int64))
test_text = [str(x) for x in test_text]
test_text_file = tempfile.NamedTemporaryFile()
test_text_file.write('\n'.join(test_text).encode("utf-8"))
test_text_file.flush()
self._test_text_file = test_text_file
vocab_list = train_text + test_text
vocab_file = tempfile.NamedTemporaryFile()
vocab_file.write('\n'.join(vocab_list).encode("utf-8"))
vocab_file.flush()
self._vocab_file = vocab_file
self._vocab_size = len(vocab_list)
self._train_hparams = {
"num_epochs": 2,
"batch_size": 1,
"shuffle": False,
"dataset": {
"files": self._train_text_file.name,
"vocab_file": self._vocab_file.name,
"bos_token": '',
"eos_token": ''
},
"name": "train"
}
self._test_hparams = {
"num_epochs": 1,
"batch_size": 1,
"shuffle": False,
"dataset": {
"files": self._test_text_file.name,
"vocab_file": self._vocab_file.name,
"bos_token": '',
"eos_token": ''
},
"name": "test"
}
def test_iterator_single_dataset(self):
"""Tests iterating over a single dataset.
"""
data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.DataIterator(data)
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
iterator.switch_to_dataset(sess)
i = 1001
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Done -- epoch limit reached')
self.assertEqual(i, 2001)
break
def test_iterator_multi_datasets(self):
"""Tests iterating over multiple datasets.
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.DataIterator([train_data, test_data])
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
# Iterates over train data
iterator.switch_to_dataset(sess, train_data.name)
i = 0
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
# Iterates over test data
iterator.switch_to_dataset(sess, test_data.name)
i = 1001
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
def test_train_test_data_iterator(self):
"""Tests :class:`texar.tf.data.TrainTestDataIterator`
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.TrainTestDataIterator(train=train_data,
test=test_data)
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
iterator.switch_to_train_data(sess)
i = 0
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
iterator.switch_to_test_data(sess)
i = 1001
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
def test_feedable_iterator_multi_datasets(self):
"""Tests iterating over multiple datasets with the
:class:`FeedableDataIterator`.
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.FeedableDataIterator([train_data, test_data])
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
iterator.initialize_dataset(sess)
for _ in range(2):
# Iterates over train data
iterator.restart_dataset(sess, train_data.name)
data_handle = iterator.get_handle(sess, train_data.name)
i = 0
while True:
try:
feed_dict = {iterator.handle: data_handle}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
# Iterates over test data
iterator.restart_dataset(sess, test_data.name)
data_handle = iterator.get_handle(sess, test_data.name)
i = 1001
while True:
try:
feed_dict = {iterator.handle: data_handle}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
def test_train_test_feedable_data_iterator(self):
"""Tests :class:`texar.tf.data.TrainTestFeedableDataIterator`
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.TrainTestFeedableDataIterator(train=train_data,
test=test_data)
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
iterator.restart_train_dataset(sess)
i = 0
while True:
try:
feed_dict = {
iterator.handle: iterator.get_train_handle(sess)
}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
iterator.restart_test_dataset(sess)
i = 1001
while True:
try:
feed_dict = {
iterator.handle: iterator.get_test_handle(sess)
}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
if __name__ == "__main__":
tf.test.main()
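# Condensed usage sketch of the pattern exercised above (hparams as in setUp();
# file paths omitted):
#
#     data = tx.data.MonoTextData(hparams)
#     iterator = tx.data.DataIterator(data)
#     batch = iterator.get_next()
#     with tf.Session() as sess:
#         sess.run(tf.tables_initializer())
#         iterator.switch_to_dataset(sess)
#         batch_ = sess.run(batch)   # dict with a 'text' field, as asserted above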
| # -*- coding: utf-8 -*-
#
"""
Unit tests for data iterator related operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# pylint: disable=no-member, invalid-name
import tempfile
import numpy as np
import tensorflow as tf
import texar.tf as tx
class DataIteratorTest(tf.test.TestCase):
"""Tests data iterators.
"""
def setUp(self):
tf.test.TestCase.setUp(self)
# Create data
train_text = list(np.linspace(1, 1000, num=1000, dtype=np.int64))
train_text = [str(x) for x in train_text]
train_text_file = tempfile.NamedTemporaryFile()
train_text_file.write('\n'.join(train_text).encode("utf-8"))
train_text_file.flush()
self._train_text_file = train_text_file
test_text = list(np.linspace(1001, 2000, num=1000, dtype=np.int64))
test_text = [str(x) for x in test_text]
test_text_file = tempfile.NamedTemporaryFile()
test_text_file.write('\n'.join(test_text).encode("utf-8"))
test_text_file.flush()
self._test_text_file = test_text_file
vocab_list = train_text + test_text
vocab_file = tempfile.NamedTemporaryFile()
vocab_file.write('\n'.join(vocab_list).encode("utf-8"))
vocab_file.flush()
self._vocab_file = vocab_file
self._vocab_size = len(vocab_list)
self._train_hparams = {
"num_epochs": 2,
"batch_size": 1,
"shuffle": False,
"dataset": {
"files": self._train_text_file.name,
"vocab_file": self._vocab_file.name,
"bos_token": '',
"eos_token": ''
},
"name": "train"
}
self._test_hparams = {
"num_epochs": 1,
"batch_size": 1,
"shuffle": False,
"dataset": {
"files": self._test_text_file.name,
"vocab_file": self._vocab_file.name,
"bos_token": '',
"eos_token": ''
},
"name": "test"
}
def test_iterator_single_dataset(self):
"""Tests iterating over a single dataset.
"""
data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.DataIterator(data)
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
iterator.switch_to_dataset(sess)
i = 1001
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Done -- epoch limit reached')
self.assertEqual(i, 2001)
break
def test_iterator_multi_datasets(self):
"""Tests iterating over multiple datasets.
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.DataIterator([train_data, test_data])
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
# Iterates over train data
iterator.switch_to_dataset(sess, train_data.name)
i = 0
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
# Iterates over test data
iterator.switch_to_dataset(sess, test_data.name)
i = 1001
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
def test_train_test_data_iterator(self):
"""Tests :class:`texar.tf.data.TrainTestDataIterator`
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.TrainTestDataIterator(train=train_data,
test=test_data)
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
iterator.switch_to_train_data(sess)
i = 0
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
iterator.switch_to_test_data(sess)
i = 1001
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
def test_feedable_iterator_multi_datasets(self):
"""Tests iterating over multiple datasets with the
:class:`FeedableDataIterator`.
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.FeedableDataIterator([train_data, test_data])
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
iterator.initialize_dataset(sess)
for _ in range(2):
# Iterates over train data
iterator.restart_dataset(sess, train_data.name)
data_handle = iterator.get_handle(sess, train_data.name)
i = 0
while True:
try:
feed_dict = {iterator.handle: data_handle}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
# Iterates over test data
iterator.restart_dataset(sess, test_data.name)
data_handle = iterator.get_handle(sess, test_data.name)
i = 1001
while True:
try:
feed_dict = {iterator.handle: data_handle}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
def test_train_test_feedable_data_iterator(self):
"""Tests :class:`texar.tf.data.TrainTestFeedableDataIterator`
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.TrainTestFeedableDataIterator(train=train_data,
test=test_data)
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
iterator.restart_train_dataset(sess)
i = 0
while True:
try:
feed_dict = {
iterator.handle: iterator.get_train_handle(sess)
}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
iterator.restart_test_dataset(sess)
i = 1001
while True:
try:
feed_dict = {
iterator.handle: iterator.get_test_handle(sess)
}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
if __name__ == "__main__":
tf.test.main()
| en | 0.533525 | # -*- coding: utf-8 -*- # Unit tests for data iterator related operations. # pylint: disable=no-member, invalid-name Tests data iterators. # Create data Tests iterating over a single dataset. Tests iterating over multiple datasets. # Iterates over train data # Iterates over test data Tests :class:`texar.tf.data.TrainTestDataIterator` Tests iterating over multiple datasets with the :class:`FeedableDataIterator`. # Iterates over train data # Iterates over test data Tests :class:`texar.tf.data.TrainTestFeedableDataIterator` | 2.667186 | 3 |
fetch_data.py | buttercrab/movie-trend | 0 | 6633202 | import argparse
import json
import urllib.parse
from datetime import datetime
from datetime import timedelta
from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
secret_key = ''
conf = json.loads(open('data/conf.json', 'r').readline())
parser = argparse.ArgumentParser()
parser.add_argument('--movie', type=str)
args = parser.parse_args()
def make_date(d: datetime):
return "%04d-%02d-%02d" % (d.year, d.month, d.day)
def fetch_api_key():
global secret_key
if secret_key == '':
secret = open('secret/secret.txt', 'r')
secret_key = secret.readline()
return secret_key
def save_conf():
global conf
conf_file = open('data/conf.json', 'w')
conf_file.write(json.dumps(conf, ensure_ascii=False))
if __name__ == '__main__':
fetch_api_key()
movies = []
if args.movie is None:
html = urlopen('https://movie.naver.com/movie/sdb/rank/rmovie.nhn')
bs_object = BeautifulSoup(html, 'html.parser')
for item in bs_object.select('.tit3'):
movies.append(str(item.find_all('a')[0].text))
else:
movies.append(args.movie)
cnt = 0
for movie in movies:
cnt += 1
print(str(cnt) + ': downloading movie "' + movie + '"')
try:
encoded = urllib.parse.quote(movie)
html = urlopen('http://www.kobis.or.kr/kobis/business/mast/mvie/searchMovieList.do?sMovName=' + encoded)
bs_object = BeautifulSoup(html, 'html.parser')
code = str(bs_object.select('.tac')[0].find_all('span')[0].text).strip()
encoded = urllib.parse.quote(movie + ' 개봉일')
html = urlopen('https://search.naver.com/search.naver?query=' + encoded)
bs_object = BeautifulSoup(html, 'html.parser')
date = str(bs_object.select('.property')[0].text).split()[0][:-1].replace('.', '-')
html = urlopen(
'http://www.kobis.or.kr/kobis/business/mast/mvie/searchMovieDtlXls.do?sType=box&code=' + code)
bs_object = BeautifulSoup(html, 'html.parser')
flag = False
audience_data = []
for item in bs_object.select('tbody')[0].find_all('tr'):
cur_date = str(item.find_all('td')[0].text)
if cur_date == date:
flag = True
if flag:
audience_data.append({
'time': cur_date,
'data': int(str(item.find_all('td')[10].text).replace(',', ''))
})
body = {
'startDate': make_date(datetime.strptime(date, '%Y-%m-%d') - timedelta(days=10)),
'endDate': make_date(
min(datetime.now() - timedelta(days=1), datetime.strptime(date, '%Y-%m-%d') + timedelta(days=60))),
'timeUnit': 'date',
'keywordGroups': [
{
'groupName': movie,
'keywords': [
movie,
movie.replace(':', '').replace(' ', ' ')
]
},
]
}
headers = {
'X-Naver-Client-Id': '1vwchK27lb2hC4W3Cufh',
'X-Naver-Client-Secret': secret_key,
'Content-Type': 'application/json'
}
res = requests.post('https://openapi.naver.com/v1/datalab/search', data=json.dumps(body), headers=headers)
search_data = []
for i in res.json()['results'][0]['data']:
search_data.append({
'time': i['period'],
'data': i['ratio']
})
        except Exception:  # keep going if any lookup/parse step fails for this movie
cnt -= 1
continue
file = open('data/' + movie + '.json', 'w+')
file.write(json.dumps({
'audience_data': audience_data,
'search_data': search_data
}))
file.close()
conf[movie] = date
save_conf()
print('downloaded ' + str(cnt) + ' movie data, ' + str(len(movies) - cnt) + ' failed')
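# Resulting on-disk layout (derived from the code above): every successfully
# scraped movie gets data/<movie>.json holding {'audience_data': [...],
# 'search_data': [...]}, and data/conf.json maps each movie title to its
# release date.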
 | import argparse
import json
import urllib.parse
from datetime import datetime
from datetime import timedelta
from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
secret_key = ''
conf = json.loads(open('data/conf.json', 'r').readline())
parser = argparse.ArgumentParser()
parser.add_argument('--movie', type=str)
args = parser.parse_args()
def make_date(d: datetime):
return "%04d-%02d-%02d" % (d.year, d.month, d.day)
def fetch_api_key():
global secret_key
if secret_key == '':
secret = open('secret/secret.txt', 'r')
secret_key = secret.readline()
return secret_key
def save_conf():
global conf
conf_file = open('data/conf.json', 'w')
conf_file.write(json.dumps(conf, ensure_ascii=False))
if __name__ == '__main__':
fetch_api_key()
movies = []
if args.movie is None:
html = urlopen('https://movie.naver.com/movie/sdb/rank/rmovie.nhn')
bs_object = BeautifulSoup(html, 'html.parser')
for item in bs_object.select('.tit3'):
movies.append(str(item.find_all('a')[0].text))
else:
movies.append(args.movie)
cnt = 0
for movie in movies:
cnt += 1
print(str(cnt) + ': downloading movie "' + movie + '"')
try:
encoded = urllib.parse.quote(movie)
html = urlopen('http://www.kobis.or.kr/kobis/business/mast/mvie/searchMovieList.do?sMovName=' + encoded)
bs_object = BeautifulSoup(html, 'html.parser')
code = str(bs_object.select('.tac')[0].find_all('span')[0].text).strip()
encoded = urllib.parse.quote(movie + ' 개봉일')
html = urlopen('https://search.naver.com/search.naver?query=' + encoded)
bs_object = BeautifulSoup(html, 'html.parser')
date = str(bs_object.select('.property')[0].text).split()[0][:-1].replace('.', '-')
html = urlopen(
'http://www.kobis.or.kr/kobis/business/mast/mvie/searchMovieDtlXls.do?sType=box&code=' + code)
bs_object = BeautifulSoup(html, 'html.parser')
flag = False
audience_data = []
for item in bs_object.select('tbody')[0].find_all('tr'):
cur_date = str(item.find_all('td')[0].text)
if cur_date == date:
flag = True
if flag:
audience_data.append({
'time': cur_date,
'data': int(str(item.find_all('td')[10].text).replace(',', ''))
})
body = {
'startDate': make_date(datetime.strptime(date, '%Y-%m-%d') - timedelta(days=10)),
'endDate': make_date(
min(datetime.now() - timedelta(days=1), datetime.strptime(date, '%Y-%m-%d') + timedelta(days=60))),
'timeUnit': 'date',
'keywordGroups': [
{
'groupName': movie,
'keywords': [
movie,
movie.replace(':', '').replace(' ', ' ')
]
},
]
}
headers = {
'X-Naver-Client-Id': '1vwchK27lb2hC4W3Cufh',
'X-Naver-Client-Secret': secret_key,
'Content-Type': 'application/json'
}
res = requests.post('https://openapi.naver.com/v1/datalab/search', data=json.dumps(body), headers=headers)
search_data = []
for i in res.json()['results'][0]['data']:
search_data.append({
'time': i['period'],
'data': i['ratio']
})
        except Exception:  # keep going if any lookup/parse step fails for this movie
cnt -= 1
continue
file = open('data/' + movie + '.json', 'w+')
file.write(json.dumps({
'audience_data': audience_data,
'search_data': search_data
}))
file.close()
conf[movie] = date
save_conf()
print('downloaded ' + str(cnt) + ' movie data, ' + str(len(movies) - cnt) + ' failed')
| none | 1 | 3.176602 | 3 |
|
lib/common.py | ChipsnMedia/vaapi-fits | 0 | 6633203 | ###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from datetime import datetime as dt
import functools
import os
import slash
import subprocess
import threading
import time
def sorted_by_resolution(cases):
size = lambda kv: kv[1]["width"] * kv[1]["height"]
return [kv[0] for kv in sorted(cases.items(), key = size)]
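# Illustration only (hypothetical case names and sizes): given
#   {"hd": {"width": 1920, "height": 1080}, "cif": {"width": 352, "height": 288}}
# sorted_by_resolution returns ["cif", "hd"], i.e. the keys ordered by pixel count.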
def timefn(label):
def count(function):
# Keep track of the number of times this function was called from the
# current test context. This allows us to use a unique label for the
# test details.
count = get_media()._test_state_value(function, 0)
count.value += 1
return count.value
def inner(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
start = dt.now()
try:
ret = function(*args, **kwargs)
except:
raise
finally:
stotal = (dt.now() - start).total_seconds()
kdetail = "time({}:{})".format(label, count(function))
get_media()._set_test_details(**{kdetail : "{:.4f}s".format(stotal)})
return ret
return wrapper
return inner
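# Editor's note (illustrative, not from the original file): inside a slash test
# one would decorate a helper, e.g.
#
#     @timefn("decode")
#     def decode(...):
#         ...
#
# and each call then records a detail such as "time(decode:1)": "0.5321s" on the
# current test, using the per-test counter above.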
def parametrize_with_unused(names, values, unused):
def inner(func):
used = vars(func).setdefault("__params_used__", list())
@functools.wraps(func)
@slash.parametrize(names, sorted(values))
def wrapper(*args, **kwargs):
params = kwargs.copy()
for param in unused:
slash.logger.notice("NOTICE: '{}' parameter unused".format(param))
del params[param]
if params in used:
slash.skip_test("Test case is redundant")
used.append(params)
func(*args, **kwargs)
return wrapper
return inner
class memoize:
def __init__(self, function):
self.function = function
self.memoized = {}
def __call__(self, *args):
try:
return self.memoized[args]
except KeyError:
r = self.function(*args)
self.memoized[args] = r
return r
def __repr__(self):
return str(self.function.__name__)
@memoize
def get_media():
return slash.plugins.manager.get_plugin("media")
def killproc(proc):
result = proc.poll()
if result is not None:
return result
# try to 'gently' terminate proc
proc.terminate()
for i in range(5):
result = proc.poll()
if result is not None:
return result
time.sleep(1) # wait a little longer for proc to terminate
# failed to terminate proc, so kill it
proc.kill()
for i in range(10):
result = proc.poll()
if result is not None:
return result
time.sleep(1) # give system more time to kill proc
# failed to kill proc
if result is None:
slash.logger.warn('Failed to kill process with pid {}'.format(proc.pid))
return result
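# Editor's illustration (not part of the original module): killproc() exercised
# against a throwaway child process. The "exec" prefix mirrors the rationale
# documented in call() below, so proc.pid is the command itself.
def _demo_killproc():
    child = subprocess.Popen("exec sleep 30", shell=True)
    assert killproc(child) is not None  # a negative return code means the child was signalled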
def call(command, withSlashLogger = True):
calls_allowed = get_media()._calls_allowed()
assert calls_allowed, "call refused"
if withSlashLogger:
logger = slash.logger.debug
else:
logger = lambda x: None
def readproc(proc):
for line in iter(proc.stdout.readline, ''):
readproc.output += line
logger(line.rstrip('\n'))
readproc.output = ""
def timeout(proc):
timeout.triggered = proc.poll() is None
killproc(proc)
timeout.triggered = False
error = False
message = ""
# Without "exec", the shell will launch the "command" in a child process and
# proc.pid will represent the shell (not the "command"). And therefore, the
# "command" will not get killed with proc.terminate() or proc.kill().
#
# When we use "exec" to run the "command". This will cause the "command" to
# inherit the shell process and proc.pid will represent the actual "command".
proc = subprocess.Popen(
"exec " + command,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
shell = True,
universal_newlines = True)
logger("CALL: {} (pid: {})".format(command, proc.pid))
reader = threading.Thread(target = readproc, args = [proc])
timer = threading.Timer(get_media()._get_call_timeout(), timeout, [proc])
reader.daemon = True
timer.daemon = True
reader.start()
timer.start()
try: # in case of user interrupt
proc.wait()
timer.cancel()
except:
killproc(proc)
raise
finally:
timer.cancel()
timer.join(30)
reader.join(30)
proc.stdin.close()
proc.stdout.close()
if timeout.triggered:
error = True
get_media()._report_call_timeout()
message = "CALL TIMEOUT: timeout after {} seconds (pid: {}).".format(
timer.interval, proc.pid)
elif proc.returncode != 0:
error = True
message = "CALL ERROR: failed with exitcode {} (pid: {})".format(proc.returncode, proc.pid)
assert not error, message
return readproc.output
def try_call(command):
try:
subprocess.check_output(command, stderr = subprocess.STDOUT, shell = True)
except:
return False
return True
def mapRange(value, srcRange, destRange):
(smin, smax), (dmin, dmax) = srcRange, destRange
return dmin + ((value - smin) * (dmax - dmin) / (smax - smin))
def mapRangeInt(value, srcRange, destRange):
(smin, smax), (dmin, dmax) = srcRange, destRange
return int(dmin + ((value - smin) * (dmax - dmin) // (smax - smin)))
def mapRangeWithDefault(value, srcRange, dstRange):
# Normalizes a value from the source range into the destination range,
# taking the midpoint/default of each range into account.
smin, smid, smax = srcRange
dmin, dmid, dmax = dstRange
if value < smid:
return (value - smin) / (smid - smin) * (dmid - dmin) + dmin
return (value - smid) / (smax - smid) * (dmax - dmid) + dmid
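# Editor's sketch (not in the original file): the range helpers applied to a
# made-up control mapped from a 0..100 user scale onto other driver scales.
def _demo_map_ranges():
    assert mapRange(0, (0, 100), (0, 255)) == 0
    assert mapRangeInt(50, (0, 100), (0, 255)) == 127
    assert mapRangeWithDefault(50, (0, 50, 100), (-64, 0, 63)) == 0    # midpoint maps to default
    assert mapRangeWithDefault(100, (0, 50, 100), (-64, 0, 63)) == 63  # endpoints line up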
# some path helpers
def abspath(path):
return os.path.sep + os.path.abspath(path).lstrip(os.path.sep)
def pathexists(path):
return os.path.exists(abspath(path))
def makepath(path):
if not pathexists(path):
os.makedirs(abspath(path))
| ###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from datetime import datetime as dt
import functools
import os
import slash
import subprocess
import threading
import time
def sorted_by_resolution(cases):
size = lambda kv: kv[1]["width"] * kv[1]["height"]
return [kv[0] for kv in sorted(cases.items(), key = size)]
def timefn(label):
def count(function):
# Keep track of the number of times this function was called from the
# current test context. This allows us to use a unique label for the
# test details.
count = get_media()._test_state_value(function, 0)
count.value += 1
return count.value
def inner(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
start = dt.now()
try:
ret = function(*args, **kwargs)
except:
raise
finally:
stotal = (dt.now() - start).total_seconds()
kdetail = "time({}:{})".format(label, count(function))
get_media()._set_test_details(**{kdetail : "{:.4f}s".format(stotal)})
return ret
return wrapper
return inner
def parametrize_with_unused(names, values, unused):
def inner(func):
used = vars(func).setdefault("__params_used__", list())
@functools.wraps(func)
@slash.parametrize(names, sorted(values))
def wrapper(*args, **kwargs):
params = kwargs.copy()
for param in unused:
slash.logger.notice("NOTICE: '{}' parameter unused".format(param))
del params[param]
if params in used:
slash.skip_test("Test case is redundant")
used.append(params)
func(*args, **kwargs)
return wrapper
return inner
class memoize:
def __init__(self, function):
self.function = function
self.memoized = {}
def __call__(self, *args):
try:
return self.memoized[args]
except KeyError:
r = self.function(*args)
self.memoized[args] = r
return r
def __repr__(self):
return str(self.function.__name__)
@memoize
def get_media():
return slash.plugins.manager.get_plugin("media")
def killproc(proc):
result = proc.poll()
if result is not None:
return result
# try to 'gently' terminate proc
proc.terminate()
for i in range(5):
result = proc.poll()
if result is not None:
return result
time.sleep(1) # wait a little longer for proc to terminate
# failed to terminate proc, so kill it
proc.kill()
for i in range(10):
result = proc.poll()
if result is not None:
return result
time.sleep(1) # give system more time to kill proc
# failed to kill proc
if result is None:
slash.logger.warn('Failed to kill process with pid {}'.format(proc.pid))
return result
def call(command, withSlashLogger = True):
calls_allowed = get_media()._calls_allowed()
assert calls_allowed, "call refused"
if withSlashLogger:
logger = slash.logger.debug
else:
logger = lambda x: None
def readproc(proc):
for line in iter(proc.stdout.readline, ''):
readproc.output += line
logger(line.rstrip('\n'))
readproc.output = ""
def timeout(proc):
timeout.triggered = proc.poll() is None
killproc(proc)
timeout.triggered = False
error = False
message = ""
# Without "exec", the shell will launch the "command" in a child process and
# proc.pid will represent the shell (not the "command"). And therefore, the
# "command" will not get killed with proc.terminate() or proc.kill().
#
# When we use "exec" to run the "command". This will cause the "command" to
# inherit the shell process and proc.pid will represent the actual "command".
proc = subprocess.Popen(
"exec " + command,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
shell = True,
universal_newlines = True)
logger("CALL: {} (pid: {})".format(command, proc.pid))
reader = threading.Thread(target = readproc, args = [proc])
timer = threading.Timer(get_media()._get_call_timeout(), timeout, [proc])
reader.daemon = True
timer.daemon = True
reader.start()
timer.start()
try: # in case of user interrupt
proc.wait()
timer.cancel()
except:
killproc(proc)
raise
finally:
timer.cancel()
timer.join(30)
reader.join(30)
proc.stdin.close()
proc.stdout.close()
if timeout.triggered:
error = True
get_media()._report_call_timeout()
message = "CALL TIMEOUT: timeout after {} seconds (pid: {}).".format(
timer.interval, proc.pid)
elif proc.returncode != 0:
error = True
message = "CALL ERROR: failed with exitcode {} (pid: {})".format(proc.returncode, proc.pid)
assert not error, message
return readproc.output
def try_call(command):
try:
subprocess.check_output(command, stderr = subprocess.STDOUT, shell = True)
except:
return False
return True
def mapRange(value, srcRange, destRange):
(smin, smax), (dmin, dmax) = srcRange, destRange
return dmin + ((value - smin) * (dmax - dmin) / (smax - smin))
def mapRangeInt(value, srcRange, destRange):
(smin, smax), (dmin, dmax) = srcRange, destRange
return int(dmin + ((value - smin) * (dmax - dmin) // (smax - smin)))
def mapRangeWithDefault(value, srcRange, dstRange):
# Normalizes a value from the source range into the destination range,
# taking the midpoint/default of each range into account.
smin, smid, smax = srcRange
dmin, dmid, dmax = dstRange
if value < smid:
return (value - smin) / (smid - smin) * (dmid - dmin) + dmin
return (value - smid) / (smax - smid) * (dmax - dmid) + dmid
# some path helpers
def abspath(path):
return os.path.sep + os.path.abspath(path).lstrip(os.path.sep)
def pathexists(path):
return os.path.exists(abspath(path))
def makepath(path):
if not pathexists(path):
os.makedirs(abspath(path))
| en | 0.803074 | ### ### Copyright (C) 2018-2019 Intel Corporation ### ### SPDX-License-Identifier: BSD-3-Clause ### # Keep track of the number of times this function was called from the # current test context. This allows us to use a unique label for the # test details. # try to 'gently' terminate proc # wait a little longer for proc to terminate # failed to terminate proc, so kill it # give system more time to kill proc # failed to kill proc # Without "exec", the shell will launch the "command" in a child process and # proc.pid will represent the shell (not the "command"). And therefore, the # "command" will not get killed with proc.terminate() or proc.kill(). # # When we use "exec" to run the "command". This will cause the "command" to # inherit the shell process and proc.pid will represent the actual "command". # in case of user interrupt # Normalizes a value from the source range into the destination range, # taking the midpoint/default of each range into account. # some path helpers | 2.09659 | 2 |
melusine/nlp_tools/stemmer.py | DataFactory-Verlingue/dafa_melusine | 1 | 6633204 | <reponame>DataFactory-Verlingue/dafa_melusine<gh_stars>1-10
import logging
from nltk.stem import SnowballStemmer
logger = logging.getLogger(__name__)
class Stemmer:
"""Compute list Series which return the stemmed version of a list of tokens
Stemming is the process of reducing a word to its word stem that affixes to suffixes and prefixes or to the roots of words.
Parameters
----------
input_column : str,
Column of pd.Dataframe which contains a list of tokens, default column ['tokens']
output_column: str,
Column where is saved the list of stemmed tokens, default column ['stemmed_tokens']
language : str,
Language of the tokens to be stemmed.
Supported languages : 'arabic', 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian', 'italian', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian', 'spanish', 'swedish'
Default value, 'french'
Returns
-------
pd.Dataframe
Examples
--------
>>> from melusine.prepare_email.cleaning import Stemmer
>>> stemmer = Stemmer()
>>> stemmer.transform(data)
"""
FILENAME = "nltk_stemmer_meta.pkl"
STEMMER_FILENAME = "nltk_stemmer"
def __init__(self, input_column: str ="tokens", output_column: str ="stemmed_tokens", language: str = 'french'):
self.input_column = input_column
self.output_column = output_column
self.stemmer = SnowballStemmer(language)
def _stemming(self, input_tokens: list):
return [self.stemmer.stem(token) for token in input_tokens]
def fit(self, df, y=None):
""" """
return self
def transform(self, df):
input_data = df[self.input_column]
df[self.output_column] = input_data.apply(self._stemming)
return df | import logging
from nltk.stem import SnowballStemmer
logger = logging.getLogger(__name__)
class Stemmer:
"""Compute list Series which return the stemmed version of a list of tokens
Stemming is the process of reducing a word to its word stem that affixes to suffixes and prefixes or to the roots of words.
Parameters
----------
input_column : str,
Column of pd.Dataframe which contains a list of tokens, default column ['tokens']
output_column: str,
Column where is saved the list of stemmed tokens, default column ['stemmed_tokens']
language : str,
Language of the tokens to be stemmed.
Supported languages : 'arabic', 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian', 'italian', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian', 'spanish', 'swedish'
Default value, 'french'
Returns
-------
pd.Dataframe
Examples
--------
>>> from melusine.prepare_email.cleaning import Stemmer
>>> stemmer = Stemmer()
>>> stemmer.transform(data)
"""
FILENAME = "nltk_stemmer_meta.pkl"
STEMMER_FILENAME = "nltk_stemmer"
def __init__(self, input_column: str ="tokens", output_column: str ="stemmed_tokens", language: str = 'french'):
self.input_column = input_column
self.output_column = output_column
self.stemmer = SnowballStemmer(language)
def _stemming(self, input_tokens: list):
return [self.stemmer.stem(token) for token in input_tokens]
def fit(self, df, y=None):
""" """
return self
def transform(self, df):
input_data = df[self.input_column]
df[self.output_column] = input_data.apply(self._stemming)
return df | en | 0.409131 | Compute list Series which return the stemmed version of a list of tokens Stemming is the process of reducing a word to its word stem that affixes to suffixes and prefixes or to the roots of words. Parameters ---------- input_column : str, Column of pd.Dataframe which contains a list of tokens, default column ['tokens'] output_column: str, Column where is saved the list of stemmed tokens, default column ['stemmed_tokens'] language : str, Language of the tokens to be stemmed. Supported languages : 'arabic', 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian', 'italian', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian', 'spanish', 'swedish' Default value, 'french' Returns ------- pd.Dataframe Examples -------- >>> from melusine.prepare_email.cleaning import Stemmer >>> stemmer = Stemmer() >>> stemmer.transform(data) | 3.657409 | 4 |
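Editor's note — an illustrative, slightly fuller usage of the Stemmer class from
the row above. The DataFrame contents are invented; the import path follows the
file location shown in the row (melusine/nlp_tools/stemmer.py) and assumes
pandas and NLTK are installed.

import pandas as pd
from melusine.nlp_tools.stemmer import Stemmer

df = pd.DataFrame({"tokens": [["manger", "mangerons"], ["rapidement"]]})
stemmer = Stemmer(input_column="tokens", output_column="stemmed_tokens")
df = stemmer.fit(df).transform(df)
print(df["stemmed_tokens"])  # French snowball stems, e.g. ["mang", "mang"] and ["rapid"]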
user_program/old/test.py | DaveRichmond/USB4VC | 78 | 6633205 | <reponame>DaveRichmond/USB4VC
for x in range(32):
print(hex(x), '', end='') | for x in range(32):
print(hex(x), '', end='') | none | 1 | 2.991596 | 3 |
|
person_tracker/code/utils/streaming.py | beetecu/continous_monitoring | 1 | 6633206 | <gh_stars>1-10
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2 as cv
class MultiStreamerCapture:
def __init__(self, logger, sources):
assert sources
self.logger = logger
self.captures = []
try:
sources = [int(src) for src in sources]
mode = 'cam'
except ValueError:
mode = 'video'
if mode == 'cam':
for id in sources:
self.logger.info('INFO', 'Connection cam {}'.format(id))
cap = cv.VideoCapture(id)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv.CAP_PROP_FPS, 30)
cap.set(cv.CAP_PROP_FOURCC, cv.VideoWriter_fourcc(*'MJPG'))
assert cap.isOpened()
self.captures.append(cap)
else:
for stream_path in sources:
self.logger.info('INFO', 'Opening file {}'.format(stream_path))
input_stream = "souphttpsrc location=" + stream_path + " ! hlsdemux ! decodebin ! videoconvert ! videoscale ! appsink max-buffers=1 drop=true"
cap = cv.VideoCapture(input_stream, cv.CAP_GSTREAMER)
assert cap.isOpened()
self.captures.append(cap)
def get_frames(self):
frames = []
for capture in self.captures:
has_frame, frame = capture.read()
if has_frame:
frames.append(frame)
return len(frames) == len(self.captures), frames
def get_num_sources(self):
return len(self.captures) | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2 as cv
class MultiStreamerCapture:
def __init__(self, logger, sources):
assert sources
self.logger = logger
self.captures = []
try:
sources = [int(src) for src in sources]
mode = 'cam'
except ValueError:
mode = 'video'
if mode == 'cam':
for id in sources:
self.logger.info('INFO', 'Connection cam {}'.format(id))
cap = cv.VideoCapture(id)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv.CAP_PROP_FPS, 30)
cap.set(cv.CAP_PROP_FOURCC, cv.VideoWriter_fourcc(*'MJPG'))
assert cap.isOpened()
self.captures.append(cap)
else:
for stream_path in sources:
self.logger.info('INFO', 'Opening file {}'.format(stream_path))
input_stream = "souphttpsrc location=" + stream_path + " ! hlsdemux ! decodebin ! videoconvert ! videoscale ! appsink max-buffers=1 drop=true"
cap = cv.VideoCapture(input_stream, cv.CAP_GSTREAMER)
assert cap.isOpened()
self.captures.append(cap)
def get_frames(self):
frames = []
for capture in self.captures:
has_frame, frame = capture.read()
if has_frame:
frames.append(frame)
return len(frames) == len(self.captures), frames
def get_num_sources(self):
return len(self.captures) | en | 0.857463 | Copyright (c) 2019 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1.884533 | 2 |
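Editor's note — an illustrative way to drive MultiStreamerCapture from the row
above. The logger stub and the HLS URL are placeholders invented for this
sketch (the class only needs logger.info(level, msg)), and a reachable stream
is required for the asserts inside the class to pass.

class _PrintLogger:
    def info(self, level, msg):
        print(level, msg)

capture = MultiStreamerCapture(_PrintLogger(), ["https://example.com/live/playlist.m3u8"])
ok, frames = capture.get_frames()
if ok:
    print("read", len(frames), "frames from", capture.get_num_sources(), "sources")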
lib/distributions/delta.py | joelouismarino/variational_rl | 15 | 6633207 | <filename>lib/distributions/delta.py
import torch
import numpy as np
from numbers import Number
from torch.distributions import constraints
from torch.distributions import Distribution
from torch.distributions.utils import broadcast_all
class Delta(Distribution):
arg_constraints = {'loc': constraints.real}
support = constraints.real
has_rsample = True
@property
def mean(self):
return self.loc
def __init__(self, loc, validate_args=None):
self.loc = loc # broadcast_all(loc)
if isinstance(loc, Number):
batch_shape = torch.Size()
else:
batch_shape = self.loc.size()
super(Delta, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Delta, _instance)
batch_shape = torch.Size(batch_shape)
new.loc = self.loc.expand(batch_shape)
super(Delta, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return self.loc.expand(shape)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
return self.loc
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
if (value == self.loc).all().item():
return 0.
else:
            # a value away from the point mass has probability zero, i.e. log-probability -inf
            return torch.tensor(-np.inf)
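# Editor's illustration (not part of the original module): a Delta puts all of
# its mass at `loc`, so sampling just returns `loc` and rsample() keeps the
# result attached to the autograd graph.
def _demo_delta():
    loc = torch.zeros(3, requires_grad=True)
    d = Delta(loc)
    assert torch.equal(d.sample(), loc.detach())
    assert d.rsample().requires_grad
    assert d.log_prob(loc.detach().clone()) == 0.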
| <filename>lib/distributions/delta.py
import torch
import numpy as np
from numbers import Number
from torch.distributions import constraints
from torch.distributions import Distribution
from torch.distributions.utils import broadcast_all
class Delta(Distribution):
arg_constraints = {'loc': constraints.real}
support = constraints.real
has_rsample = True
@property
def mean(self):
return self.loc
def __init__(self, loc, validate_args=None):
self.loc = loc # broadcast_all(loc)
if isinstance(loc, Number):
batch_shape = torch.Size()
else:
batch_shape = self.loc.size()
super(Delta, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Delta, _instance)
batch_shape = torch.Size(batch_shape)
new.loc = self.loc.expand(batch_shape)
super(Delta, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return self.loc.expand(shape)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
return self.loc
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
if (value == self.loc).all().item():
return 0.
else:
            # a value away from the point mass has probability zero, i.e. log-probability -inf
            return torch.tensor(-np.inf)
| en | 0.467346 | # broadcast_all(loc) | 2.333567 | 2 |
yt_dlp/extractor/skyit.py | nxtreaming/yt-dlp | 11 | 6633208 | from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
dict_get,
int_or_none,
parse_duration,
unified_timestamp,
)
class SkyItPlayerIE(InfoExtractor):
IE_NAME = 'player.sky.it'
_VALID_URL = r'https?://player\.sky\.it/player/(?:external|social)\.html\?.*?\bid=(?P<id>\d+)'
_GEO_BYPASS = False
_DOMAIN = 'sky'
_PLAYER_TMPL = 'https://player.sky.it/player/external.html?id=%s&domain=%s'
# http://static.sky.it/static/skyplayer/conf.json
_TOKEN_MAP = {
'cielo': '<KEY>',
'hotclub': '<KEY>',
'mtv8': 'A5Nn9GGb326CI7vP5e27d7E4PIaQjota',
'salesforce': 'C6D585FD1615272C98DE38235F38BD86',
'sitocommerciale': '<KEY>',
'sky': '<KEY>',
'skyacademy': 'A6LAn7EkO2Q26FRy0IAMBekX6jzDXYL3',
'skyarte': 'LWk29hfiU39NNdq87ePeRach3nzTSV20o0lTv2001Cd',
'theupfront': 'PRSGmDMsg6QMGc04Obpoy7Vsbn7i2Whp',
}
def _player_url_result(self, video_id):
return self.url_result(
self._PLAYER_TMPL % (video_id, self._DOMAIN),
SkyItPlayerIE.ie_key(), video_id)
def _parse_video(self, video, video_id):
title = video['title']
is_live = video.get('type') == 'live'
hls_url = video.get(('streaming' if is_live else 'hls') + '_url')
if not hls_url and video.get('geoblock' if is_live else 'geob'):
self.raise_geo_restricted(countries=['IT'])
if is_live:
formats = self._extract_m3u8_formats(hls_url, video_id, 'mp4')
else:
formats = self._extract_akamai_formats(
hls_url, video_id, {'http': 'videoplatform.sky.it'})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': dict_get(video, ('video_still', 'video_still_medium', 'thumb')),
'description': video.get('short_desc') or None,
'timestamp': unified_timestamp(video.get('create_date')),
'duration': int_or_none(video.get('duration_sec')) or parse_duration(video.get('duration')),
'is_live': is_live,
}
def _real_extract(self, url):
video_id = self._match_id(url)
domain = compat_parse_qs(compat_urllib_parse_urlparse(
url).query).get('domain', [None])[0]
token = dict_get(self._TOKEN_MAP, (domain, 'sky'))
video = self._download_json(
'https://apid.sky.it/vdp/v1/getVideoData',
video_id, query={
'caller': 'sky',
'id': video_id,
'token': token
}, headers=self.geo_verification_headers())
return self._parse_video(video, video_id)
class SkyItVideoIE(SkyItPlayerIE):
IE_NAME = 'video.sky.it'
_VALID_URL = r'https?://(?:masterchef|video|xfactor)\.sky\.it(?:/[^/]+)*/video/[0-9a-z-]+-(?P<id>\d+)'
_TESTS = [{
'url': 'https://video.sky.it/news/mondo/video/uomo-ucciso-da-uno-squalo-in-australia-631227',
'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
'info_dict': {
'id': '631227',
'ext': 'mp4',
'title': 'Uomo ucciso da uno squalo in Australia',
'timestamp': 1606036192,
'upload_date': '20201122',
}
}, {
'url': 'https://xfactor.sky.it/video/x-factor-2020-replay-audizioni-1-615820',
'only_matching': True,
}, {
'url': 'https://masterchef.sky.it/video/masterchef-9-cosa-e-successo-nella-prima-puntata-562831',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._player_url_result(video_id)
class SkyItVideoLiveIE(SkyItPlayerIE):
IE_NAME = 'video.sky.it:live'
_VALID_URL = r'https?://video\.sky\.it/diretta/(?P<id>[^/?&#]+)'
_TEST = {
'url': 'https://video.sky.it/diretta/tg24',
'info_dict': {
'id': '1',
'ext': 'mp4',
'title': r're:Diretta TG24 \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
'description': 'Guarda la diretta streaming di SkyTg24, segui con Sky tutti gli appuntamenti e gli speciali di Tg24.',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
asset_id = str(self._search_nextjs_data(webpage, display_id)['props']['initialState']['livePage']['content']['asset_id'])
livestream = self._download_json(
'https://apid.sky.it/vdp/v1/getLivestream',
asset_id, query={'id': asset_id})
return self._parse_video(livestream, asset_id)
class SkyItIE(SkyItPlayerIE):
IE_NAME = 'sky.it'
_VALID_URL = r'https?://(?:sport|tg24)\.sky\.it(?:/[^/]+)*/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://sport.sky.it/calcio/serie-a/2020/11/21/juventus-cagliari-risultato-gol',
'info_dict': {
'id': '631201',
'ext': 'mp4',
'title': 'Un rosso alla violenza: in campo per i diritti delle donne',
'upload_date': '20201121',
'timestamp': 1605995753,
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
'url': 'https://tg24.sky.it/mondo/2020/11/22/australia-squalo-uccide-uomo',
'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
'info_dict': {
'id': '631227',
'ext': 'mp4',
'title': 'Uomo ucciso da uno squalo in Australia',
'timestamp': 1606036192,
'upload_date': '20201122',
},
}]
_VIDEO_ID_REGEX = r'data-videoid="(\d+)"'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
self._VIDEO_ID_REGEX, webpage, 'video id')
return self._player_url_result(video_id)
class SkyItAcademyIE(SkyItIE):
IE_NAME = 'skyacademy.it'
_VALID_URL = r'https?://(?:www\.)?skyacademy\.it(?:/[^/]+)*/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://www.skyacademy.it/eventi-speciali/2019/07/05/a-lezione-di-cinema-con-sky-academy-/',
'md5': 'ced5c26638b7863190cbc44dd6f6ba08',
'info_dict': {
'id': '523458',
'ext': 'mp4',
'title': 'Sky Academy "The Best CineCamp 2019"',
'timestamp': 1562843784,
'upload_date': '20190711',
}
}]
_DOMAIN = 'skyacademy'
_VIDEO_ID_REGEX = r'id="news-videoId_(\d+)"'
class SkyItArteIE(SkyItIE):
IE_NAME = 'arte.sky.it'
_VALID_URL = r'https?://arte\.sky\.it/video/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://arte.sky.it/video/serie-musei-venezia-collezionismo-12-novembre/',
'md5': '515aee97b87d7a018b6c80727d3e7e17',
'info_dict': {
'id': '627926',
'ext': 'mp4',
'title': "Musei <NAME> alla Ca' d'Oro Palazzo Grimani",
'upload_date': '20201106',
'timestamp': 1604664493,
}
}]
_DOMAIN = 'skyarte'
_VIDEO_ID_REGEX = r'(?s)<iframe[^>]+src="(?:https:)?//player\.sky\.it/player/external\.html\?[^"]*\bid=(\d+)'
class CieloTVItIE(SkyItIE):
IE_NAME = 'cielotv.it'
_VALID_URL = r'https?://(?:www\.)?cielotv\.it/video/(?P<id>[^.]+)\.html'
_TESTS = [{
'url': 'https://www.cielotv.it/video/Il-lunedi-e-sempre-un-dramma.html',
'md5': 'c4deed77552ba901c2a0d9258320304b',
'info_dict': {
'id': '499240',
'ext': 'mp4',
'title': 'Il lunedì è sempre un dramma',
'upload_date': '20190329',
'timestamp': 1553862178,
}
}]
_DOMAIN = 'cielo'
_VIDEO_ID_REGEX = r'videoId\s*=\s*"(\d+)"'
class TV8ItIE(SkyItVideoIE):
IE_NAME = 'tv8.it'
_VALID_URL = r'https?://tv8\.it/showvideo/(?P<id>\d+)'
_TESTS = [{
'url': 'https://tv8.it/showvideo/630529/ogni-mattina-ucciso-asino-di-andrea-lo-cicero/18-11-2020/',
'md5': '9ab906a3f75ea342ed928442f9dabd21',
'info_dict': {
'id': '630529',
'ext': 'mp4',
'title': 'Ogni mattina - Ucciso asino di Andrea Lo Cicero',
'timestamp': 1605721374,
'upload_date': '20201118',
}
}]
_DOMAIN = 'mtv8'
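# Editor's sketch (not part of yt-dlp): how the player URL pattern exposes the
# numeric id that _player_url_result() re-templates into _PLAYER_TMPL.
def _demo_player_url_match():
    import re
    m = re.match(SkyItPlayerIE._VALID_URL,
                 'https://player.sky.it/player/external.html?id=631227&domain=sky')
    assert m and m.group('id') == '631227'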
| from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
dict_get,
int_or_none,
parse_duration,
unified_timestamp,
)
class SkyItPlayerIE(InfoExtractor):
IE_NAME = 'player.sky.it'
_VALID_URL = r'https?://player\.sky\.it/player/(?:external|social)\.html\?.*?\bid=(?P<id>\d+)'
_GEO_BYPASS = False
_DOMAIN = 'sky'
_PLAYER_TMPL = 'https://player.sky.it/player/external.html?id=%s&domain=%s'
# http://static.sky.it/static/skyplayer/conf.json
_TOKEN_MAP = {
'cielo': '<KEY>',
'hotclub': '<KEY>',
'mtv8': 'A5Nn9GGb326CI7vP5e27d7E4PIaQjota',
'salesforce': 'C6D585FD1615272C98DE38235F38BD86',
'sitocommerciale': '<KEY>',
'sky': '<KEY>',
'skyacademy': 'A6LAn7EkO2Q26FRy0IAMBekX6jzDXYL3',
'skyarte': 'LWk29hfiU39NNdq87ePeRach3nzTSV20o0lTv2001Cd',
'theupfront': 'PRSGmDMsg6QMGc04Obpoy7Vsbn7i2Whp',
}
def _player_url_result(self, video_id):
return self.url_result(
self._PLAYER_TMPL % (video_id, self._DOMAIN),
SkyItPlayerIE.ie_key(), video_id)
def _parse_video(self, video, video_id):
title = video['title']
is_live = video.get('type') == 'live'
hls_url = video.get(('streaming' if is_live else 'hls') + '_url')
if not hls_url and video.get('geoblock' if is_live else 'geob'):
self.raise_geo_restricted(countries=['IT'])
if is_live:
formats = self._extract_m3u8_formats(hls_url, video_id, 'mp4')
else:
formats = self._extract_akamai_formats(
hls_url, video_id, {'http': 'videoplatform.sky.it'})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': dict_get(video, ('video_still', 'video_still_medium', 'thumb')),
'description': video.get('short_desc') or None,
'timestamp': unified_timestamp(video.get('create_date')),
'duration': int_or_none(video.get('duration_sec')) or parse_duration(video.get('duration')),
'is_live': is_live,
}
def _real_extract(self, url):
video_id = self._match_id(url)
domain = compat_parse_qs(compat_urllib_parse_urlparse(
url).query).get('domain', [None])[0]
token = dict_get(self._TOKEN_MAP, (domain, 'sky'))
video = self._download_json(
'https://apid.sky.it/vdp/v1/getVideoData',
video_id, query={
'caller': 'sky',
'id': video_id,
'token': token
}, headers=self.geo_verification_headers())
return self._parse_video(video, video_id)
class SkyItVideoIE(SkyItPlayerIE):
IE_NAME = 'video.sky.it'
_VALID_URL = r'https?://(?:masterchef|video|xfactor)\.sky\.it(?:/[^/]+)*/video/[0-9a-z-]+-(?P<id>\d+)'
_TESTS = [{
'url': 'https://video.sky.it/news/mondo/video/uomo-ucciso-da-uno-squalo-in-australia-631227',
'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
'info_dict': {
'id': '631227',
'ext': 'mp4',
'title': 'Uomo ucciso da uno squalo in Australia',
'timestamp': 1606036192,
'upload_date': '20201122',
}
}, {
'url': 'https://xfactor.sky.it/video/x-factor-2020-replay-audizioni-1-615820',
'only_matching': True,
}, {
'url': 'https://masterchef.sky.it/video/masterchef-9-cosa-e-successo-nella-prima-puntata-562831',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._player_url_result(video_id)
class SkyItVideoLiveIE(SkyItPlayerIE):
IE_NAME = 'video.sky.it:live'
_VALID_URL = r'https?://video\.sky\.it/diretta/(?P<id>[^/?&#]+)'
_TEST = {
'url': 'https://video.sky.it/diretta/tg24',
'info_dict': {
'id': '1',
'ext': 'mp4',
'title': r're:Diretta TG24 \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
'description': 'Guarda la diretta streaming di SkyTg24, segui con Sky tutti gli appuntamenti e gli speciali di Tg24.',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
asset_id = str(self._search_nextjs_data(webpage, display_id)['props']['initialState']['livePage']['content']['asset_id'])
livestream = self._download_json(
'https://apid.sky.it/vdp/v1/getLivestream',
asset_id, query={'id': asset_id})
return self._parse_video(livestream, asset_id)
class SkyItIE(SkyItPlayerIE):
IE_NAME = 'sky.it'
_VALID_URL = r'https?://(?:sport|tg24)\.sky\.it(?:/[^/]+)*/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://sport.sky.it/calcio/serie-a/2020/11/21/juventus-cagliari-risultato-gol',
'info_dict': {
'id': '631201',
'ext': 'mp4',
'title': 'Un rosso alla violenza: in campo per i diritti delle donne',
'upload_date': '20201121',
'timestamp': 1605995753,
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
'url': 'https://tg24.sky.it/mondo/2020/11/22/australia-squalo-uccide-uomo',
'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
'info_dict': {
'id': '631227',
'ext': 'mp4',
'title': 'Uomo ucciso da uno squalo in Australia',
'timestamp': 1606036192,
'upload_date': '20201122',
},
}]
_VIDEO_ID_REGEX = r'data-videoid="(\d+)"'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
self._VIDEO_ID_REGEX, webpage, 'video id')
return self._player_url_result(video_id)
class SkyItAcademyIE(SkyItIE):
IE_NAME = 'skyacademy.it'
_VALID_URL = r'https?://(?:www\.)?skyacademy\.it(?:/[^/]+)*/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://www.skyacademy.it/eventi-speciali/2019/07/05/a-lezione-di-cinema-con-sky-academy-/',
'md5': 'ced5c26638b7863190cbc44dd6f6ba08',
'info_dict': {
'id': '523458',
'ext': 'mp4',
'title': 'Sky Academy "The Best CineCamp 2019"',
'timestamp': 1562843784,
'upload_date': '20190711',
}
}]
_DOMAIN = 'skyacademy'
_VIDEO_ID_REGEX = r'id="news-videoId_(\d+)"'
class SkyItArteIE(SkyItIE):
IE_NAME = 'arte.sky.it'
_VALID_URL = r'https?://arte\.sky\.it/video/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://arte.sky.it/video/serie-musei-venezia-collezionismo-12-novembre/',
'md5': '515aee97b87d7a018b6c80727d3e7e17',
'info_dict': {
'id': '627926',
'ext': 'mp4',
'title': "Musei <NAME> alla Ca' d'Oro Palazzo Grimani",
'upload_date': '20201106',
'timestamp': 1604664493,
}
}]
_DOMAIN = 'skyarte'
_VIDEO_ID_REGEX = r'(?s)<iframe[^>]+src="(?:https:)?//player\.sky\.it/player/external\.html\?[^"]*\bid=(\d+)'
class CieloTVItIE(SkyItIE):
IE_NAME = 'cielotv.it'
_VALID_URL = r'https?://(?:www\.)?cielotv\.it/video/(?P<id>[^.]+)\.html'
_TESTS = [{
'url': 'https://www.cielotv.it/video/Il-lunedi-e-sempre-un-dramma.html',
'md5': 'c4deed77552ba901c2a0d9258320304b',
'info_dict': {
'id': '499240',
'ext': 'mp4',
'title': 'Il lunedì è sempre un dramma',
'upload_date': '20190329',
'timestamp': 1553862178,
}
}]
_DOMAIN = 'cielo'
_VIDEO_ID_REGEX = r'videoId\s*=\s*"(\d+)"'
class TV8ItIE(SkyItVideoIE):
IE_NAME = 'tv8.it'
_VALID_URL = r'https?://tv8\.it/showvideo/(?P<id>\d+)'
_TESTS = [{
'url': 'https://tv8.it/showvideo/630529/ogni-mattina-ucciso-asino-di-andrea-lo-cicero/18-11-2020/',
'md5': '9ab906a3f75ea342ed928442f9dabd21',
'info_dict': {
'id': '630529',
'ext': 'mp4',
'title': 'Ogni mattina - Ucciso asino di Andrea Lo Cicero',
'timestamp': 1605721374,
'upload_date': '20201118',
}
}]
_DOMAIN = 'mtv8'
| en | 0.230158 | # http://static.sky.it/static/skyplayer/conf.json #]+)' # m3u8 download #]+)' #]+)' #]+)' | 2.065862 | 2 |
localization_service/database.py | vladbragoi/indoor_localization_system | 0 | 6633209 | import configparser
import csv
from cloudant.client import CouchDB, CouchDatabase
from cloudant.design_document import DesignDocument
from cloudant.query import Query
from data import Data
from utils import inherit_docstring
from node import Node
ID_KEY = 'id'
X_KEY = 'x'
Y_KEY = 'y'
BORDERS_KEY = 'borders'
DIRECTION_KEY = 'direction'
MEASURE_KEY = 'measure'
MV_X_KEY = 'mv[x]'
MV_Y_KEY = 'mv[y]'
MV_Z_KEY = 'mv[z]'
# TYPE_KEY = 'type' # feature not used
CSV_FIELDS = [ID_KEY, X_KEY, Y_KEY, BORDERS_KEY, DIRECTION_KEY, MEASURE_KEY, MV_X_KEY, MV_Y_KEY, MV_Z_KEY] \
+ Data.get_ap5ghz() + Data.get_ap24ghz()
_client = None
fingerprints_db = ""
localization_db = ""
def initialize():
"""Starts the connection with the server, which parameters
are specified in the configuration file: config.ini.
"""
global _client, fingerprints_db, localization_db
config = configparser.ConfigParser()
config.read('config.ini')
url = config['Database']['url']
username = config['Database']['username']
password = config['Database']['password']
localization_db = config['Database']['localization_db']
fingerprints_db = config['Database']['fingerprinting_db']
_client = CouchDB(username, password, url=url, connect=True)
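# Editor's note — an illustrative config.ini shape that initialize() expects.
# Every value below is a placeholder, not a real endpoint or credential.
#
#   [Database]
#   url = http://localhost:5984
#   username = admin
#   password = changeme
#   localization_db = localization
#   fingerprinting_db = fingerprints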
def get_localization_db():
"""This function creates a localization db instance and returns it to the caller.
:return localization_db_instance: the instance
"""
localization_db_instance = _start(localization_db)
# Add filter function
d_doc = DesignDocument(localization_db_instance, '_design/online')
if not d_doc.exists():
# ignore documents that are deleted or having type != `data_doc`
d_doc['filters'] = {
'dataDoc': 'function(doc) { '
'if (doc._deleted) { return false; } '
'if (doc.type == \'data_doc\') { return true; }'
'return false; '
'}'
}
d_doc.save()
localization_db_instance.set_revision_limit(10)
return localization_db_instance
def _start(db_name):
"""This function creates an instance of the database specified and returns it to the caller.
:return: the CouchDatabase instance
"""
if _client is None:
raise Exception("Should launch initialize method before.")
return CouchDatabase(_client, db_name)
def close():
"""Closes connection with server."""
_client.disconnect()
@inherit_docstring(CouchDatabase.infinite_changes)
def changes(db_name, filter_function):
"""
:param db_name: the source database name for changes
:param filter_function: function for filtering documents in changes
:return: an infinite_changes object
.. seealso:: :ref:`CouchDatabase.infinite_changes()`
"""
database = _start(db_name)
return database.infinite_changes(
feed='continuous',
include_docs=True,
filter=filter_function,
since='now')
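# Editor's sketch (illustrative only): following the continuous feed with the
# 'online/dataDoc' filter that get_localization_db() installs. It needs a
# reachable CouchDB, so it is not invoked anywhere in this module.
def _demo_follow_changes():
    initialize()
    try:
        for change in changes(localization_db, 'online/dataDoc'):
            print(change['doc'])  # include_docs=True attaches the full document
            break
    finally:
        close()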
def get_nodes(db_name=None):
"""Returns a list of nodes from the specified database.
If None is passed, default fingerprinting db will be used.
:param db_name: the database name
:return: a list of nodes
"""
if db_name is None:
db_name = fingerprints_db
query = Query(_start(db_name),
selector={'_id': {'$gt': None}},
fields=['_id', 'x', 'y', 'borders'],
use_index='_all_docs')
# return list(query.result) # return a list of dicts
return [Node(doc['_id'], x=doc['x'], y=doc['y'], borders=doc['borders']) for doc in query.result]
def load_nodes_from_csv_file(path):
"""Return a list of nodes, loaded from specified csv file.
:param path: the path to the file
:return: a list of nodes
"""
with open(path, 'r') as csv_file:
rows = csv.DictReader(csv_file)
return [Node(row['id'], x=row['x'], y=row['y'], borders=row['borders']) for row in rows]
def _convert_old_document_type(filename, db_name):
database = _start(db_name)
query = Query(database, selector={'_id': {'$gt': None}}, fields=['_id'], use_index='_all_docs')
with open(filename, mode='w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=CSV_FIELDS)
writer.writeheader()
doc_list = [doc['_id'] for doc in query.result]
doc_list.sort(key=lambda x: int(x))
for doc_id in doc_list:
print("Document", doc_id)
document = database[doc_id]
directions = document['Misurazioni']
for direction in directions.keys():
measures = directions[direction]
measure_keys = list(measures.keys())
measure_keys.sort(key=lambda x: int(x.replace('Misurazione ', '')))
for measure in measure_keys:
mv = measures[measure]['Vettore Magnetico']
dictionary = get_initialized_dict()
dictionary[ID_KEY] = doc_id
dictionary[X_KEY] = document['X position']
dictionary[Y_KEY] = document['Y position']
dictionary[BORDERS_KEY] = ''.join(Data.convert_direction(document['Borders']))
dictionary[DIRECTION_KEY] = ''.join(Data.convert_direction(direction))
dictionary[MEASURE_KEY] = measure.replace('Misurazione ', '')
dictionary[MV_X_KEY] = mv[0]
dictionary[MV_Y_KEY] = mv[1]
dictionary[MV_Z_KEY] = mv[2]
# WIFI LIST
rssi_list = list(measures[measure].keys())
if 'Vettore Magnetico' in rssi_list:
rssi_list.remove('Vettore Magnetico')
rssi_list.sort(key=lambda x: int(x.replace('RSSI ', ''))) # order list on number base
for rssi_key in rssi_list:
rssi = measures[measure][rssi_key]
if rssi['id'].strip() in CSV_FIELDS:
dictionary[rssi['id']] = rssi['value']
writer.writerow(dictionary)
print("\t", direction, "converted.")
def _convert_new_document_type(filename, db_name):
""" ``todo:: ble beacons and magnetic field need to be saved to the csv file ``
"""
database = _start(db_name)
query = Query(database, selector={'_id': {'$gt': None}}, fields=['_id'], use_index='_all_docs')
with open(filename, mode='w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=CSV_FIELDS)
writer.writeheader()
doc_list = [doc['_id'] for doc in query.result]
doc_list.sort(key=lambda x: int(x))
for doc_id in doc_list:
print("Document", doc_id)
document = database[doc_id]
measures = document['measures']
for direction in measures.keys():
wifi_list = measures[direction]['wifi']
# mv_list = measures[direction]['mv']
# ble = measures[direction]['ble'] # feature not used
index = 1
for measure in wifi_list:
dictionary = get_initialized_dict()
dictionary[ID_KEY] = document['_id']
dictionary[X_KEY] = document['x']
dictionary[Y_KEY] = document['y']
dictionary[BORDERS_KEY] = ''.join(Data.convert_direction(document['borders']))
dictionary[DIRECTION_KEY] = ''.join(Data.convert_direction(direction))
dictionary[MEASURE_KEY] = index
for wifi_node in measure:
if wifi_node['id'] in dictionary.keys():
dictionary[wifi_node['id']] = wifi_node['value']
writer.writerow(dictionary)
index += 1
print("\t", direction, "converted.")
def export_db_to_csv(filename, doc_type='new', db_name=None):
"""Exports all documents from specified database to a csv file.
:param filename: the csv file where to export documents
:param doc_type: the structure type of documents
:param db_name: the database to export
.. note:: Use 'new' as doc_type for new json document structure, 'old', for old one.
.. seealso:: :ref:`converter.py` for the new json document structure
"""
if _client is None:
initialize()
if db_name is None:
db_name = fingerprints_db
if doc_type == 'new':
_convert_new_document_type(filename, db_name)
else:
_convert_old_document_type(filename, db_name)
def get_initialized_dict():
"""Initializes a dictionary with pairs: ap mac address and -110 default rssi value.
:return: a dictionary populated by ap mac addresses and -110 default rssi values
"""
dictionary = {}.fromkeys(CSV_FIELDS)
for mac in Data.get_ap5ghz():
dictionary[mac] = -110
for mac in Data.get_ap24ghz():
dictionary[mac] = -110
return dictionary
if __name__ == '__main__':
# TESTS
initialize()
export_db_to_csv("fingerprints.csv", doc_type='old', db_name='fingerprints_backup')
close()
| import configparser
import csv
from cloudant.client import CouchDB, CouchDatabase
from cloudant.design_document import DesignDocument
from cloudant.query import Query
from data import Data
from utils import inherit_docstring
from node import Node
ID_KEY = 'id'
X_KEY = 'x'
Y_KEY = 'y'
BORDERS_KEY = 'borders'
DIRECTION_KEY = 'direction'
MEASURE_KEY = 'measure'
MV_X_KEY = 'mv[x]'
MV_Y_KEY = 'mv[y]'
MV_Z_KEY = 'mv[z]'
# TYPE_KEY = 'type' # feature not used
CSV_FIELDS = [ID_KEY, X_KEY, Y_KEY, BORDERS_KEY, DIRECTION_KEY, MEASURE_KEY, MV_X_KEY, MV_Y_KEY, MV_Z_KEY] \
+ Data.get_ap5ghz() + Data.get_ap24ghz()
_client = None
fingerprints_db = ""
localization_db = ""
def initialize():
"""Starts the connection with the server, which parameters
are specified in the configuration file: config.ini.
"""
global _client, fingerprints_db, localization_db
config = configparser.ConfigParser()
config.read('config.ini')
url = config['Database']['url']
username = config['Database']['username']
password = config['Database']['password']
localization_db = config['Database']['localization_db']
fingerprints_db = config['Database']['fingerprinting_db']
_client = CouchDB(username, password, url=url, connect=True)
def get_localization_db():
"""This function creates a localization db instance and returns it to the caller.
:return localization_db_instance: the instance
"""
localization_db_instance = _start(localization_db)
# Add filter function
d_doc = DesignDocument(localization_db_instance, '_design/online')
if not d_doc.exists():
# ignore documents that are deleted or having type != `data_doc`
d_doc['filters'] = {
'dataDoc': 'function(doc) { '
'if (doc._deleted) { return false; } '
'if (doc.type == \'data_doc\') { return true; }'
'return false; '
'}'
}
d_doc.save()
localization_db_instance.set_revision_limit(10)
return localization_db_instance
def _start(db_name):
"""This function creates an instance of the database specified and returns it to the caller.
:return: the CouchDatabase instance
"""
if _client is None:
raise Exception("Should launch initialize method before.")
return CouchDatabase(_client, db_name)
def close():
"""Closes connection with server."""
_client.disconnect()
@inherit_docstring(CouchDatabase.infinite_changes)
def changes(db_name, filter_function):
"""
:param db_name: the source database name for changes
:param filter_function: function for filtering documents in changes
:return: an infinite_changes object
.. seealso:: :ref:`CouchDatabase.infinite_changes()`
"""
database = _start(db_name)
return database.infinite_changes(
feed='continuous',
include_docs=True,
filter=filter_function,
since='now')
def get_nodes(db_name=None):
"""Returns a list of nodes from the specified database.
If None is passed, default fingerprinting db will be used.
:param db_name: the database name
:return: a list of nodes
"""
if db_name is None:
db_name = fingerprints_db
query = Query(_start(db_name),
selector={'_id': {'$gt': None}},
fields=['_id', 'x', 'y', 'borders'],
use_index='_all_docs')
# return list(query.result) # return a list of dicts
return [Node(doc['_id'], x=doc['x'], y=doc['y'], borders=doc['borders']) for doc in query.result]
def load_nodes_from_csv_file(path):
"""Return a list of nodes, loaded from specified csv file.
:param path: the path to the file
:return: a list of nodes
"""
with open(path, 'r') as csv_file:
rows = csv.DictReader(csv_file)
return [Node(row['id'], x=row['x'], y=row['y'], borders=row['borders']) for row in rows]
def _convert_old_document_type(filename, db_name):
database = _start(db_name)
query = Query(database, selector={'_id': {'$gt': None}}, fields=['_id'], use_index='_all_docs')
with open(filename, mode='w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=CSV_FIELDS)
writer.writeheader()
doc_list = [doc['_id'] for doc in query.result]
doc_list.sort(key=lambda x: int(x))
for doc_id in doc_list:
print("Document", doc_id)
document = database[doc_id]
directions = document['Misurazioni']
for direction in directions.keys():
measures = directions[direction]
measure_keys = list(measures.keys())
measure_keys.sort(key=lambda x: int(x.replace('Misurazione ', '')))
for measure in measure_keys:
mv = measures[measure]['Vettore Magnetico']
dictionary = get_initialized_dict()
dictionary[ID_KEY] = doc_id
dictionary[X_KEY] = document['X position']
dictionary[Y_KEY] = document['Y position']
dictionary[BORDERS_KEY] = ''.join(Data.convert_direction(document['Borders']))
dictionary[DIRECTION_KEY] = ''.join(Data.convert_direction(direction))
dictionary[MEASURE_KEY] = measure.replace('Misurazione ', '')
dictionary[MV_X_KEY] = mv[0]
dictionary[MV_Y_KEY] = mv[1]
dictionary[MV_Z_KEY] = mv[2]
# WIFI LIST
rssi_list = list(measures[measure].keys())
if 'Vettore Magnetico' in rssi_list:
rssi_list.remove('Vettore Magnetico')
rssi_list.sort(key=lambda x: int(x.replace('RSSI ', ''))) # order list on number base
for rssi_key in rssi_list:
rssi = measures[measure][rssi_key]
if rssi['id'].strip() in CSV_FIELDS:
dictionary[rssi['id']] = rssi['value']
writer.writerow(dictionary)
print("\t", direction, "converted.")
def _convert_new_document_type(filename, db_name):
""" ``todo:: ble beacons and magnetic field need to be saved to the csv file ``
"""
database = _start(db_name)
query = Query(database, selector={'_id': {'$gt': None}}, fields=['_id'], use_index='_all_docs')
with open(filename, mode='w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=CSV_FIELDS)
writer.writeheader()
doc_list = [doc['_id'] for doc in query.result]
doc_list.sort(key=lambda x: int(x))
for doc_id in doc_list:
print("Document", doc_id)
document = database[doc_id]
measures = document['measures']
for direction in measures.keys():
wifi_list = measures[direction]['wifi']
# mv_list = measures[direction]['mv']
# ble = measures[direction]['ble'] # feature not used
index = 1
for measure in wifi_list:
dictionary = get_initialized_dict()
dictionary[ID_KEY] = document['_id']
dictionary[X_KEY] = document['x']
dictionary[Y_KEY] = document['y']
dictionary[BORDERS_KEY] = ''.join(Data.convert_direction(document['borders']))
dictionary[DIRECTION_KEY] = ''.join(Data.convert_direction(direction))
dictionary[MEASURE_KEY] = index
for wifi_node in measure:
if wifi_node['id'] in dictionary.keys():
dictionary[wifi_node['id']] = wifi_node['value']
writer.writerow(dictionary)
index += 1
print("\t", direction, "converted.")
def export_db_to_csv(filename, doc_type='new', db_name=None):
"""Exports all documents from specified database to a csv file.
:param filename: the csv file where to export documents
:param doc_type: the structure type of documents
:param db_name: the database to export
.. note:: Use 'new' as doc_type for new json document structure, 'old', for old one.
.. seealso:: :ref:`converter.py` for the new json document structure
"""
if _client is None:
initialize()
if db_name is None:
db_name = fingerprints_db
if doc_type == 'new':
_convert_new_document_type(filename, db_name)
else:
_convert_old_document_type(filename, db_name)
def get_initialized_dict():
"""Initializes a dictionary with pairs: ap mac address and -110 default rssi value.
:return: a dictionary populated by ap mac addresses and -110 default rssi values
"""
dictionary = {}.fromkeys(CSV_FIELDS)
for mac in Data.get_ap5ghz():
dictionary[mac] = -110
for mac in Data.get_ap24ghz():
dictionary[mac] = -110
return dictionary
if __name__ == '__main__':
# TESTS
initialize()
export_db_to_csv("fingerprints.csv", doc_type='old', db_name='fingerprints_backup')
close()
| en | 0.672612 | # TYPE_KEY = 'type' # feature not used Starts the connection with the server, which parameters are specified in the configuration file: config.ini. This function creates a localization db instance and returns it to the caller. :return localization_db_instance: the instance # Add filter function # ignore documents that are deleted or having type != `data_doc` This function creates an instance of the database specified and returns it to the caller. :return: the CouchDatabase instance Closes connection with server. :param db_name: the source database name for changes :param filter_function: function for filtering documents in changes :return: an infinite_changes object .. seealso:: :ref:`CouchDatabase.infinite_changes()` Returns a list of nodes from the specified database. If None is passed, default fingerprinting db will be used. :param db_name: the database name :return: a list of nodes # return list(query.result) # return a list of dicts Return a list of nodes, loaded from specified csv file. :param path: the path to the file :return: a list of nodes # WIFI LIST # order list on number base ``todo:: ble beacons and magnetic field need to be saved to the csv file `` # mv_list = measures[direction]['mv'] # ble = measures[direction]['ble'] # feature not used Exports all documents from specified database to a csv file. :param filename: the csv file where to export documents :param doc_type: the structure type of documents :param db_name: the database to export .. note:: Use 'new' as doc_type for new json document structure, 'old', for old one. .. seealso:: :ref:`converter.py` for the new json document structure Initializes a dictionary with pairs: ap mac address and -110 default rssi value. :return: a dictionary populated by ap mac addresses and -110 default rssi values # TESTS | 2.538249 | 3 |
src/pyrtable/query.py | vilarneto/pyrtable | 6 | 6633210 | <reponame>vilarneto/pyrtable<filename>src/pyrtable/query.py
import collections.abc
from typing import TYPE_CHECKING, Generic, Iterable, Iterator, TypeVar, Type, Optional
from ._baseandtable import _BaseAndTableSettableProtocol, BaseAndTable
if TYPE_CHECKING:
from .filters.base import BaseFilter
from .record import BaseRecord
RT = TypeVar('RT', bound='BaseRecord')
QT = TypeVar('QT', bound='RecordQuery')
class RecordQuery(BaseAndTable, Generic[RT, QT], Iterable[RT],
collections.abc.Iterable, _BaseAndTableSettableProtocol):
"""
A (potentially under construction) query for records in a table. Also represents the starting point for queries
to be made over a :class:`BaseRecord` derived class, exposed through the `objects` class attribute.
"""
def set_base_id(self, base_id: str) -> 'QT':
"""
Change the query's base ID.
:return: The resulting query.
"""
result = self._shallow_copy()
result._base_id = base_id
return result
def set_table_id(self, table_id: str) -> 'QT':
"""
Change the query's table ID.
:return: The resulting query.
"""
result = self._shallow_copy()
result._table_id = table_id
return result
_record_class: Type['BaseRecord']
_initialised = False
_is_empty_query = False
def __init__(self, record_class: Type['BaseRecord'], flt: Optional['BaseFilter'] = None):
super().__init__(base_id=record_class.get_class_base_id(), table_id=record_class.get_class_table_id())
self._record_class = record_class
self._filter = flt
def _shallow_copy(self) -> QT:
import copy
result = copy.copy(self)
return result
def _shallow_copy_and_initialise(self) -> QT:
result = self._shallow_copy()
result._initialised = True
return result
def all(self) -> QT:
"""
Return a query for all records, given that they are not filtered out by other criteria.
:return: The resulting query.
"""
if self._initialised:
return self
return self._shallow_copy_and_initialise()
def none(self) -> QT:
"""
Return an empty query. No server communication is needed to execute this query.
:return: The resulting query.
"""
if self._is_empty_query:
return self
result = self._shallow_copy_and_initialise()
result._is_empty_query = True
return result
def filter(self, *args, **kwargs) -> QT:
"""
Return a query that will respect the criteria given as arguments.
:return: The resulting query.
"""
if not args and not kwargs:
return self.all()
from .filters import Q
result = self._shallow_copy_and_initialise()
if result._filter is None:
result._filter = Q(*args, **kwargs)
elif isinstance(result._filter, Q):
result._filter.extend(*args, **kwargs)
else:
result._filter = Q(result._filter, *args, **kwargs)
return result
def get(self, record_id: str) -> RT:
"""
Return a single record with given identifier, if it exists in the table.
:return: The matching record.
:raises KeyError: if no record matches the given record ID.
:raises ValueError: if filters were applied (currently this is not supported).
"""
from pyrtable.context import get_default_context
# Trivial implementation for Record.objects.none().get(...)
if self._is_empty_query:
raise KeyError(record_id)
if self._filter is not None:
raise ValueError('Currently get() is not compatible with filters applied')
return get_default_context().fetch_single(
record_cls=self._record_class, record_id=record_id, base_and_table=self)
def __iter__(self) -> Iterator[RT]:
if not self._initialised:
raise ValueError('Query is not initialised. Use .all(), .filter() or .none() to initialise it.')
if self._is_empty_query:
return
from pyrtable.context import get_default_context
yield from get_default_context().fetch_many(
record_cls=self._record_class, base_and_table=self, record_filter=self._filter)
__all__ = ['RecordQuery']
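# Editor's note (illustrative only): with some BaseRecord subclass — call it
# SomeRecord, a hypothetical name — the query type above is reached through the
# `objects` attribute mentioned in the class docstring, e.g.:
#
#     SomeRecord.objects.none()                     # empty query, no request issued
#     SomeRecord.objects.all()                      # iterate it to fetch every record
#     SomeRecord.objects.filter(name='Alice')       # criteria are wrapped in a Q filter
#     SomeRecord.objects.all().set_base_id('appX')  # override the base for this query
#     SomeRecord.objects.all().get('recXXXXXXXXXXXXXX')  # single record, KeyError if absent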
| import collections.abc
from typing import TYPE_CHECKING, Generic, Iterable, Iterator, TypeVar, Type, Optional
from ._baseandtable import _BaseAndTableSettableProtocol, BaseAndTable
if TYPE_CHECKING:
from .filters.base import BaseFilter
from .record import BaseRecord
RT = TypeVar('RT', bound='BaseRecord')
QT = TypeVar('QT', bound='RecordQuery')
class RecordQuery(BaseAndTable, Generic[RT, QT], Iterable[RT],
collections.abc.Iterable, _BaseAndTableSettableProtocol):
"""
A (potentially under construction) query for records in a table. Also represents the starting point for queries
to be made over a :class:`BaseRecord` derived class, exposed through the `objects` class attribute.
"""
def set_base_id(self, base_id: str) -> 'QT':
"""
Change the query's base ID.
:return: The resulting query.
"""
result = self._shallow_copy()
result._base_id = base_id
return result
def set_table_id(self, table_id: str) -> 'QT':
"""
Change the query's table ID.
:return: The resulting query.
"""
result = self._shallow_copy()
result._table_id = table_id
return result
_record_class: Type['BaseRecord']
_initialised = False
_is_empty_query = False
def __init__(self, record_class: Type['BaseRecord'], flt: Optional['BaseFilter'] = None):
super().__init__(base_id=record_class.get_class_base_id(), table_id=record_class.get_class_table_id())
self._record_class = record_class
self._filter = flt
def _shallow_copy(self) -> QT:
import copy
result = copy.copy(self)
return result
def _shallow_copy_and_initialise(self) -> QT:
result = self._shallow_copy()
result._initialised = True
return result
def all(self) -> QT:
"""
Return a query for all records, given that they are not filtered out by other criteria.
:return: The resulting query.
"""
if self._initialised:
return self
return self._shallow_copy_and_initialise()
def none(self) -> QT:
"""
Return an empty query. No server communication is needed to execute this query.
:return: The resulting query.
"""
if self._is_empty_query:
return self
result = self._shallow_copy_and_initialise()
result._is_empty_query = True
return result
def filter(self, *args, **kwargs) -> QT:
"""
Return a query that will respect the criteria given as arguments.
:return: The resulting query.
"""
if not args and not kwargs:
return self.all()
from .filters import Q
result = self._shallow_copy_and_initialise()
if result._filter is None:
result._filter = Q(*args, **kwargs)
elif isinstance(result._filter, Q):
result._filter.extend(*args, **kwargs)
else:
result._filter = Q(result._filter, *args, **kwargs)
return result
def get(self, record_id: str) -> RT:
"""
Return a single record with given identifier, if it exists in the table.
:return: The matching record.
:raises KeyError: if no record matches the given record ID.
:raises ValueError: if filters were applied (currently this is not supported).
"""
from pyrtable.context import get_default_context
# Trivial implementation for Record.objects.none().get(...)
if self._is_empty_query:
raise KeyError(record_id)
if self._filter is not None:
raise ValueError('Currently get() is not compatible with filters applied')
return get_default_context().fetch_single(
record_cls=self._record_class, record_id=record_id, base_and_table=self)
def __iter__(self) -> Iterator[RT]:
if not self._initialised:
raise ValueError('Query is not initialised. Use .all(), .filter() or .none() to initialise it.')
if self._is_empty_query:
return
from pyrtable.context import get_default_context
yield from get_default_context().fetch_many(
record_cls=self._record_class, base_and_table=self, record_filter=self._filter)
__all__ = ['RecordQuery'] | en | 0.844858 | A (potentially under construction) query for records in a table. Also represents the starting point for queries to be made over a :class:`BaseRecord` derived class, exposed through the `objects` class attribute. Change the query's base ID. :return: The resulting query. Change the query's table ID. :return: The resulting query. Return a query for all records, given that they are not filtered out by other criteria. :return: The resulting query. Return an empty query. No server communication is needed to execute this query. :return: The resulting query. Return a query that will respect the criteria given as arguments. :return: The resulting query. Return a single record with given identifier, if it exists in the table. :return: The matching record. :raises KeyError: if no record matches the given record ID. :raises ValueError: if filters were applied (currently this is not supported). # Trivial implementation for Record.objects.none().get(...) | 2.400699 | 2 |
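A minimal usage sketch for the RecordQuery API above, assuming a hypothetical `Book` subclass of `BaseRecord` whose `objects` attribute is a `RecordQuery`; the field names and record ID are illustrative, not taken from the original file:
# Queries must be initialised with all(), filter() or none() before iteration.
for book in Book.objects.all():
    print(book)
# Chained filter() calls accumulate criteria into a single Q filter.
recent = Book.objects.filter(year=2020).filter(author='Jane Doe')
# get() bypasses filters and raises KeyError for unknown record IDs.
try:
    single = Book.objects.all().get('recXXXXXXXXXXXXXX')
except KeyError:
    single = None
# none() builds an empty query that never contacts the server.
assert list(Book.objects.none()) == []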
helpers.py | AnJ95/CoP-Bot | 2 | 6633211 | from telegram import Update, Message, Bot
from telegram.ext import CallbackContext
from state import state
def admin(method):
def secured(update: Update, context: CallbackContext):
msg: Message = update.message
if not state.check_admin(msg.from_user):
context.bot.send_message(msg.chat_id, f"You are not authorized for this command!")
return
method(update, context)
return secured
def private(method):
def secured(update: Update, context: CallbackContext):
msg: Message = update.message
bot: Bot = context.bot
msg_type: str = msg.chat.type # 'private', 'group', 'supergroup' or 'channel'
if msg_type != "private":
bot.send_message(msg.chat_id, f"The command has to be executed in a private channel!")
return
method(update, context)
return secured
def current_user(method):
def secured(update: Update, context: CallbackContext):
msg: Message = update.message
if not state.is_challenge_from(msg.from_user):
context.bot.send_message(msg.chat_id, f"You are not the current user!")
return
method(update, context)
return secured
| from telegram import Update, Message, Bot
from telegram.ext import CallbackContext
from state import state
def admin(method):
def secured(update: Update, context: CallbackContext):
msg: Message = update.message
if not state.check_admin(msg.from_user):
context.bot.send_message(msg.chat_id, f"You are not authorized for this command!")
return
method(update, context)
return secured
def private(method):
def secured(update: Update, context: CallbackContext):
msg: Message = update.message
bot: Bot = context.bot
msg_type: str = msg.chat.type # 'private', 'group', 'supergroup' or 'channel'
if msg_type != "private":
bot.send_message(msg.chat_id, f"The command has to be executed in a private channel!")
return
method(update, context)
return secured
def current_user(method):
def secured(update: Update, context: CallbackContext):
msg: Message = update.message
if not state.is_challenge_from(msg.from_user):
context.bot.send_message(msg.chat_id, f"You are not the current user!")
return
method(update, context)
return secured
| en | 0.233171 | # 'private', 'group', 'supergroup' or 'channel' | 2.567292 | 3 |
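A short sketch of how these decorators are meant to be stacked on python-telegram-bot handlers; the /reset command and its registration are illustrative assumptions, not part of the original file:
@admin
@private
def reset_handler(update: Update, context: CallbackContext):
    # Only reachable when the sender is an admin and the chat is private.
    context.bot.send_message(update.message.chat_id, "State has been reset.")
# dispatcher.add_handler(CommandHandler("reset", reset_handler))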
situation/settings.tmpl.py | chriskuehl/kloudless-status | 0 | 6633212 | # Notice:
# If you are running this in production environment, generate
# these for your app at https://dev.twitter.com/apps/new
TWITTER = {
'AUTH': {
'consumer_key': 'XXXX',
'consumer_secret': 'XXXX',
'token': 'XXXX',
'token_secret': 'XXXX',
}
}
# The e-mail address to send notifications from
EMAIL = {
'sender': 'Kloudless Status <<EMAIL>>'
}
DEBUG = True
# Currently DASHBOARD does not send out notifications
NOTIFY_SERVICES = ['API', 'JS']
| # Notice:
# If you are running this in production environment, generate
# these for your app at https://dev.twitter.com/apps/new
TWITTER = {
'AUTH': {
'consumer_key': 'XXXX',
'consumer_secret': 'XXXX',
'token': 'XXXX',
'token_secret': 'XXXX',
}
}
# The e-mail address to send notifications from
EMAIL = {
'sender': 'Kloudless Status <<EMAIL>>'
}
DEBUG = True
# Currently DASHBOARD does not send out notifications
NOTIFY_SERVICES = ['API', 'JS']
| en | 0.884892 | # Notice: # If you are running this in production environment, generate # these for your app at https://dev.twitter.com/apps/new # The e-mail address to send notifications from # Currently DASHBOARD does not send out notifications | 1.727078 | 2 |
model.py | robinmemminger/auditor1 | 0 | 6633213 | from sqlalchemy import Column, String, Integer, Float, DateTime, Boolean, BigInteger
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
def set_table(table, merge):
class Database(Base):
__tablename__ = table
SortingIndex = Column(Integer)
ItemType = Column(String(20))
Label = Column(String(None))
Response = Column(String(None))
Comment = Column(String(None))
MediaHypertextReference = Column(String(None))
Latitude = Column(String(50))
Longitude = Column(String(50))
ItemScore = Column(Float)
ItemMaxScore = Column(Float)
ItemScorePercentage = Column(Float)
Mandatory = Column(Boolean)
FailedResponse = Column(Boolean)
Inactive = Column(Boolean)
AuditID = Column(String(100), primary_key=True, autoincrement=False)
ItemID = Column(String(100), primary_key=True, autoincrement=False)
if merge is False:
DatePK = Column(String(20), primary_key=True, autoincrement=False)
else:
DatePK = Column(String(20))
ResponseID = Column(String(None))
ParentID = Column(String(100))
AuditOwner = Column(String(None))
AuditAuthor = Column(String(None))
AuditOwnerID = Column(String(None))
AuditAuthorID = Column(String(100))
AuditName = Column(String(None))
AuditScore = Column(Float)
AuditMaxScore = Column(Float)
AuditScorePercentage = Column(Float)
AuditDuration = Column(Float)
DateStarted = Column(DateTime)
DateCompleted = Column(DateTime)
DateModified = Column(DateTime)
TemplateID = Column(String(100))
TemplateName = Column(String(None))
TemplateAuthor = Column(String(None))
TemplateAuthorID = Column(String(100))
ItemCategory = Column(String(None))
RepeatingSectionParentID = Column(String(100))
DocumentNo = Column(String(None))
ConductedOn = Column(DateTime)
PreparedBy = Column(String(None))
Location = Column(String(None))
Personnel = Column(String(None))
ClientSite = Column(String(None))
AuditSite = Column(String(None))
AuditArea = Column(String(None))
AuditRegion = Column(String(None))
Archived = Column(Boolean)
return Database
SQL_HEADER_ROW = [
'SortingIndex',
'ItemType',
'Label',
'Response',
'Comment',
'MediaHypertextReference',
'Latitude',
'Longitude',
'ItemScore',
'ItemMaxScore',
'ItemScorePercentage',
'Mandatory',
'FailedResponse',
'Inactive',
'ItemID',
'ResponseID',
'ParentID',
'AuditOwner',
'AuditAuthor',
'AuditOwnerID',
'AuditAuthorID',
'AuditName',
'AuditScore',
'AuditMaxScore',
'AuditScorePercentage',
'AuditDuration',
'DateStarted',
'DateCompleted',
'DateModified',
'AuditID',
'TemplateID',
'TemplateName',
'TemplateAuthor',
'TemplateAuthorID',
'ItemCategory',
'RepeatingSectionParentID',
'DocumentNo',
'ConductedOn',
'PreparedBy',
'Location',
'Personnel',
'ClientSite',
'AuditSite',
'AuditArea',
'AuditRegion',
'Archived'
]
def set_actions_table(table, merge):
class ActionsDatabase(Base):
__tablename__ = table
id = Column(Integer, primary_key=False, autoincrement=True)
description = Column(String(None))
assignee = Column(String(None))
priority = Column(String(None))
priorityCode = Column(Integer)
status = Column(String(20))
statusCode = Column(Integer)
dueDatetime = Column(DateTime)
actionId = Column(String(100), primary_key=True, autoincrement=False)
if merge is False:
DatePK = Column(BigInteger, autoincrement=False)
else:
DatePK = Column(BigInteger, primary_key=True, autoincrement=False)
audit = Column(String(None))
auditId = Column(String(50))
linkedToItem = Column(String(None))
linkedToItemId = Column(String(50))
creatorName = Column(String(None))
creatorId = Column(String(50))
createdDatetime = Column(DateTime)
modifiedDatetime = Column(DateTime)
completedDatetime = Column(DateTime)
return ActionsDatabase
ACTIONS_HEADER_ROW = [
'actionId',
'description',
'assignee',
'priority',
'priorityCode',
'status',
'statusCode',
'dueDatetime',
'audit',
'auditId',
'linkedToItem',
'linkedToItemId',
'creatorName',
'creatorId',
'createdDatetime',
'modifiedDatetime',
'completedDatetime'
] | from sqlalchemy import Column, String, Integer, Float, DateTime, Boolean, BigInteger
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
def set_table(table, merge):
class Database(Base):
__tablename__ = table
SortingIndex = Column(Integer)
ItemType = Column(String(20))
Label = Column(String(None))
Response = Column(String(None))
Comment = Column(String(None))
MediaHypertextReference = Column(String(None))
Latitude = Column(String(50))
Longitude = Column(String(50))
ItemScore = Column(Float)
ItemMaxScore = Column(Float)
ItemScorePercentage = Column(Float)
Mandatory = Column(Boolean)
FailedResponse = Column(Boolean)
Inactive = Column(Boolean)
AuditID = Column(String(100), primary_key=True, autoincrement=False)
ItemID = Column(String(100), primary_key=True, autoincrement=False)
if merge is False:
DatePK = Column(String(20), primary_key=True, autoincrement=False)
else:
DatePK = Column(String(20))
ResponseID = Column(String(None))
ParentID = Column(String(100))
AuditOwner = Column(String(None))
AuditAuthor = Column(String(None))
AuditOwnerID = Column(String(None))
AuditAuthorID = Column(String(100))
AuditName = Column(String(None))
AuditScore = Column(Float)
AuditMaxScore = Column(Float)
AuditScorePercentage = Column(Float)
AuditDuration = Column(Float)
DateStarted = Column(DateTime)
DateCompleted = Column(DateTime)
DateModified = Column(DateTime)
TemplateID = Column(String(100))
TemplateName = Column(String(None))
TemplateAuthor = Column(String(None))
TemplateAuthorID = Column(String(100))
ItemCategory = Column(String(None))
RepeatingSectionParentID = Column(String(100))
DocumentNo = Column(String(None))
ConductedOn = Column(DateTime)
PreparedBy = Column(String(None))
Location = Column(String(None))
Personnel = Column(String(None))
ClientSite = Column(String(None))
AuditSite = Column(String(None))
AuditArea = Column(String(None))
AuditRegion = Column(String(None))
Archived = Column(Boolean)
return Database
SQL_HEADER_ROW = [
'SortingIndex',
'ItemType',
'Label',
'Response',
'Comment',
'MediaHypertextReference',
'Latitude',
'Longitude',
'ItemScore',
'ItemMaxScore',
'ItemScorePercentage',
'Mandatory',
'FailedResponse',
'Inactive',
'ItemID',
'ResponseID',
'ParentID',
'AuditOwner',
'AuditAuthor',
'AuditOwnerID',
'AuditAuthorID',
'AuditName',
'AuditScore',
'AuditMaxScore',
'AuditScorePercentage',
'AuditDuration',
'DateStarted',
'DateCompleted',
'DateModified',
'AuditID',
'TemplateID',
'TemplateName',
'TemplateAuthor',
'TemplateAuthorID',
'ItemCategory',
'RepeatingSectionParentID',
'DocumentNo',
'ConductedOn',
'PreparedBy',
'Location',
'Personnel',
'ClientSite',
'AuditSite',
'AuditArea',
'AuditRegion',
'Archived'
]
def set_actions_table(table, merge):
class ActionsDatabase(Base):
__tablename__ = table
id = Column(Integer, primary_key=False, autoincrement=True)
description = Column(String(None))
assignee = Column(String(None))
priority = Column(String(None))
priorityCode = Column(Integer)
status = Column(String(20))
statusCode = Column(Integer)
dueDatetime = Column(DateTime)
actionId = Column(String(100), primary_key=True, autoincrement=False)
if merge is False:
DatePK = Column(BigInteger, autoincrement=False)
else:
DatePK = Column(BigInteger, primary_key=True, autoincrement=False)
audit = Column(String(None))
auditId = Column(String(50))
linkedToItem = Column(String(None))
linkedToItemId = Column(String(50))
creatorName = Column(String(None))
creatorId = Column(String(50))
createdDatetime = Column(DateTime)
modifiedDatetime = Column(DateTime)
completedDatetime = Column(DateTime)
return ActionsDatabase
ACTIONS_HEADER_ROW = [
'actionId',
'description',
'assignee',
'priority',
'priorityCode',
'status',
'statusCode',
'dueDatetime',
'audit',
'auditId',
'linkedToItem',
'linkedToItemId',
'creatorName',
'creatorId',
'createdDatetime',
'modifiedDatetime',
'completedDatetime'
] | none | 1 | 2.309415 | 2 |
|
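A brief sketch of how the mapped classes returned by set_table/set_actions_table are typically used, assuming an in-memory SQLite engine; the table name and row values are illustrative:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
AuditRow = set_table('audit_items', merge=False)  # merge=False puts DatePK in the composite primary key
engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)  # creates the audit_items table
session = sessionmaker(bind=engine)()
session.add(AuditRow(AuditID='audit-1', ItemID='item-1', DatePK='20240101'))
session.commit()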
ale/drivers/__init__.py | kaitlyndlee/ale | 0 | 6633214 | import pvl
import zlib
import importlib
import inspect
import itertools
from itertools import chain
import os
from glob import glob
import json
import numpy as np
import datetime
from datetime import datetime, date
import traceback
from collections import OrderedDict
from ale.formatters.usgscsm_formatter import to_usgscsm
from ale.formatters.isis_formatter import to_isis
from ale.base.data_isis import IsisSpice
from abc import ABC
# dynamically load drivers
__all__ = [os.path.splitext(os.path.basename(d))[0] for d in glob(os.path.join(os.path.dirname(__file__), '*_drivers.py'))]
__driver_modules__ = [importlib.import_module('.'+m, package='ale.drivers') for m in __all__]
__formatters__ = {'usgscsm': to_usgscsm,
'isis': to_isis}
def sort_drivers(drivers=[]):
return list(sorted(drivers, key=lambda x:IsisSpice in x.__bases__, reverse=False))
class AleJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, date):  # 'datetime' is rebound to the class by the import above, so test against 'date' directly
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def load(label, props={}, formatter='usgscsm', verbose=False):
"""
Attempt to load a given label from all possible drivers
Parameters
----------
label : str
String path to the given label file
"""
if isinstance(formatter, str):
formatter = __formatters__[formatter]
drivers = chain.from_iterable(inspect.getmembers(dmod, lambda x: inspect.isclass(x) and "_driver" in x.__module__) for dmod in __driver_modules__)
drivers = sort_drivers([d[1] for d in drivers])
for driver in drivers:
if verbose:
print(f'Trying {driver}')
try:
res = driver(label, props=props)
# get instrument_id to force early failure
res.instrument_id
with res as driver:
isd = formatter(driver)
if verbose:
print("Success with: ", driver)
print("ISD:\n", json.dumps(isd, indent=2, cls=AleJsonEncoder))
return isd
except Exception as e:
if verbose:
print(f'Failed: {e}\n')
traceback.print_exc()
raise Exception('No Such Driver for Label')
def loads(label, props='', formatter='usgscsm', verbose=False):
res = load(label, props, formatter, verbose=verbose)
return json.dumps(res, cls=AleJsonEncoder)
| import pvl
import zlib
import importlib
import inspect
import itertools
from itertools import chain
import os
from glob import glob
import json
import numpy as np
import datetime
from datetime import datetime, date
import traceback
from collections import OrderedDict
from ale.formatters.usgscsm_formatter import to_usgscsm
from ale.formatters.isis_formatter import to_isis
from ale.base.data_isis import IsisSpice
from abc import ABC
# dynamically load drivers
__all__ = [os.path.splitext(os.path.basename(d))[0] for d in glob(os.path.join(os.path.dirname(__file__), '*_drivers.py'))]
__driver_modules__ = [importlib.import_module('.'+m, package='ale.drivers') for m in __all__]
__formatters__ = {'usgscsm': to_usgscsm,
'isis': to_isis}
def sort_drivers(drivers=[]):
return list(sorted(drivers, key=lambda x:IsisSpice in x.__bases__, reverse=False))
class AleJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, date):  # 'datetime' is rebound to the class by the import above, so test against 'date' directly
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def load(label, props={}, formatter='usgscsm', verbose=False):
"""
Attempt to load a given label from all possible drivers
Parameters
----------
label : str
String path to the given label file
"""
if isinstance(formatter, str):
formatter = __formatters__[formatter]
drivers = chain.from_iterable(inspect.getmembers(dmod, lambda x: inspect.isclass(x) and "_driver" in x.__module__) for dmod in __driver_modules__)
drivers = sort_drivers([d[1] for d in drivers])
for driver in drivers:
if verbose:
print(f'Trying {driver}')
try:
res = driver(label, props=props)
# get instrument_id to force early failure
res.instrument_id
with res as driver:
isd = formatter(driver)
if verbose:
print("Success with: ", driver)
print("ISD:\n", json.dumps(isd, indent=2, cls=AleJsonEncoder))
return isd
except Exception as e:
if verbose:
print(f'Failed: {e}\n')
traceback.print_exc()
raise Exception('No Such Driver for Label')
def loads(label, props='', formatter='usgscsm', verbose=False):
res = load(label, props, formatter, verbose=verbose)
return json.dumps(res, cls=AleJsonEncoder)
| en | 0.68439 | # dynamically load drivers Attempt to load a given label from all possible drivers Parameters ---------- label : str String path to the given label file # get instrument_id to force early failure | 2.128569 | 2 |
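A minimal sketch of how load/loads above are driven; the label path and props dict are illustrative, and which driver succeeds depends on the drivers and kernels installed:
from ale.drivers import load, loads
# Tries every registered driver against the label, then formats the result as an ISD.
isd = load('/data/EN1072174528M.IMG', props={}, formatter='usgscsm', verbose=True)
# Same pipeline, returning the ISD serialised to JSON via AleJsonEncoder.
isd_json = loads('/data/EN1072174528M.IMG')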
class6/exercises/ex7_pynxos_config.py | ktbyers/python_course | 24 | 6633215 |
#!/usr/bin/env python
"""
Use the pynxos library to configure a loopback interface on nxos1. Choose a random
loopback interface number between 1 and 99.
Assign the loopback interface an IP address in the 172.16.0.0 - 172.31.255.255. Use
a /32 netmask.
Execute a 'show run interface loopbackX' command using NX-API to verify your interface
was configured properly. For example:
nxapi_conn.show('show run interface loopback99', raw_text=True)
Note, you will need to use 'raw_text=True' for this command.
"""
from __future__ import print_function, unicode_literals
from pynxos.device import Device
from getpass import getpass
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def main():
password = getpass()
nxos1 = {
'host': 'nxos1.twb-tech.com',
'username': 'pyclass',
'password': password,
'transport': 'https',
'port': 8443,
}
nxos2 = { # noqa
'host': 'nxos2.twb-tech.com',
'username': 'pyclass',
'password': password,
'transport': 'https',
'port': 8443,
}
config_commands = ['interface Loopback99', 'ip address 172.31.254.99/32']
for device in (nxos1,):
nxapi_conn = Device(**device)
nxapi_conn.config_list(config_commands)
output = nxapi_conn.show('show run interface loopback99', raw_text=True)
print(output)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
"""
Use the pynxos library to configure a loopback interface on nxos1. Choose a random
loopback interface number between 1 and 99.
Assign the loopback interface an IP address in the 172.16.0.0 - 172.31.255.255. Use
a /32 netmask.
Execute a 'show run interface loopbackX' command using NX-API to verify your interface
was configured properly. For example:
nxapi_conn.show('show run interface loopback99', raw_text=True)
Note, you will need to use 'raw_text=True' for this command.
"""
from __future__ import print_function, unicode_literals
from pynxos.device import Device
from getpass import getpass
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def main():
password = getpass()
nxos1 = {
'host': 'nxos1.twb-tech.com',
'username': 'pyclass',
'password': password,
'transport': 'https',
'port': 8443,
}
nxos2 = { # noqa
'host': 'nxos2.twb-tech.com',
'username': 'pyclass',
'password': password,
'transport': 'https',
'port': 8443,
}
config_commands = ['interface Loopback99', 'ip address 172.31.254.99/32']
for device in (nxos1,):
nxapi_conn = Device(**device)
nxapi_conn.config_list(config_commands)
output = nxapi_conn.show('show run interface loopback99', raw_text=True)
print(output)
if __name__ == "__main__":
main()
| en | 0.599688 | #!/usr/bin/env python Use the pynxos library to configure a loopback interface on nxos1. Choose a random loopback interface number between 1 and 99. Assign the loopback interface an IP address in the 172.16.0.0 - 172.31.255.255. Use a /32 netmask. Execute a 'show run interface loopbackX' command using NX-API to verify your interface was configured properly. For example: nxapi_conn.show('show run interface loopback99', raw_text=True) Note, you will need to use 'raw_text=True' for this command. # noqa | 2.773991 | 3 |
fsociety/information_gathering/sublist3r.py | mehedieh/fsociety | 0 | 6633216 | import os
from fsociety.core.repo import GitHubRepo
from fsociety.core.menu import set_readline
class Sublist3rRepo(GitHubRepo):
def __init__(self):
super().__init__(
path="aboul3la/Sublist3r",
install={"pip": "requirements.txt"},
description=
"Fast subdomains enumeration tool for penetration testers")
def run(self):
os.chdir(self.full_path)
set_readline([])
user_domain = input("\nEnter a domain to enumerate: ").strip()
return os.system(f"python3 sublist3r.py -v -d {user_domain}")
sublist3r = Sublist3rRepo()
| import os
from fsociety.core.repo import GitHubRepo
from fsociety.core.menu import set_readline
class Sublist3rRepo(GitHubRepo):
def __init__(self):
super().__init__(
path="aboul3la/Sublist3r",
install={"pip": "requirements.txt"},
description=
"Fast subdomains enumeration tool for penetration testers")
def run(self):
os.chdir(self.full_path)
set_readline([])
user_domain = input("\nEnter a domain to enumerate: ").strip()
return os.system(f"python3 sublist3r.py -v -d {user_domain}")
sublist3r = Sublist3rRepo()
| none | 1 | 2.135294 | 2 |
|
apps/courses/migrations/0001_initial.py | bopopescu/diandian_online | 3 | 6633217 | # Generated by Django 2.1.7 on 2019-02-19 18:44
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Chapter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lesson_name', models.CharField(max_length=100, verbose_name='章节名称')),
('add_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='章节添加时间')),
],
options={
'verbose_name': '章节',
'verbose_name_plural': '章节',
},
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('course_name', models.CharField(max_length=50, verbose_name='课程名称')),
('description', models.TextField(max_length=500, verbose_name='课程描述')),
('detail', models.TextField(verbose_name='课程详情')),
('degree', models.CharField(choices=[('easy', '初级'), ('normal', '中级'), ('hard', '高级')], max_length=10, verbose_name='课程难度')),
('learn_times', models.IntegerField(default=0, verbose_name='学习时长(分钟数)')),
('student_number', models.IntegerField(default=0, verbose_name='学习人数')),
('collect_number', models.IntegerField(default=0, verbose_name='收藏人数')),
('course_mark', models.FloatField(default=10.0, verbose_name='课程总评分')),
('course_image', models.ImageField(max_length=500, upload_to='courses/static/courses/image/%Y/%m', verbose_name='课程图片')),
('click_number', models.IntegerField(default=0, verbose_name='点击量')),
('add_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='课程添加时间')),
],
options={
'verbose_name': '课程',
'verbose_name_plural': '课程',
},
),
migrations.CreateModel(
name='CourseResource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('resource_name', models.CharField(max_length=50, verbose_name='资源名称')),
('download', models.FileField(max_length=200, upload_to='courses/static/courses/resource/%Y/%m', verbose_name='资源文件')),
('add_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='资源添加时间')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Course', verbose_name='课程相关资源')),
],
options={
'verbose_name': '课程相关资源',
'verbose_name_plural': '课程相关资源',
},
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lesson_name', models.CharField(max_length=100, verbose_name='章节名称')),
('add_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='小节添加时间')),
('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Chapter', verbose_name='小节名称')),
],
options={
'verbose_name': '小节',
'verbose_name_plural': '小节',
},
),
migrations.AddField(
model_name='chapter',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Course', verbose_name='章节名称'),
),
]
| # Generated by Django 2.1.7 on 2019-02-19 18:44
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Chapter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lesson_name', models.CharField(max_length=100, verbose_name='章节名称')),
('add_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='章节添加时间')),
],
options={
'verbose_name': '章节',
'verbose_name_plural': '章节',
},
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('course_name', models.CharField(max_length=50, verbose_name='课程名称')),
('description', models.TextField(max_length=500, verbose_name='课程描述')),
('detail', models.TextField(verbose_name='课程详情')),
('degree', models.CharField(choices=[('easy', '初级'), ('normal', '中级'), ('hard', '高级')], max_length=10, verbose_name='课程难度')),
('learn_times', models.IntegerField(default=0, verbose_name='学习时长(分钟数)')),
('student_number', models.IntegerField(default=0, verbose_name='学习人数')),
('collect_number', models.IntegerField(default=0, verbose_name='收藏人数')),
('course_mark', models.FloatField(default=10.0, verbose_name='课程总评分')),
('course_image', models.ImageField(max_length=500, upload_to='courses/static/courses/image/%Y/%m', verbose_name='课程图片')),
('click_number', models.IntegerField(default=0, verbose_name='点击量')),
('add_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='课程添加时间')),
],
options={
'verbose_name': '课程',
'verbose_name_plural': '课程',
},
),
migrations.CreateModel(
name='CourseResource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('resource_name', models.CharField(max_length=50, verbose_name='资源名称')),
('download', models.FileField(max_length=200, upload_to='courses/static/courses/resource/%Y/%m', verbose_name='资源文件')),
('add_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='资源添加时间')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Course', verbose_name='课程相关资源')),
],
options={
'verbose_name': '课程相关资源',
'verbose_name_plural': '课程相关资源',
},
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lesson_name', models.CharField(max_length=100, verbose_name='章节名称')),
('add_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='小节添加时间')),
('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Chapter', verbose_name='小节名称')),
],
options={
'verbose_name': '小节',
'verbose_name_plural': '小节',
},
),
migrations.AddField(
model_name='chapter',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Course', verbose_name='章节名称'),
),
]
| en | 0.751872 | # Generated by Django 2.1.7 on 2019-02-19 18:44 | 1.815694 | 2 |
supcon/models.py | deepneuralmachine/google-research | 23,901 | 6633218 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The contrastive model."""
import tensorflow.compat.v1 as tf
from supcon import classification_head
from supcon import enums
from supcon import projection_head
from supcon import resnet
class ContrastiveModel(tf.layers.Layer):
"""A model suitable for contrastive training with different backbone networks.
Attributes:
architecture: An enums.EncoderArchitecture. The type of the architecture to
use for the encoder.
normalize_projection_head_input: Whether the encoder output that is the
input to the projection head should be normalized.
normalize_classification_head_input: Whether the encoder output that is the
input to the classification head should be normalized.
stop_gradient_before_classification_head: Whether a stop_gradient is
inserted between the encoder and the classification head, so the head
is trained without back-propagating into the encoder.
encoder_kwargs: Keyword arguments that are passed on to the constructor of
the encoder. The specific encoder implementation is determined by
`architecture`.
projection_head_kwargs: Keyword arguments that are passed on to the
constructor of the projection head. These are the arguments to
`projection_head.ProjectionHead`.
classification_head_kwargs: Keyword arguments that are passed on to the
constructor of the classification head. These are the arguments to
`classification_head.ClassificationHead`.
name: A name for this object.
"""
def __init__(self,
architecture=enums.EncoderArchitecture.RESNET_V1,
normalize_projection_head_input=True,
normalize_classification_head_input=True,
stop_gradient_before_projection_head=False,
stop_gradient_before_classification_head=True,
encoder_kwargs=None,
projection_head_kwargs=None,
classification_head_kwargs=None,
name='ContrastiveModel',
**kwargs):
super(ContrastiveModel, self).__init__(name=name, **kwargs)
self.normalize_projection_head_input = normalize_projection_head_input
self.normalize_classification_head_input = (
normalize_classification_head_input)
self.stop_gradient_before_projection_head = (
stop_gradient_before_projection_head)
self.stop_gradient_before_classification_head = (
stop_gradient_before_classification_head)
encoder_fns = {
enums.EncoderArchitecture.RESNET_V1: resnet.ResNetV1,
enums.EncoderArchitecture.RESNEXT: resnet.ResNext,
}
if architecture not in encoder_fns:
raise ValueError(f'Architecture should be one of {encoder_fns.keys()}, '
f'found: {architecture}.')
encoder_fn = encoder_fns[architecture]
assert encoder_kwargs is not None
projection_head_kwargs = projection_head_kwargs or {}
classification_head_kwargs = classification_head_kwargs or {}
self.encoder = encoder_fn(name='Encoder', **encoder_kwargs)
self.projection_head = projection_head.ProjectionHead(
**projection_head_kwargs)
self.classification_head = classification_head.ClassificationHead(
**classification_head_kwargs)
def call(self, inputs, training):
embedding = self.encoder(inputs, training)
normalized_embedding = tf.nn.l2_normalize(embedding, axis=1)
projection_input = (
normalized_embedding
if self.normalize_projection_head_input else embedding)
if self.stop_gradient_before_projection_head:
projection_input = tf.stop_gradient(projection_input)
projection = self.projection_head(projection_input, training)
classification_input = (
normalized_embedding
if self.normalize_classification_head_input else embedding)
if self.stop_gradient_before_classification_head:
classification_input = tf.stop_gradient(classification_input)
classification = self.classification_head(classification_input, training)
return embedding, normalized_embedding, projection, classification
| # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The contrastive model."""
import tensorflow.compat.v1 as tf
from supcon import classification_head
from supcon import enums
from supcon import projection_head
from supcon import resnet
class ContrastiveModel(tf.layers.Layer):
"""A model suitable for contrastive training with different backbone networks.
Attributes:
architecture: An enums.EncoderArchitecture. The type of the architecture to
use for the encoder.
normalize_projection_head_input: Whether the encoder output that is the
input to the projection head should be normalized.
normalize_classification_head_input: Whether the encoder output that is the
input to the classification head should be normalized.
stop_gradient_before_classification_head: Whether a stop_gradient is
inserted between the encoder and the classification head, so the head
is trained without back-propagating into the encoder.
encoder_kwargs: Keyword arguments that are passed on to the constructor of
the encoder. The specific encoder implementation is determined by
`architecture`.
projection_head_kwargs: Keyword arguments that are passed on to the
constructor of the projection head. These are the arguments to
`projection_head.ProjectionHead`.
classification_head_kwargs: Keyword arguments that are passed on to the
constructor of the classification head. These are the arguments to
`classification_head.ClassificationHead`.
name: A name for this object.
"""
def __init__(self,
architecture=enums.EncoderArchitecture.RESNET_V1,
normalize_projection_head_input=True,
normalize_classification_head_input=True,
stop_gradient_before_projection_head=False,
stop_gradient_before_classification_head=True,
encoder_kwargs=None,
projection_head_kwargs=None,
classification_head_kwargs=None,
name='ContrastiveModel',
**kwargs):
super(ContrastiveModel, self).__init__(name=name, **kwargs)
self.normalize_projection_head_input = normalize_projection_head_input
self.normalize_classification_head_input = (
normalize_classification_head_input)
self.stop_gradient_before_projection_head = (
stop_gradient_before_projection_head)
self.stop_gradient_before_classification_head = (
stop_gradient_before_classification_head)
encoder_fns = {
enums.EncoderArchitecture.RESNET_V1: resnet.ResNetV1,
enums.EncoderArchitecture.RESNEXT: resnet.ResNext,
}
if architecture not in encoder_fns:
raise ValueError(f'Architecture should be one of {encoder_fns.keys()}, '
f'found: {architecture}.')
encoder_fn = encoder_fns[architecture]
assert encoder_kwargs is not None
projection_head_kwargs = projection_head_kwargs or {}
classification_head_kwargs = classification_head_kwargs or {}
self.encoder = encoder_fn(name='Encoder', **encoder_kwargs)
self.projection_head = projection_head.ProjectionHead(
**projection_head_kwargs)
self.classification_head = classification_head.ClassificationHead(
**classification_head_kwargs)
def call(self, inputs, training):
embedding = self.encoder(inputs, training)
normalized_embedding = tf.nn.l2_normalize(embedding, axis=1)
projection_input = (
normalized_embedding
if self.normalize_projection_head_input else embedding)
if self.stop_gradient_before_projection_head:
projection_input = tf.stop_gradient(projection_input)
projection = self.projection_head(projection_input, training)
classification_input = (
normalized_embedding
if self.normalize_classification_head_input else embedding)
if self.stop_gradient_before_classification_head:
classification_input = tf.stop_gradient(classification_input)
classification = self.classification_head(classification_input, training)
return embedding, normalized_embedding, projection, classification | en | 0.799581 | # coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 The contrastive model. A model suitable for contrastive training with different backbone networks. Attributes: architecture: An enums.EncoderArchitecture. The type of the architecture to use for the encoder. normalize_projection_head_input: Whether the encoder output that is the input to the projection head should be normalized. normalize_classification_head_input: Whether the encoder output that is the input to the classification head should be normalized. jointly_train_classification_head: Whether the classification head is trained simultaneously with the encoder. If false, a stop_gradient is added between the classification head and the encoder. encoder_kwargs: Keyword arguments that are passed on to the constructor of the encoder. The specific encoder implementation is determined by `architecture`. projection_head_kwargs: Keyword arguments that are passed on to the constructor of the projection head. These are the arguments to `projection_head.ProjectionHead`. classification_head_kwargs: Keyword arguments that are passed on to the constructor of the classification head. These are the arguments to `classification_head.ClassificationHead`. name: A name for this object. | 2.235243 | 2 |
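A usage sketch for ContrastiveModel above; the encoder and head keyword arguments are assumptions for illustration, not the exact signatures from this repository:
model = ContrastiveModel(
    encoder_kwargs={'depth': 50, 'width': 1},        # assumed ResNet arguments
    projection_head_kwargs={},
    classification_head_kwargs={'num_classes': 10})  # assumed head argument
images = tf.random.uniform([8, 224, 224, 3])
embedding, normalized_embedding, projection, logits = model(images, training=True)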
parser/fase2/team20/execution/execute.py | diegog56/tytus | 0 | 6633219 | # Import of execution methods
# Separate files are used to minimize merge conflicts
from .AST.sentence import *
from .executeSentence import executeSentence
from .executeSentence2 import executeSentence2
from .executeInstruction import executeInstruction
from .generateASTReport import graphAST
from .generateSymbolTableReport import printSymbolTable
from .execute_result import *
from .storageManager.TypeChecker import *
from io import StringIO # Python3
import sys
path_c3d = "C3D.py"
class Execute():
nodes = []
errors = []
messages = []
querys = []
ts = []
pila =[] # each item is a dictionary {resultado,argumento1,argumento2,operacion}
plcode = ""
#intermediate = IntermediateFunctions()
types = {
1: 'Entero',
2: 'Decimal',
3: 'Cadena',
4: 'Variable',
5: 'Regex'
}
def __init__(self, nodes):
self.tempcount = -1
self.labelcount = -1
self.nodes = nodes
self.errors = []
self.messages = []
self.querys = []
# def __init__(self, nodes, errors):
# self.nodes = nodes
# self.errors = errors
# Main "execute" method: it receives the root of the AST
# and takes care of calling the remaining methods
def execute(self):
if(self.nodes is not None):
global path_c3d
archivo = open(path_c3d, 'w+')
archivo.write("from execution.executeSentence import executeSentence ")
archivo.write("\nfrom execution.AST.sentence import *")
archivo.write("\nfrom execution.AST.expression import *")
archivo.write("\nfrom execution.executeInstruction import createFunction, deleteFunction")
archivo.write("\nfrom console import print_error, print_success, print_warning, print_text")
archivo.write("\nfrom goto import with_goto")
archivo.write("\nimport math")
archivo.write("\n\n@with_goto")
archivo.write("\ndef up():")
archivo.write("\n\tprint(1)\n")
archivo.close()
if(len(self.nodes)==0):
archivo = open(path_c3d, 'a')
archivo.write("\n\tprint(1)\n")
archivo.close()
for node in self.nodes:
#pprint(vars(node))
if isinstance(node,Sentence):
old_stdout = sys.stdout
new_stdout = StringIO()
sys.stdout = new_stdout
print(node)
val1 = new_stdout.getvalue()[:-1]
sys.stdout = old_stdout
archivo = open(path_c3d, 'a')
archivo.write("\n\t")
archivo.write(val1)
archivo.close()
else:
executeInstruction(self,node, 1, 0)
#executeSentence2(self,node)
try:
for storedproc in TCgetFunctions():
if storedproc not in self.plcode:
self.plcode+=storedproc
except:
pass
archivo = open(path_c3d, 'a')
archivo.write("\n")
archivo.write(self.plcode)
archivo.write("\nup()")
archivo.close()
dotAST = graphAST(self)
printSymbolTable_ = printSymbolTable(self)
contentC3D = ""
try:
f = open(path_c3d, "r")
contentC3D = f.read()
f.close()
except Exception as e:
i=0#print(e)
result = execute_result(dotAST, printSymbolTable_, self.errors, self.messages, self.querys, contentC3D)
return result
def generateTemp(self):
self.tempcount+=1
temp = 't'+str(self.tempcount)
return temp
def getLastTemp(self):
temp = 't'+str(self.tempcount)
return temp
def generateLabel(self):
self.labelcount+=1
label = 'lbl'+str(self.labelcount)
return label
def getLastLabel(self):
label = 'lbl'+str(self.labelcount)
return label
# How to record an error
# self.errors.append(
# Error('Semántico', 'Ya existe una tabla con el nombre ' + nodo.id, nodo.fila, nodo.columna))
| # Import of execution methods
# Separate files are used to minimize merge conflicts
from .AST.sentence import *
from .executeSentence import executeSentence
from .executeSentence2 import executeSentence2
from .executeInstruction import executeInstruction
from .generateASTReport import graphAST
from .generateSymbolTableReport import printSymbolTable
from .execute_result import *
from .storageManager.TypeChecker import *
from io import StringIO # Python3
import sys
path_c3d = "C3D.py"
class Execute():
nodes = []
errors = []
messages = []
querys = []
ts = []
pila =[] # each item is a dictionary {resultado,argumento1,argumento2,operacion}
plcode = ""
#intermediate = IntermediateFunctions()
types = {
1: 'Entero',
2: 'Decimal',
3: 'Cadena',
4: 'Variable',
5: 'Regex'
}
def __init__(self, nodes):
self.tempcount = -1
self.labelcount = -1
self.nodes = nodes
self.errors = []
self.messages = []
self.querys = []
# def __init__(self, nodes, errors):
# self.nodes = nodes
# self.errors = errors
# Main "execute" method: it receives the root of the AST
# and takes care of calling the remaining methods
def execute(self):
if(self.nodes is not None):
global path_c3d
archivo = open(path_c3d, 'w+')
archivo.write("from execution.executeSentence import executeSentence ")
archivo.write("\nfrom execution.AST.sentence import *")
archivo.write("\nfrom execution.AST.expression import *")
archivo.write("\nfrom execution.executeInstruction import createFunction, deleteFunction")
archivo.write("\nfrom console import print_error, print_success, print_warning, print_text")
archivo.write("\nfrom goto import with_goto")
archivo.write("\nimport math")
archivo.write("\n\n@with_goto")
archivo.write("\ndef up():")
archivo.write("\n\tprint(1)\n")
archivo.close()
if(len(self.nodes)==0):
archivo = open(path_c3d, 'a')
archivo.write("\n\tprint(1)\n")
archivo.close()
for node in self.nodes:
#pprint(vars(node))
if isinstance(node,Sentence):
old_stdout = sys.stdout
new_stdout = StringIO()
sys.stdout = new_stdout
print(node)
val1 = new_stdout.getvalue()[:-1]
sys.stdout = old_stdout
archivo = open(path_c3d, 'a')
archivo.write("\n\t")
archivo.write(val1)
archivo.close()
else:
executeInstruction(self,node, 1, 0)
#executeSentence2(self,node)
try:
for storedproc in TCgetFunctions():
if storedproc not in self.plcode:
self.plcode+=storedproc
except:
pass
archivo = open(path_c3d, 'a')
archivo.write("\n")
archivo.write(self.plcode)
archivo.write("\nup()")
archivo.close()
dotAST = graphAST(self)
printSymbolTable_ = printSymbolTable(self)
contentC3D = ""
try:
f = open(path_c3d, "r")
contentC3D = f.read()
f.close()
except Exception as e:
i=0#print(e)
result = execute_result(dotAST, printSymbolTable_, self.errors, self.messages, self.querys, contentC3D)
return result
def generateTemp(self):
self.tempcount+=1
temp = 't'+str(self.tempcount)
return temp
def getLastTemp(self):
temp = 't'+str(self.tempcount)
return temp
def generateLabel(self):
self.labelcount+=1
label = 'lbl'+str(self.labelcount)
return label
def getLastLabel(self):
label = 'lbl'+str(self.labelcount)
return label
# How to record an error
# self.errors.append(
# Error('Semántico', 'Ya existe una tabla con el nombre ' + nodo.id, nodo.fila, nodo.columna))
| es | 0.679081 | #Importacion de metodos de ejecucion #Se utilizan archivos separados para minimizar los conflictos # Python3 # cada item es un diccionario {resultado,argumento1,argumento2,operacion} #intermediate = IntermediateFunctions() # def __init__(self, nodes, errors): # self.nodes = nodes # self.errors = errors #Aqui va metodo principal ejecutar, al que se le enviara la raiz del AST #y se encargaran de llamar el resto de metodos #pprint(vars(node)) #executeSentence2(self,node) #print(e) #Como guardar un error # self.errors.append( # Error('Semántico', 'Ya existe una tabla con el nombre ' + nodo.id, nodo.fila, nodo.columna)) | 2.511562 | 3 |
deepfashion2/extract_subset_of_files.py | chaoso/Detectron2-deepfashion2 | 3 | 6633220 | import shutil
import time
import os
tic = time.time()
NUMBER_OF_SAMPLES_TRAIN = 1000
NUMBER_OF_SAMPLES_VALIDATION = 300
TRAINING_IMAGE_FILE_PATH = "F:\\Downloads\\train\\train\\image"
VALIDATION_IMAGE_FILE_PATH = "F:\\Downloads\\validation\\validation\\image"
DST_VALIDATION_IMAGE_FILE_PATH = "F:\\mini_deepfashion2\\validation\\image"
DST_TRAINING_IMAGE_FILE_PATH = "F:\\mini_deepfashion2\\train\\image"
i = 0
for path, _, files in os.walk(TRAINING_IMAGE_FILE_PATH):
for file in files:
if i == NUMBER_OF_SAMPLES_TRAIN:
break
shutil.copy(os.path.join(path, file), DST_TRAINING_IMAGE_FILE_PATH)
i += 1
break
j = 0
for path, _, files in os.walk(VALIDATION_IMAGE_FILE_PATH):
for file in files:
if j == NUMBER_OF_SAMPLES_VALIDATION:
break
shutil.copy(os.path.join(path, file), DST_VALIDATION_IMAGE_FILE_PATH)
j += 1
break
print('Done (t={:0.2f}s)'.format(time.time() - tic))
| import shutil
import time
import os
tic = time.time()
NUMBER_OF_SAMPLES_TRAIN = 1000
NUMBER_OF_SAMPLES_VALIDATION = 300
TRAINING_IMAGE_FILE_PATH = "F:\\Downloads\\train\\train\\image"
VALIDATION_IMAGE_FILE_PATH = "F:\\Downloads\\validation\\validation\\image"
DST_VALIDATION_IMAGE_FILE_PATH = "F:\\mini_deepfashion2\\validation\\image"
DST_TRAINING_IMAGE_FILE_PATH = "F:\\mini_deepfashion2\\train\\image"
i = 0
for path, _, files in os.walk(TRAINING_IMAGE_FILE_PATH):
for file in files:
if i == NUMBER_OF_SAMPLES_TRAIN:
break
shutil.copy(os.path.join(path, file), DST_TRAINING_IMAGE_FILE_PATH)
i += 1
break
j = 0
for path, _, files in os.walk(VALIDATION_IMAGE_FILE_PATH):
for file in files:
if j == NUMBER_OF_SAMPLES_VALIDATION:
break
shutil.copy(os.path.join(path, file), DST_VALIDATION_IMAGE_FILE_PATH)
j += 1
break
print('Done (t={:0.2f}s)'.format(time.time() - tic))
| none | 1 | 2.465411 | 2 |
|
f1Scheduler.py | thaldi/f1CalenderNotify | 2 | 6633221 |
import HtmlHelper as htmlHelper
import datetime as dt
import ToastHelper as toast
baseUrl = "https://www.formula1.com"
parser = htmlHelper.HtmlParserHelper("{}/en/racing/2019.html".format(baseUrl))
allElements = parser.GetElements()
def ClearRaceDateValue(values):
dateValues = values.split("-")
raceDay = dateValues[len(dateValues) - 1].strip()
return raceDay
def CheckRaceDateIsToday(raceDate):
todayDate = dt.datetime.now()
day = todayDate.strftime("%d")
month = todayDate.strftime("%b")
mergedDate = "{} {}".format(day, month)
return True if mergedDate == raceDate else False
def Check():
for element in allElements:
href = element.get("href")
if href:  # the original check used 'or', which always passed; only follow non-empty hrefs
result = parser.GetPelementFromHref("{}{}".format(baseUrl, href))
if result is not None:
value = result.get("value")
if value:  # likewise, require a non-empty value instead of an always-true 'or' check
lastDate = ClearRaceDateValue(result.text)
result = CheckRaceDateIsToday(lastDate)
if result == True:
toast.ShowToastMessage(lastDate)
| import HtmlHelper as htmlHelper
import datetime as dt
import ToastHelper as toast
baseUrl = "https://www.formula1.com"
parser = htmlHelper.HtmlParserHelper("{}/en/racing/2019.html".format(baseUrl))
allElements = parser.GetElements()
def ClearRaceDateValue(values):
dateValues = values.split("-")
raceDay = dateValues[len(dateValues) - 1].strip()
return raceDay
def CheckRaceDateIsToday(raceDate):
todayDate = dt.datetime.now()
day = todayDate.strftime("%d")
month = todayDate.strftime("%b")
mergedDate = "{} {}".format(day, month)
return True if mergedDate == raceDate else False
def Check():
for element in allElements:
href = element.get("href")
if href:  # the original check used 'or', which always passed; only follow non-empty hrefs
result = parser.GetPelementFromHref("{}{}".format(baseUrl, href))
if result is not None:
value = result.get("value")
if value:  # likewise, require a non-empty value instead of an always-true 'or' check
lastDate = ClearRaceDateValue(result.text)
result = CheckRaceDateIsToday(lastDate)
if result == True:
toast.ShowToastMessage(lastDate) | none | 1 | 2.986205 | 3 |
|
shsa/uc_print.py | dratasich/shsa | 0 | 6633222 |
#!/usr/bin/python3
"""Prints a SHSA model."""
import argparse
from model.shsamodel import SHSAModel, SHSANodeType
# parse optional config file
parser = argparse.ArgumentParser(description="""Execute SHSA engines given a
config file.""")
parser.add_argument('-m', '--model', type=str,
default="../config/shsamodel1.yaml",
help="SHSA model in a config file.")
args = parser.parse_args()
# yaml example
model = SHSAModel(configfile=args.model)
model.write_dot("uc_print_model", "pdf")
print(model)
| #!/usr/bin/python3
"""Prints a SHSA model."""
import argparse
from model.shsamodel import SHSAModel, SHSANodeType
# parse optional config file
parser = argparse.ArgumentParser(description="""Execute SHSA engines given a
config file.""")
parser.add_argument('-m', '--model', type=str,
default="../config/shsamodel1.yaml",
help="SHSA model in a config file.")
args = parser.parse_args()
# yaml example
model = SHSAModel(configfile=args.model)
model.write_dot("uc_print_model", "pdf")
print(model) | en | 0.365517 | #!/usr/bin/python3 Prints a SHSA model. # parse optional config file Execute SHSA engines given a config file. # yaml example | 2.900431 | 3 |
hasher-matcher-actioner/hmalib/lambdas/pdq/pdq_indexer.py | ekmixon/ThreatExchange | 0 | 6633223 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import os
import pickle
import typing as t
from functools import reduce
from urllib.parse import unquote_plus
import boto3
from threatexchange.signal_type.pdq_index import PDQIndex
from hmalib import metrics
from hmalib.common.logging import get_logger
from hmalib.common.s3_adapters import (
ThreatExchangeS3PDQAdapter,
S3ThreatDataConfig,
HashRowT,
)
logger = get_logger(__name__)
s3_client = boto3.client("s3")
THREAT_EXCHANGE_DATA_BUCKET_NAME = os.environ["THREAT_EXCHANGE_DATA_BUCKET_NAME"]
THREAT_EXCHANGE_DATA_FOLDER = os.environ["THREAT_EXCHANGE_DATA_FOLDER"]
THREAT_EXCHANGE_PDQ_FILE_EXTENSION = os.environ["THREAT_EXCHANGE_PDQ_FILE_EXTENSION"]
INDEXES_BUCKET_NAME = os.environ["INDEXES_BUCKET_NAME"]
PDQ_INDEX_KEY = os.environ["PDQ_INDEX_KEY"]
def unwrap_if_sns(data):
if "EventSource" in data and data["EventSource"] == "aws:sns":
message = data["Sns"]["Message"]
return json.loads(message)
return data
def is_s3_testevent(data):
return "Event" in data and data["Event"] == "s3:TestEvent"
def was_pdq_data_updated(event):
# TODO: This will attempt to load all pdq files everytime any pdq file is updated
# so if files are updated for c collaborations it will lead to c^2 files being read
# this can be optimized by no longer being event based but instead running on
# a timer if the files have changed.
for record in event["Records"]:
inner_record = unwrap_if_sns(record)
if is_s3_testevent(inner_record):
continue
for s3_record in inner_record["Records"]:
bucket_name = s3_record["s3"]["bucket"]["name"]
file_path = unquote_plus(s3_record["s3"]["object"]["key"])
if (
bucket_name == THREAT_EXCHANGE_DATA_BUCKET_NAME
and file_path.startswith(THREAT_EXCHANGE_DATA_FOLDER)
and file_path.endswith(THREAT_EXCHANGE_PDQ_FILE_EXTENSION)
):
return True
return False
def merge_pdq_files(
accumulator: t.Dict[str, HashRowT], hash_row: HashRowT
) -> t.Dict[str, HashRowT]:
hash, meta_data = hash_row
if hash not in accumulator.keys():
# Add hash as new row
accumulator[hash] = hash_row
else:
# Add new privacy group to existing row
accumulator[hash][1]["privacy_groups"].update(meta_data["privacy_groups"])
# Add new 'tags' for privacy group to existing row
accumulator[hash][1]["tags"].update(meta_data["tags"])
return accumulator
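# Illustrative example (not part of the original file): merging two rows that share a hash.
# The hash and metadata values below are made up.
#   rows = [
#       ('acd8...90f1', {'privacy_groups': {111}, 'tags': {111: {'tag_a'}}}),
#       ('acd8...90f1', {'privacy_groups': {222}, 'tags': {222: {'tag_b'}}}),
#   ]
#   merged = reduce(merge_pdq_files, rows, {})
#   # merged contains one entry whose privacy_groups is {111, 222} and whose tags cover both groups.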
def lambda_handler(event, context):
"""
Listens to SQS events fired when new data files are added to the data
bucket's data directory. If the updated key matches a set of criteria,
converts the raw data file into an index and writes to an output S3 bucket.
As per the default configuration, the bucket must be
- the hashing data bucket eg.
dipanjanm-hashing-data20210224213427723700000003
- the key name must be in the ThreatExchange folder (eg. threat_exchange_data/)
- the key name must be a pdq file ending in ".pdq.te"
Which means adding new versions of the datasets will not have an effect. You
must add the exact pdq.te file.
"""
if not was_pdq_data_updated(event):
logger.info("PDQ Data Not Updated, skipping")
return
logger.info("PDQ Data Updated, updating pdq hash index")
metrics_logger = metrics.names.pdq_indexer_lambda
s3_config = S3ThreatDataConfig(
threat_exchange_data_bucket_name=THREAT_EXCHANGE_DATA_BUCKET_NAME,
threat_exchange_data_folder=THREAT_EXCHANGE_DATA_FOLDER,
threat_exchange_pdq_file_extension=THREAT_EXCHANGE_PDQ_FILE_EXTENSION,
)
pdq_data_files = ThreatExchangeS3PDQAdapter(
config=s3_config, metrics_logger=metrics_logger
).load_data()
with metrics.timer(metrics_logger.merge_datafiles):
logger.info("Merging PDQ Hash files")
flat_pdq_data = [
hash_row for pdq_file in pdq_data_files.values() for hash_row in pdq_file
]
merged_pdq_data = reduce(merge_pdq_files, flat_pdq_data, {}).values()
with metrics.timer(metrics_logger.build_index):
logger.info("Creating PDQ Hash Index")
index = PDQIndex.build(merged_pdq_data)
logger.info("Putting index in S3")
index_bytes = pickle.dumps(index)
with metrics.timer(metrics_logger.upload_index):
s3_client.put_object(
Bucket=INDEXES_BUCKET_NAME, Key=PDQ_INDEX_KEY, Body=index_bytes
)
logger.info("Index update complete")
metrics.flush()
# For testing purposes so that this can be run from the command line like:
# $ python3 -m hmalib.lambdas.pdq.pdq_indexer
if __name__ == "__main__":
privacy_group_id = 1234567890
data_updated_event = {
"Records": [
{
"Records": [
{
"s3": {
"bucket": {"name": THREAT_EXCHANGE_DATA_BUCKET_NAME},
"object": {
"key": THREAT_EXCHANGE_DATA_FOLDER
+ str(privacy_group_id)
+ THREAT_EXCHANGE_PDQ_FILE_EXTENSION
},
}
}
]
}
]
}
lambda_handler(data_updated_event, None)
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import os
import pickle
import typing as t
from functools import reduce
from urllib.parse import unquote_plus
import boto3
from threatexchange.signal_type.pdq_index import PDQIndex
from hmalib import metrics
from hmalib.common.logging import get_logger
from hmalib.common.s3_adapters import (
ThreatExchangeS3PDQAdapter,
S3ThreatDataConfig,
HashRowT,
)
logger = get_logger(__name__)
s3_client = boto3.client("s3")
THREAT_EXCHANGE_DATA_BUCKET_NAME = os.environ["THREAT_EXCHANGE_DATA_BUCKET_NAME"]
THREAT_EXCHANGE_DATA_FOLDER = os.environ["THREAT_EXCHANGE_DATA_FOLDER"]
THREAT_EXCHANGE_PDQ_FILE_EXTENSION = os.environ["THREAT_EXCHANGE_PDQ_FILE_EXTENSION"]
INDEXES_BUCKET_NAME = os.environ["INDEXES_BUCKET_NAME"]
PDQ_INDEX_KEY = os.environ["PDQ_INDEX_KEY"]
def unwrap_if_sns(data):
if "EventSource" in data and data["EventSource"] == "aws:sns":
message = data["Sns"]["Message"]
return json.loads(message)
return data
def is_s3_testevent(data):
return "Event" in data and data["Event"] == "s3:TestEvent"
def was_pdq_data_updated(event):
# TODO: This will attempt to load all pdq files everytime any pdq file is updated
# so if files are updated for c collaborations it will lead to c^2 files being read
# this can be optimized by no longer being event based but instead running on
# a timer if the files have changed.
for record in event["Records"]:
inner_record = unwrap_if_sns(record)
if is_s3_testevent(inner_record):
continue
for s3_record in inner_record["Records"]:
bucket_name = s3_record["s3"]["bucket"]["name"]
file_path = unquote_plus(s3_record["s3"]["object"]["key"])
if (
bucket_name == THREAT_EXCHANGE_DATA_BUCKET_NAME
and file_path.startswith(THREAT_EXCHANGE_DATA_FOLDER)
and file_path.endswith(THREAT_EXCHANGE_PDQ_FILE_EXTENSION)
):
return True
return False
def merge_pdq_files(
accumulator: t.Dict[str, HashRowT], hash_row: HashRowT
) -> t.Dict[str, HashRowT]:
hash, meta_data = hash_row
if hash not in accumulator.keys():
# Add hash as new row
accumulator[hash] = hash_row
else:
# Add new privacy group to existing row
accumulator[hash][1]["privacy_groups"].update(meta_data["privacy_groups"])
# Add new 'tags' for privacy group to existing row
accumulator[hash][1]["tags"].update(meta_data["tags"])
return accumulator
def lambda_handler(event, context):
"""
Listens to SQS events fired when new data files are added to the data
bucket's data directory. If the updated key matches a set of criteria,
converts the raw data file into an index and writes to an output S3 bucket.
As per the default configuration, the bucket must be
- the hashing data bucket eg.
dipanjanm-hashing-data20210224213427723700000003
- the key name must be in the ThreatExchange folder (eg. threat_exchange_data/)
- the key name must be a pdq file ending in ".pdq.te"
Which means adding new versions of the datasets will not have an effect. You
must add the exact pdq.te file.
"""
if not was_pdq_data_updated(event):
logger.info("PDQ Data Not Updated, skipping")
return
logger.info("PDQ Data Updated, updating pdq hash index")
metrics_logger = metrics.names.pdq_indexer_lambda
s3_config = S3ThreatDataConfig(
threat_exchange_data_bucket_name=THREAT_EXCHANGE_DATA_BUCKET_NAME,
threat_exchange_data_folder=THREAT_EXCHANGE_DATA_FOLDER,
threat_exchange_pdq_file_extension=THREAT_EXCHANGE_PDQ_FILE_EXTENSION,
)
pdq_data_files = ThreatExchangeS3PDQAdapter(
config=s3_config, metrics_logger=metrics_logger
).load_data()
with metrics.timer(metrics_logger.merge_datafiles):
logger.info("Merging PDQ Hash files")
flat_pdq_data = [
hash_row for pdq_file in pdq_data_files.values() for hash_row in pdq_file
]
merged_pdq_data = reduce(merge_pdq_files, flat_pdq_data, {}).values()
with metrics.timer(metrics_logger.build_index):
logger.info("Creating PDQ Hash Index")
index = PDQIndex.build(merged_pdq_data)
logger.info("Putting index in S3")
index_bytes = pickle.dumps(index)
with metrics.timer(metrics_logger.upload_index):
s3_client.put_object(
Bucket=INDEXES_BUCKET_NAME, Key=PDQ_INDEX_KEY, Body=index_bytes
)
logger.info("Index update complete")
metrics.flush()
# For testing purposes so that this can be run from the command line like:
# $ python3 -m hmalib.lambdas.pdq.pdq_indexer
if __name__ == "__main__":
privacy_group_id = 1234567890
data_updated_event = {
"Records": [
{
"Records": [
{
"s3": {
"bucket": {"name": THREAT_EXCHANGE_DATA_BUCKET_NAME},
"object": {
"key": THREAT_EXCHANGE_DATA_FOLDER
+ str(privacy_group_id)
+ THREAT_EXCHANGE_PDQ_FILE_EXTENSION
},
}
}
]
}
]
}
lambda_handler(data_updated_event, None)
| en | 0.810003 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # TODO: This will attempt to load all pdq files everytime any pdq file is updated # so if files are updated for c collaborations it will lead to c^2 files being read # this can be optimized by no longer being event based but instead running on # a timer if the files have changed. # Add hash as new row # Add new privacy group to existing row # Add new 'tags' for privacy group to existing row Listens to SQS events fired when new data files are added to the data bucket's data directory. If the updated key matches a set of criteria, converts the raw data file into an index and writes to an output S3 bucket. As per the default configuration, the bucket must be - the hashing data bucket eg. dipanjanm-hashing-data20210224213427723700000003 - the key name must be in the ThreatExchange folder (eg. threat_exchange_data/) - the key name must be a pdq file ending in ".pdq.te" Which means adding new versions of the datasets will not have an effect. You must add the exact pdq.te file. # For testing purposes so that this can be run from the command line like: # $ python3 -m hmalib.lambdas.pdq.pdq_indexer | 1.963047 | 2 |
quick_cgmod.py | franknu/cgmodsel | 1 | 6633224 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME> (<EMAIL>), 2020
Demo for
Nussbaum, F. & <NAME>. (2020). Pairwise sparse + low-rank models for variables of mixed type.
Journal of Multivariate Analysis, 2020.
If you use this software, please consider citing this article.
"""
# pylint: disable=C0103
from cgmodsel.admm import AdmmCGaussianPW, AdmmGaussianPW
from cgmodsel.admm import AdmmCGaussianSL, AdmmGaussianSL
from cgmodsel.dataops import load_prepare_data # function to read data
def load(dataset: dict):
"""
load csv with file path dataset['filename']
return tuple (cat_data, cont_data, meta),
where cat_data is the binary data, cont_data is the quantitative data,
and meta is meta information about the dataset
"""
# print('Loading data...(%s)'%(dataset['filename']))
## parameters for loading function ##
loaddict = {'catuniques': None, 'standardize': True, 'verb':True}
# standardize quantitative variables before learning model
# catuniques: values of the binary variables (to support read function)
# recommended to provide this if binary variables are not strings such as 'yes'/'no'
if 'sparams' in dataset:
loaddict.update(dataset['sparams'])
return load_prepare_data(dataset['filename'],
cattype='dummy_red',
**loaddict)
def learn_sparse_model(data, regparam):
cat_data, cont_data, meta = load(data) # load the data
print(meta)
# return
###### fit models
## initialize solver and drop data ##
if meta['n_cat'] > 0: # binary variables are present
print('Using pseudo-likelihood solver in the presence of discrete variables...')
solver = AdmmCGaussianPW()
solver.drop_data((cat_data, cont_data), meta)
else: # purely Gaussian model
print('Using likelihood solver for purely Gaussian model...')
solver = AdmmGaussianPW()
solver.drop_data(cont_data, meta)
solver.set_regularization_params(regparam)
## solve the problem, that is, estimate a sparse model ##
print('Solving the problem...')
solver.solve(verb=1, use_u=0) # use_u=0 turns of univariate discrete parameters
###### model visualization
model = solver.get_canonicalparams() # PW model instance
model.repr_graphical(diagonal=0) # plottype='pn'
return model
def learn_sl_model(data, regparams):
cat_data, cont_data, meta = load(data) # load the data
###### fit models
## initialize solver and drop data ##
if meta['n_cat'] > 0: # binary variables are present
print('Using pseudo-likelihood solver in the presence of discrete variables...')
solver = AdmmCGaussianSL()
solver.drop_data((cat_data, cont_data), meta)
else: # purely Gaussian model
print('Using likelihood solver for purely Gaussian model...')
solver = AdmmGaussianSL()
solver.drop_data(cont_data, meta)
solver.set_regularization_params(regparams)
## solve the problem, that is, estimate a sparse + low-rank model ##
print('Solving the problem...')
solver.solve(verb=0)
###### model visualization
model = solver.get_canonicalparams() # S + L model instance
model.plot_sl(plottype='pn')
return model
if __name__ == '__main__':
###### data sets
## binary ##
ABILITY = {
'filename': "datasets/ability_proc.csv",
'regparams': (.2, .5),
'sparams': {
'catuniques': [0, 1]
} # values that binary variables take
}
CFMT = {
'filename': "datasets/CFMTkurzBIN.csv",
'regparams': (.15, 1.5),
'sparams': {
'catuniques': [0, 1]
} # values that binary variables take
}
## quantitative ##
LSVT = {
'filename': "datasets/LSVT.csv",
'regparams': (.1, 1),
}
## mixed binary-quantitative ##
ALLBUS = {
'filename': "datasets/allbus2016_proc.csv",
'regparams': (1, 2),
}
HELP = {
'filename': "datasets/HELPmiss_proc.csv",
'regparams': (.5, 2),
}
IRIS = {
'filename': 'datasets/iris.csv',
'regparams': (.5, 2)
}
###### select and load data set
# ********************************* #
# comment out all but one line here #
data = CFMT
# data = LSVT
# data = IRIS
# ********************************* #
## set regularization parameters for sparse + low-rank model ##
# for an introduction to the models, please see:
# https://github.com/franknu/cgmodsel/wiki
# you may try different values, any pair of positive reals will do
# e.g., regparams = (.1, 1)
model = learn_sl_model(data, regparams=data['regparams'])
## or learn a purely sparse graphical model wo. low-rank component
# model = learn_sparse_model(data, regparam=3.0)
# model.get_params()
# model.get_meanparams()
# model.get_meta()
# model.save(outfile="saved_model")
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME> (<EMAIL>), 2020
Demo for
Nussbaum, F. & <NAME>. (2020). Pairwise sparse + low-rank models for variables of mixed type.
Journal of Multivariate Analysis, 2020.
If you use this software, please consider citing this article.
"""
# pylint: disable=C0103
from cgmodsel.admm import AdmmCGaussianPW, AdmmGaussianPW
from cgmodsel.admm import AdmmCGaussianSL, AdmmGaussianSL
from cgmodsel.dataops import load_prepare_data # function to read data
def load(dataset: dict):
"""
load csv with file path dataset['filename']
return tuple (cat_data, cont_data, meta),
where cat_data is the binary data, cont_data is the quantitative data,
and meta is meta information about the dataset
"""
# print('Loading data...(%s)'%(dataset['filename']))
## parameters for loading function ##
loaddict = {'catuniques': None, 'standardize': True, 'verb':True}
# standardize quantitative variables before learning model
# catuniques: values of the binary variables (to support read function)
# recommended to provide this if binary variables are not strings such as 'yes'/'no'
if 'sparams' in dataset:
loaddict.update(dataset['sparams'])
return load_prepare_data(dataset['filename'],
cattype='dummy_red',
**loaddict)
def learn_sparse_model(data, regparam):
cat_data, cont_data, meta = load(data) # load the data
print(meta)
# return
###### fit models
## initialize solver and drop data ##
if meta['n_cat'] > 0: # binary variables are present
print('Using pseudo-likelihood solver in the presence of discrete variables...')
solver = AdmmCGaussianPW()
solver.drop_data((cat_data, cont_data), meta)
else: # purely Gaussian model
print('Using likelihood solver for purely Gaussian model...')
solver = AdmmGaussianPW()
solver.drop_data(cont_data, meta)
solver.set_regularization_params(regparam)
## solve the problem, that is, estimate a sparse model ##
print('Solving the problem...')
solver.solve(verb=1, use_u=0) # use_u=0 turns of univariate discrete parameters
###### model visualization
model = solver.get_canonicalparams() # PW model instance
model.repr_graphical(diagonal=0) # plottype='pn'
return model
def learn_sl_model(data, regparams):
cat_data, cont_data, meta = load(data) # load the data
###### fit models
## initialize solver and drop data ##
if meta['n_cat'] > 0: # binary variables are present
print('Using pseudo-likelihood solver in the presence of discrete variables...')
solver = AdmmCGaussianSL()
solver.drop_data((cat_data, cont_data), meta)
else: # purely Gaussian model
print('Using likelihood solver for purely Gaussian model...')
solver = AdmmGaussianSL()
solver.drop_data(cont_data, meta)
solver.set_regularization_params(regparams)
## solve the problem, that is, estimate a sparse + low-rank model ##
print('Solving the problem...')
solver.solve(verb=0)
###### model visualization
model = solver.get_canonicalparams() # S + L model instance
model.plot_sl(plottype='pn')
return model
if __name__ == '__main__':
###### data sets
## binary ##
ABILITY = {
'filename': "datasets/ability_proc.csv",
'regparams': (.2, .5),
'sparams': {
'catuniques': [0, 1]
} # values that binary variables take
}
CFMT = {
'filename': "datasets/CFMTkurzBIN.csv",
'regparams': (.15, 1.5),
'sparams': {
'catuniques': [0, 1]
} # values that binary variables take
}
## quantitative ##
LSVT = {
'filename': "datasets/LSVT.csv",
'regparams': (.1, 1),
}
## mixed binary-quantitative ##
ALLBUS = {
'filename': "datasets/allbus2016_proc.csv",
'regparams': (1, 2),
}
HELP = {
'filename': "datasets/HELPmiss_proc.csv",
'regparams': (.5, 2),
}
IRIS = {
'filename': 'datasets/iris.csv',
'regparams': (.5, 2)
}
###### select and load data set
# ********************************* #
# comment out all but one line here #
data = CFMT
# data = LSVT
# data = IRIS
# ********************************* #
## set regularization parameters for sparse + low-rank model ##
# for an introduction to the models, please see:
# https://github.com/franknu/cgmodsel/wiki
# you may try different values, any pair of positive reals will do
# e.g., regparams = (.1, 1)
model = learn_sl_model(data, regparams=data['regparams'])
## or learn a purely sparse graphical model wo. low-rank component
# model = learn_sparse_model(data, regparam=3.0)
# model.get_params()
# model.get_meanparams()
# model.get_meta()
# model.save(outfile="saved_model")
| en | 0.569747 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- @author: <NAME> (<EMAIL>), 2020 Demo for Nussbaum, F. & <NAME>. (2020). Pairwise sparse + low-rank models for variables of mixed type. Journal of Multivariate Analysis, 2020. If you use this software, please consider citing this article. # pylint: disable=C0103 # function to read data load csv with file path dataset['filename'] return tuple (cat_data, cont_data, meta), where cat_data is the binary data, cont_data is the quantitative data, and meta is meta information about the dataset # print('Loading data...(%s)'%(dataset['filename'])) ## parameters for loading function ## # standardize quantitative variables before learning model # catuniques: values of the binary variables (to support read function) # recommended to provide this if binary variables are not strings such as 'yes'/'no' # load the data # return ###### fit models ## initialize solver and drop data ## # binary variables are present # purely Gaussian model ## solve the problem, that is, estimate a sparse model ## # use_u=0 turns of univariate discrete parameters ###### model visualization # PW model instance # plottype='pn' # load the data ###### fit models ## initialize solver and drop data ## # binary variables are present # purely Gaussian model ## solve the problem, that is, estimate a sparse + low-rank model ## ###### model visualization # S + L model instance ###### data sets ## binary ## # values that binary variables take # values that binary variables take ## quantitative ## ## mixed binary-quantitative ## ###### select and load data set # ********************************* # # comment out all but one line here # # data = LSVT # data = IRIS # ********************************* # ## set regularization parameters for sparse + low-rank model ## # for an introduction to the models, please see: # https://github.com/franknu/cgmodsel/wiki # you may try different values, any pair of positive reals will do # e.g., regparams = (.1, 1) ## or learn a purely sparse graphical model wo. low-rank component # model = learn_sparse_model(data, regparam=3.0) # model.get_params() # model.get_meanparams() # model.get_meta() # model.save(outfile="saved_model") | 2.54918 | 3 |
section-20-modules/greet.py | mugan86/bootcamp-basic-to-expert-from-scratch | 31 | 6633225 | <filename>section-20-modules/greet.py
# Saludos a la persona indicada
def hello(name):
return f'Hola {name}'
def goodbye(name):
return f'Adios {name}' | <filename>section-20-modules/greet.py
# Saludos a la persona indicada
def hello(name):
return f'Hola {name}'
def goodbye(name):
return f'Adios {name}' | es | 0.848734 | # Saludos a la persona indicada | 1.826949 | 2 |
eggs/bx_python-0.7.2-py2.6-linux-x86_64-ucs4.egg/EGG-INFO/scripts/bed_intersect.py | psnehal/MethylSig | 0 | 6633226 | #!/afs/bx.psu.edu/project/pythons/py2.6-linux-x86_64-ucs4/bin/python2.6
"""
Find regions of first bed file that overlap regions in a second bed file. The
output preserves all fields from the input.
NOTE: -u and -d options are currently not functional!
usage: %prog bed_file_1 bed_file_2
-m, --mincols=N: Require this much overlap (default 1bp)
-u, --upstream_pad=N: upstream interval padding (default 0bp)
-d, --downstream_pad=N: downstream interval padding (default 0bp)
-v, --reverse: Print regions that DO NOT overlap
-b, --booleans: Just print '1' if interval overlaps or '0' otherwise
"""
import sys
from warnings import warn
from bx.bitset import *
from bx.bitset_builders import *
from bx.cookbook import doc_optparse
mincols = 1
upstream_pad = 0
downstream_pad = 0
options, args = doc_optparse.parse( __doc__ )
try:
if options.mincols: mincols = int( options.mincols )
if options.upstream_pad: upstream_pad = int( options.upstream_pad )
if options.downstream_pad: downstream_pad = int( options.downstream_pad )
reverse = bool( options.reverse )
booleans = bool( options.booleans )
in_fname, in2_fname = args
except:
doc_optparse.exit()
# Read first bed into some bitsets
bitsets = binned_bitsets_from_file( open( in2_fname ) )
# Read second BED and intersect
for line in open( in_fname ):
if line.startswith("#") or line.isspace():
continue
fields = line.split()
start, end = int( fields[1] ), int( fields[2] )
if start > end:
warn( "Bed interval start after end!" )
if fields[0] in bitsets and bitsets[fields[0]].count_range( start, end-start ) >= mincols:
if booleans:
if reverse:
print 0
else:
print 1
elif not reverse:
print line,
else:
if booleans:
if reverse:
print 1
else:
print 0
elif reverse:
print line,
| #!/afs/bx.psu.edu/project/pythons/py2.6-linux-x86_64-ucs4/bin/python2.6
"""
Find regions of first bed file that overlap regions in a second bed file. The
output preserves all fields from the input.
NOTE: -u and -d options are currently not functional!
usage: %prog bed_file_1 bed_file_2
-m, --mincols=N: Require this much overlap (default 1bp)
-u, --upstream_pad=N: upstream interval padding (default 0bp)
-d, --downstream_pad=N: downstream interval padding (default 0bp)
-v, --reverse: Print regions that DO NOT overlap
-b, --booleans: Just print '1' if interval overlaps or '0' otherwise
"""
import sys
from warnings import warn
from bx.bitset import *
from bx.bitset_builders import *
from bx.cookbook import doc_optparse
mincols = 1
upstream_pad = 0
downstream_pad = 0
options, args = doc_optparse.parse( __doc__ )
try:
if options.mincols: mincols = int( options.mincols )
if options.upstream_pad: upstream_pad = int( options.upstream_pad )
if options.downstream_pad: downstream_pad = int( options.downstream_pad )
reverse = bool( options.reverse )
booleans = bool( options.booleans )
in_fname, in2_fname = args
except:
doc_optparse.exit()
# Read first bed into some bitsets
bitsets = binned_bitsets_from_file( open( in2_fname ) )
# Read second BED and intersect
for line in open( in_fname ):
if line.startswith("#") or line.isspace():
continue
fields = line.split()
start, end = int( fields[1] ), int( fields[2] )
if start > end:
warn( "Bed interval start after end!" )
if fields[0] in bitsets and bitsets[fields[0]].count_range( start, end-start ) >= mincols:
if booleans:
if reverse:
print 0
else:
print 1
elif not reverse:
print line,
else:
if booleans:
if reverse:
print 1
else:
print 0
elif reverse:
print line,
| en | 0.738765 | #!/afs/bx.psu.edu/project/pythons/py2.6-linux-x86_64-ucs4/bin/python2.6 Find regions of first bed file that overlap regions in a second bed file. The output preserves all fields from the input. NOTE: -u and -d options are currently not functional! usage: %prog bed_file_1 bed_file_2 -m, --mincols=N: Require this much overlap (default 1bp) -u, --upstream_pad=N: upstream interval padding (default 0bp) -d, --downstream_pad=N: downstream interval padding (default 0bp) -v, --reverse: Print regions that DO NOT overlap -b, --booleans: Just print '1' if interval overlaps or '0' otherwise # Read first bed into some bitsets # Read second BED and intersect | 2.715716 | 3 |
src/openprocurement/tender/limited/includeme.py | ProzorroUKR/openprocurement.api | 10 | 6633227 | # -*- coding: utf-8 -*-
from logging import getLogger
from pyramid.interfaces import IRequest
from openprocurement.api.interfaces import IContentConfigurator
from openprocurement.tender.limited.models import (
ReportingTender,
NegotiationTender,
NegotiationQuickTender,
IReportingTender,
INegotiationTender,
INegotiationQuickTender,
)
from openprocurement.tender.limited.adapters import (
TenderReportingConfigurator,
TenderNegotiationConfigurator,
TenderNegotiationQuickConfigurator,
)
LOGGER = getLogger("openprocurement.tender.limited")
def includeme(config):
LOGGER.info("Init tender.limited.reporting plugin.")
config.add_tender_procurementMethodType(ReportingTender)
config.scan("openprocurement.tender.limited.views")
config.scan("openprocurement.tender.limited.procedure.views")
config.scan("openprocurement.tender.limited.subscribers")
config.registry.registerAdapter(TenderReportingConfigurator, (IReportingTender, IRequest), IContentConfigurator)
def includeme_negotiation(config):
LOGGER.info("Init tender.limited.negotiation plugin.")
config.add_tender_procurementMethodType(NegotiationTender)
config.scan("openprocurement.tender.limited.views")
config.scan("openprocurement.tender.limited.procedure.views")
config.scan("openprocurement.tender.limited.subscribers")
config.registry.registerAdapter(TenderNegotiationConfigurator, (INegotiationTender, IRequest), IContentConfigurator)
def includeme_negotiation_quick(config):
LOGGER.info("Init tender.limited.negotiation.quick plugin.")
config.add_tender_procurementMethodType(NegotiationQuickTender)
config.scan("openprocurement.tender.limited.views")
config.scan("openprocurement.tender.limited.procedure.views")
config.scan("openprocurement.tender.limited.subscribers")
config.registry.registerAdapter(
TenderNegotiationQuickConfigurator, (INegotiationQuickTender, IRequest), IContentConfigurator
)
| # -*- coding: utf-8 -*-
from logging import getLogger
from pyramid.interfaces import IRequest
from openprocurement.api.interfaces import IContentConfigurator
from openprocurement.tender.limited.models import (
ReportingTender,
NegotiationTender,
NegotiationQuickTender,
IReportingTender,
INegotiationTender,
INegotiationQuickTender,
)
from openprocurement.tender.limited.adapters import (
TenderReportingConfigurator,
TenderNegotiationConfigurator,
TenderNegotiationQuickConfigurator,
)
LOGGER = getLogger("openprocurement.tender.limited")
def includeme(config):
LOGGER.info("Init tender.limited.reporting plugin.")
config.add_tender_procurementMethodType(ReportingTender)
config.scan("openprocurement.tender.limited.views")
config.scan("openprocurement.tender.limited.procedure.views")
config.scan("openprocurement.tender.limited.subscribers")
config.registry.registerAdapter(TenderReportingConfigurator, (IReportingTender, IRequest), IContentConfigurator)
def includeme_negotiation(config):
LOGGER.info("Init tender.limited.negotiation plugin.")
config.add_tender_procurementMethodType(NegotiationTender)
config.scan("openprocurement.tender.limited.views")
config.scan("openprocurement.tender.limited.procedure.views")
config.scan("openprocurement.tender.limited.subscribers")
config.registry.registerAdapter(TenderNegotiationConfigurator, (INegotiationTender, IRequest), IContentConfigurator)
def includeme_negotiation_quick(config):
LOGGER.info("Init tender.limited.negotiation.quick plugin.")
config.add_tender_procurementMethodType(NegotiationQuickTender)
config.scan("openprocurement.tender.limited.views")
config.scan("openprocurement.tender.limited.procedure.views")
config.scan("openprocurement.tender.limited.subscribers")
config.registry.registerAdapter(
TenderNegotiationQuickConfigurator, (INegotiationQuickTender, IRequest), IContentConfigurator
)
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.915789 | 2 |
qr.py | guenter-r/QR_code_command_line | 0 | 6633228 | """
Just an quick implementation for a fully private QR code generator.
"""
import qrcode
from string import punctuation
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10,
border=4,
)
print('Text to QR-tify:')
input_data = input()
qr.add_data(input_data)
qr.make(fit=True)
print('select colors:\n(1)QR Code Color\n(2)Background color')
colors = ['pink','orange','red','blue','green','grey','black','white','mediumvioletred','tomato','brown','mediumseagreen','darkslategray','indigo','darksalmon','cyan','palevioletred','darkcyan','firebrick','darkolivegreen','forestgreen','darkslateblue','olive','greenyellow','lime','wheat','gold']
available = ', '.join(sorted(colors)).upper()
print(f'Color ideas: {available}.')
print('\nSelect code color:')
qr_color = input().lower()
# while qr_color not in colors:
# qr_color = input()
print('\nSelect background color:')
qr_back = input().lower()
# while qr_back not in colors:
# qr_back = input()
# create img file
img = qr.make_image(fill_color=qr_color, back_color=qr_back)
# Save file -> name from QR input
title = ''
for el in input_data[:50]:
if el not in punctuation:
if not el.isspace():
title+=el
else:
title+='_'
# Save QR code
img.save(f'{title[:5]}.png')
| """
Just an quick implementation for a fully private QR code generator.
"""
import qrcode
from string import punctuation
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10,
border=4,
)
print('Text to QR-tify:')
input_data = input()
qr.add_data(input_data)
qr.make(fit=True)
print('select colors:\n(1)QR Code Color\n(2)Background color')
colors = ['pink','orange','red','blue','green','grey','black','white','mediumvioletred','tomato','brown','mediumseagreen','darkslategray','indigo','darksalmon','cyan','palevioletred','darkcyan','firebrick','darkolivegreen','forestgreen','darkslateblue','olive','greenyellow','lime','wheat','gold']
available = ', '.join(sorted(colors)).upper()
print(f'Color ideas: {available}.')
print('\nSelect code color:')
qr_color = input().lower()
# while qr_color not in colors:
# qr_color = input()
print('\nSelect background color:')
qr_back = input().lower()
# while qr_back not in colors:
# qr_back = input()
# create img file
img = qr.make_image(fill_color=qr_color, back_color=qr_back)
# Save file -> name from QR input
title = ''
for el in input_data[:50]:
if el not in punctuation:
if not el.isspace():
title+=el
else:
title+='_'
# Save QR code
img.save(f'{title[:5]}.png')
| en | 0.584415 | Just an quick implementation for a fully private QR code generator. # while qr_color not in colors: # qr_color = input() # while qr_back not in colors: # qr_back = input() # create img file # Save file -> name from QR input # Save QR code | 3.074111 | 3 |
lfs_project/lfs/tests.py | ailov99/lfs | 0 | 6633229 | from django.test import TestCase, Client
from django.contrib.auth.models import User, AnonymousUser
from models import Teacher, Module, Takers, Page, ContentFile
class UserAppTests(TestCase):
def setUp(self):
self.c = Client()
self.user = User.objects.create_user("jdoe",
"<EMAIL>",
"123",
first_name="John",
last_name="Doe")
self.teacher = Teacher.objects.create(user=self.user,
bio="I am a dummy teacher",
school="Dummy School")
def test_user_registration_model(self):
"""
On registering, a new user is created, tied to an 'empty'
Teacher model
"""
# Assure DB is properly populated with correct data
self.assertEqual(User.objects.filter(username="jdoe").count(), 1)
self.assertEqual(Teacher.objects.filter(user__username="jdoe").count(), 1)
def test_user_registration_api(self):
"""
Correct registration POST will result in a properly populated
DB with both User and Teacher
"""
# attempt to send data which does not comply with the Terms and Cond
response = self.c.post('/lfs/register/', {'username': 'jstorer',
'email': '<EMAIL>',
'first_name': 'Jeremy',
'last_name': 'Storer',
'password': '<PASSWORD>',
'terms' : False})
self.assertEqual(response.status_code, 200)
self.assertEqual(User.objects.filter(username='jstorer').count(), 0)
# send terms-compliant data for a dummy user
response = self.c.post('/lfs/register/', {'username': 'jstorer',
'email': '<EMAIL>',
'first_name': 'Jeremy',
'last_name': 'Storer',
'password': '<PASSWORD>',
'terms' : True})
# Should be redirected to login page
self.assertRedirects(response, '/lfs/login/')
# Assure DB is properly populated with correct data
self.assertEqual(User.objects.filter(username='jstorer').count(), 1)
new_user = User.objects.filter(username='jstorer')[0]
self.assertEqual(new_user.first_name, 'Jeremy')
self.assertEqual(new_user.last_name, 'Storer')
self.assertEqual(Teacher.objects.filter(user__username='jstorer').count(), 1)
new_teacher = Teacher.objects.filter(user__username='jstorer')[0]
self.assertEqual(new_teacher.bio, '')
self.assertEqual(new_teacher.school, '')
def test_user_login(self):
"""
A user who is in the DB should be able to log in (as a teacher)
"""
# send a POST with username/pass
response = self.c.post('/lfs/login/', {'username': 'jdoe', 'password': '<PASSWORD>'})
# should be at dashboard
self.assertRedirects(response, '/lfs/dashboard/')
def test_user_logout(self):
"""
An existing user should be able to log out if currently logged in
"""
# log in (should redirect)
login_response = self.c.post('/lfs/login/', {'username': 'jdoe', 'password': '<PASSWORD>'})
self.assertEqual(login_response.status_code, 302)
logout_response = self.c.post('/lfs/logout/', {})
self.assertRedirects(logout_response, '/lfs/login/')
def test_user_dashboard(self):
"""
A logged in user should see their dashboard, with all
modules taken, their per-module and overall progress
"""
user_two = User.objects.create_user("tsinger", "<EMAIL>", "123",
first_name="Tim", last_name="Singer")
teacher_two = Teacher.objects.create(user=user_two, bio="I teach programming",
school="Glasgow School")
# populate DB with modules taken by user, with random progress stats
data = {"Introduction to Sustainable Development" : 12,
"What does it all mean?" : 59,
"How to teach students" : 77}
dummy_modules = []
for key in data:
module = Module.objects.create(title=key)
dummy_modules.append(module)
Takers.objects.create(user=self.user, module=module, progress=data[key])
# assign two of the modules to the other user (with different progress)
Takers.objects.create(user=user_two, module=Module.objects
.filter(title="What does it all mean?")[0],
progress=99)
Takers.objects.create(user=user_two, module=Module.objects
.filter(title="How to teach students")[0],
progress=32)
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
# dashboard GET
dashboard_response = self.c.get('/lfs/dashboard/')
# check modules
self.assertEqual(set(dashboard_response.context['modules_progress'].keys()),
set(dummy_modules))
# check per-module progress
for key in dummy_modules:
self.assertEqual(dashboard_response.context['modules_progress'][key],
data[key.title])
# check overall progress
self.assertEqual(dashboard_response.context['overall_progress'],
reduce(lambda x,y: x+y, [data[key] for key in data]) / len(data))
def test_module_content_viewable_by_user(self):
"""
A user should be able to see all available modules' contents
"""
# Assign random modules to test user
mod1 = Module.objects.create(title="First Module")
mod2 = Module.objects.create(title="Second Module")
Takers.objects.create(user=self.user, module=mod1)
Takers.objects.create(user=self.user, module=mod2)
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
# module that our user has taken (not necessary for viewing)
module = Module.objects.create(title="Third Module")
Takers.objects.create(user=self.user, module=module)
# assign some pages to module
Page.objects.create(module=module,
position=0,
section="First Page",
content="""
Vivamus sit amet auctor nisl, in auctor augue.
Nullam a purus eu erat semper eleifend quis a ex.
Phasellus a tortor quis lectus ultrices vestibulum sit amet ac nunc.
In rutrum hendrerit lorem non consequat. Ut malesuada orci ligula,
eu dapibus est viverra sed. Fusce lacinia ante non porta cursus.
""")
Page.objects.create(module=module,
position=1,
section="Second Page",
content="""
Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Nunc pellentesque nec orci dignissim congue. Quisque nec interdum augue.
Praesent ultricies felis nec nulla gravida tempor. In consequat aliquam
congue.
""")
# module GET
module_response = self.c.get('/lfs/module/{0}/'.format(module.id))
# check currently viewable module
self.assertEqual(module_response.context['module_title'],module.title)
self.assertEqual(set(module_response.context['module_pages']),
set(tuple(i for i in module.page_set.all())))
self.assertContains(module_response, module.title)
# Should be on first page by default
self.assertContains(module_response, module.page_set.all()[0].section)
# Should be tabbed on currently viewed Module by default
# so all sections of that module should be displayed (navigatable)
for page in module.page_set.all():
self.assertContains(module_response, page.section)
# all other user taken modules should be visible
user_module_junctions = tuple(i.module for i in Takers.objects.filter(user=self.user))
for mod in user_module_junctions:
self.assertContains(module_response, mod.title)
def test_user_leaderboard(self):
"""
Users should be able to see a leaderboard of all users using the website
"""
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
# assign some modules to dummy user to check progress
mod_one = Module.objects.create(title="First Module")
mod_two = Module.objects.create(title="Second Module")
Takers.objects.create(user=self.user, module=mod_one, progress=55)
Takers.objects.create(user=self.user, module=mod_two, progress=35)
# leaderboards GET
response = self.c.get('/lfs/leaderboard/')
# check all users are passed with their progress
user_count = Teacher.objects.all().count()
self.assertEqual(len(response.context['all_users_overall_progress']), user_count)
# check progress
self.assertEqual(response.context['all_users_overall_progress'][self.user],
45)
def test_user_profile(self):
"""
A registered user should be able to inspect their own as well
as others' profiles
"""
user_j = User.objects.create_user("jstorer", "<EMAIL>", "123",
first_name="Jeremy", last_name="Storer")
teacher_j = Teacher.objects.create(user=user_j, bio="I am a teacher",
school="Glasgow University")
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
# inspect own profile
own_response = self.c.get('/lfs/profile/{0}/'.format(self.user.id))
self.assertEqual(self.teacher, own_response.context['teacher'])
# inspect someone else's profile
else_response = self.c.get('/lfs/profile/{0}/'.format(user_j.id))
self.assertEqual(teacher_j, else_response.context['teacher'])
def test_user_get_edit_profile(self):
"""
Logged in users should be able to edit their own profiles
"""
user_j = User.objects.create_user("jstorer", "<EMAIL>", "123",
first_name="Jeremy", last_name="Storer")
teacher_j = Teacher.objects.create(user=user_j, bio="I am a teacher",
school="Glasgow University")
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
# test editing own profile
own_response = self.c.get('/lfs/profile/{0}/edit/'.format(self.user.id))
self.assertEqual(own_response.context['teacher'], self.teacher)
self.assertEqual(own_response.context['user_form'].instance, self.user)
# test editing some else's (not allowed)
else_response = self.c.get('/lfs/profile/{0}/edit/'.format(user_j.id))
self.assertRedirects(else_response, '/lfs/profile/{0}/'.format(user_j.id))
def test_user_post_edit_profile(self):
"""
Logged in users should be able to edit their own profiles
"""
user_j = User.objects.create_user("jstorer", "<EMAIL>", "123",
first_name="Jeremy", last_name="Storer")
teacher_j = Teacher.objects.create(user=user_j, bio="I am a teacher",
school="Glasgow University")
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
own_response = self.c.post('/lfs/profile/{0}/edit/'.format(self.user.id),
{'username' : 'jdoe',
'password' : '<PASSWORD>' ,
'first_name' : 'Gethin',
'last_name' : 'Gay',
'bio' : 'new bio',
'school' : 'Caledonian'})
def test_user_change_password(self):
"""
A user should be able to request change
of their password
"""
self.c.login(username="jdoe", password="<PASSWORD>")
new_p = '<PASSWORD>'
old_p = '123'
# TODO: enable once GET requests are handled in views
# Wrong old password
#response = self.c.post('/lfs/change_password/{0}'.format(self.user.id),
# {'old_password' : '<PASSWORD>',
# 'new_password' : <PASSWORD>,
# 'confirm_new_password' : <PASSWORD>})
#self.assertEqual(self.user.password, <PASSWORD>_p)
# Wrong new password confirmation
#response = self.c.post('/lfs/change_password/{0}'.format(self.user.id),
# {'old_password' : <PASSWORD>,
# 'new_password' : <PASSWORD>,
# 'confirm_new_password' : '<PASSWORD>'})
#self.assertEqual(self.user.password, <PASSWORD>_p)
# Proper
response = self.c.post('/lfs/change_password/',
{'old_password' : <PASSWORD>,
'new_password1' : <PASSWORD>,
'new_password2' : <PASSWORD>})
self.assertRedirects(response, '/lfs/profile/{0}/'.format(self.user.id))
self.c.logout()
self.assertEqual(self.c.login(username="jdoe", password=<PASSWORD>),
False)
self.assertEqual(self.c.login(username="jdoe", password=<PASSWORD>),
True)
def test_all_modules_viewable_by_user(self):
"""
A user should be able to see all available modules
in order to pick/modify his choices
"""
# Add modules
mods = ["First Module", "Introduction Module", "Random stuff",
"Fillers", "How to teach", "Sustainable energy"]
for t in mods:
Module.objects.create(title=t)
# Assign a random module to user
mod = Module.objects.all()[0]
Takers.objects.create(module=mod, user=self.user)
self.c.login(username="jdoe", password="<PASSWORD>")
response = self.c.get('/lfs/modules/')
self.assertEqual(response.context['is_admin'], False)
self.assertEqual(len(response.context['modules']), len(mods) - 1)
self.assertEqual(len(response.context['modules_taken']), 1)
self.assertEqual(response.context['modules_taken'][0], mod)
def test_user_pick_module(self):
"""
A user should be able to pick modules at will
"""
# Random unassigned module
new_mod = Module.objects.create(title="Unpicked module")
self.c.login(username="jdoe", password="<PASSWORD>")
response = self.c.get('/lfs/pick_module/{0}/'.format(new_mod.id))
self.assertRedirects(response, '/lfs/modules/')
self.assertEqual(Takers.objects.filter(user=self.user,
module=new_mod).exists(), True)
def test_user_drop_module(self):
"""
A user should be able to drop modules at will
"""
# Create a module and assign it to user
user_mod = Module.objects.create(title="Picked module")
Takers.objects.create(user=self.user, module = user_mod)
self.c.login(username="jdoe", password="<PASSWORD>")
response = self.c.get('/lfs/drop_module/{0}/'.format(user_mod.id))
self.assertRedirects(response, '/lfs/modules/')
self.assertEqual(Takers.objects.filter(user=self.user,
module=user_mod).exists(), False)
def test_user_trial(self):
"""
Unregistered users should be able to start a trial
"""
mod = Module.objects.create(title="New Mod")
response = self.c.get('/lfs/trial/')
cont = response.context
# mod is not a trial module -> empty
self.assertEqual(response.status_code, 200)
self.assertEqual(cont['trial'], True)
self.assertEqual(cont['modules_total'], 1)
self.assertEqual(cont['overall_progress'], 0)
self.assertEqual(len(cont['modules_progress']), 0)
mod_t = Module.objects.create(title="Trial Mod", trial=True)
response = self.c.get('/lfs/trial/')
cont = response.context
# mod_t is a trial module
self.assertEqual(response.status_code, 200)
self.assertEqual(cont['trial'], True)
self.assertEqual(cont['modules_total'], 2)
self.assertEqual(cont['overall_progress'], 0)
self.assertEqual(len(cont['modules_progress']), 1)
def test_user_trial_module(self):
"""
Unregistered users should have access to a trial module's content
"""
mod = Module.objects.create(title="Trial mod", trial=True)
Page.objects.create(module=mod,section="no",content="no")
response = self.c.get('/lfs/trial/module/{0}/'.format(mod.id))
cont = response.context
self.assertEqual(response.status_code, 200)
self.assertEqual(cont['module_title'], "Trial mod")
self.assertEqual(cont['user_progress_on_module'], 0)
| from django.test import TestCase, Client
from django.contrib.auth.models import User, AnonymousUser
from models import Teacher, Module, Takers, Page, ContentFile
class UserAppTests(TestCase):
def setUp(self):
self.c = Client()
self.user = User.objects.create_user("jdoe",
"<EMAIL>",
"123",
first_name="John",
last_name="Doe")
self.teacher = Teacher.objects.create(user=self.user,
bio="I am a dummy teacher",
school="Dummy School")
def test_user_registration_model(self):
"""
On registering, a new user is created, tied to an 'empty'
Teacher model
"""
# Assure DB is properly populated with correct data
self.assertEqual(User.objects.filter(username="jdoe").count(), 1)
self.assertEqual(Teacher.objects.filter(user__username="jdoe").count(), 1)
def test_user_registration_api(self):
"""
Correct registration POST will result in a properly populated
DB with both User and Teacher
"""
# attempt to send data which does not comply with the Terms and Cond
response = self.c.post('/lfs/register/', {'username': 'jstorer',
'email': '<EMAIL>',
'first_name': 'Jeremy',
'last_name': 'Storer',
'password': '<PASSWORD>',
'terms' : False})
self.assertEqual(response.status_code, 200)
self.assertEqual(User.objects.filter(username='jstorer').count(), 0)
# send terms-compliant data for a dummy user
response = self.c.post('/lfs/register/', {'username': 'jstorer',
'email': '<EMAIL>',
'first_name': 'Jeremy',
'last_name': 'Storer',
'password': '<PASSWORD>',
'terms' : True})
# Should be redirected to login page
self.assertRedirects(response, '/lfs/login/')
# Assure DB is properly populated with correct data
self.assertEqual(User.objects.filter(username='jstorer').count(), 1)
new_user = User.objects.filter(username='jstorer')[0]
self.assertEqual(new_user.first_name, 'Jeremy')
self.assertEqual(new_user.last_name, 'Storer')
self.assertEqual(Teacher.objects.filter(user__username='jstorer').count(), 1)
new_teacher = Teacher.objects.filter(user__username='jstorer')[0]
self.assertEqual(new_teacher.bio, '')
self.assertEqual(new_teacher.school, '')
def test_user_login(self):
"""
A user who is in the DB should be able to log in (as a teacher)
"""
# send a POST with username/pass
response = self.c.post('/lfs/login/', {'username': 'jdoe', 'password': '<PASSWORD>'})
# should be at dashboard
self.assertRedirects(response, '/lfs/dashboard/')
def test_user_logout(self):
"""
An existing user should be able to log out if currently logged in
"""
# log in (should redirect)
login_response = self.c.post('/lfs/login/', {'username': 'jdoe', 'password': '<PASSWORD>'})
self.assertEqual(login_response.status_code, 302)
logout_response = self.c.post('/lfs/logout/', {})
self.assertRedirects(logout_response, '/lfs/login/')
def test_user_dashboard(self):
"""
A logged in user should see their dashboard, with all
modules taken, their per-module and overall progress
"""
user_two = User.objects.create_user("tsinger", "<EMAIL>", "123",
first_name="Tim", last_name="Singer")
teacher_two = Teacher.objects.create(user=user_two, bio="I teach programming",
school="Glasgow School")
# populate DB with modules taken by user, with random progress stats
data = {"Introduction to Sustainable Development" : 12,
"What does it all mean?" : 59,
"How to teach students" : 77}
dummy_modules = []
for key in data:
module = Module.objects.create(title=key)
dummy_modules.append(module)
Takers.objects.create(user=self.user, module=module, progress=data[key])
# assign two of the modules to the other user (with different progress)
Takers.objects.create(user=user_two, module=Module.objects
.filter(title="What does it all mean?")[0],
progress=99)
Takers.objects.create(user=user_two, module=Module.objects
.filter(title="How to teach students")[0],
progress=32)
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
# dashboard GET
dashboard_response = self.c.get('/lfs/dashboard/')
# check modules
self.assertEqual(set(dashboard_response.context['modules_progress'].keys()),
set(dummy_modules))
# check per-module progress
for key in dummy_modules:
self.assertEqual(dashboard_response.context['modules_progress'][key],
data[key.title])
# check overall progress
self.assertEqual(dashboard_response.context['overall_progress'],
reduce(lambda x,y: x+y, [data[key] for key in data]) / len(data))
def test_module_content_viewable_by_user(self):
"""
A user should be able to see all available modules' contents
"""
# Assign random modules to test user
mod1 = Module.objects.create(title="First Module")
mod2 = Module.objects.create(title="Second Module")
Takers.objects.create(user=self.user, module=mod1)
Takers.objects.create(user=self.user, module=mod2)
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
# module that our user has taken (not necessary for viewing)
module = Module.objects.create(title="Third Module")
Takers.objects.create(user=self.user, module=module)
# assign some pages to module
Page.objects.create(module=module,
position=0,
section="First Page",
content="""
Vivamus sit amet auctor nisl, in auctor augue.
Nullam a purus eu erat semper eleifend quis a ex.
Phasellus a tortor quis lectus ultrices vestibulum sit amet ac nunc.
In rutrum hendrerit lorem non consequat. Ut malesuada orci ligula,
eu dapibus est viverra sed. Fusce lacinia ante non porta cursus.
""")
Page.objects.create(module=module,
position=1,
section="Second Page",
content="""
Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Nunc pellentesque nec orci dignissim congue. Quisque nec interdum augue.
Praesent ultricies felis nec nulla gravida tempor. In consequat aliquam
congue.
""")
# module GET
module_response = self.c.get('/lfs/module/{0}/'.format(module.id))
# check currently viewable module
self.assertEqual(module_response.context['module_title'],module.title)
self.assertEqual(set(module_response.context['module_pages']),
set(tuple(i for i in module.page_set.all())))
self.assertContains(module_response, module.title)
# Should be on first page by default
self.assertContains(module_response, module.page_set.all()[0].section)
# Should be tabbed on currently viewed Module by default
# so all sections of that module should be displayed (navigatable)
for page in module.page_set.all():
self.assertContains(module_response, page.section)
# all other user taken modules should be visible
user_module_junctions = tuple(i.module for i in Takers.objects.filter(user=self.user))
for mod in user_module_junctions:
self.assertContains(module_response, mod.title)
def test_user_leaderboard(self):
"""
Users should be able to see a leaderboard of all users using the website
"""
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
# assign some modules to dummy user to check progress
mod_one = Module.objects.create(title="First Module")
mod_two = Module.objects.create(title="Second Module")
Takers.objects.create(user=self.user, module=mod_one, progress=55)
Takers.objects.create(user=self.user, module=mod_two, progress=35)
# leaderboards GET
response = self.c.get('/lfs/leaderboard/')
# check all users are passed with their progress
user_count = Teacher.objects.all().count()
self.assertEqual(len(response.context['all_users_overall_progress']), user_count)
# check progress
self.assertEqual(response.context['all_users_overall_progress'][self.user],
45)
def test_user_profile(self):
"""
A registered user should be able to inspect their own as well
as others' profiles
"""
user_j = User.objects.create_user("jstorer", "<EMAIL>", "123",
first_name="Jeremy", last_name="Storer")
teacher_j = Teacher.objects.create(user=user_j, bio="I am a teacher",
school="Glasgow University")
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
# inspect own profile
own_response = self.c.get('/lfs/profile/{0}/'.format(self.user.id))
self.assertEqual(self.teacher, own_response.context['teacher'])
# inspect someone else's profile
else_response = self.c.get('/lfs/profile/{0}/'.format(user_j.id))
self.assertEqual(teacher_j, else_response.context['teacher'])
def test_user_get_edit_profile(self):
"""
Logged in users should be able to edit their own profiles
"""
user_j = User.objects.create_user("jstorer", "<EMAIL>", "123",
first_name="Jeremy", last_name="Storer")
teacher_j = Teacher.objects.create(user=user_j, bio="I am a teacher",
school="Glasgow University")
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
# test editing own profile
own_response = self.c.get('/lfs/profile/{0}/edit/'.format(self.user.id))
self.assertEqual(own_response.context['teacher'], self.teacher)
self.assertEqual(own_response.context['user_form'].instance, self.user)
# test editing some else's (not allowed)
else_response = self.c.get('/lfs/profile/{0}/edit/'.format(user_j.id))
self.assertRedirects(else_response, '/lfs/profile/{0}/'.format(user_j.id))
def test_user_post_edit_profile(self):
"""
Logged in users should be able to edit their own profiles
"""
user_j = User.objects.create_user("jstorer", "<EMAIL>", "123",
first_name="Jeremy", last_name="Storer")
teacher_j = Teacher.objects.create(user=user_j, bio="I am a teacher",
school="Glasgow University")
# log in
self.c.login(username="jdoe", password="<PASSWORD>")
own_response = self.c.post('/lfs/profile/{0}/edit/'.format(self.user.id),
{'username' : 'jdoe',
'password' : '<PASSWORD>' ,
'first_name' : 'Gethin',
'last_name' : 'Gay',
'bio' : 'new bio',
'school' : 'Caledonian'})
def test_user_change_password(self):
"""
A user should be able to request change
of their password
"""
self.c.login(username="jdoe", password="<PASSWORD>")
new_p = '<PASSWORD>'
old_p = '123'
# TODO: enable once GET requests are handled in views
# Wrong old password
#response = self.c.post('/lfs/change_password/{0}'.format(self.user.id),
# {'old_password' : '<PASSWORD>',
# 'new_password' : <PASSWORD>,
# 'confirm_new_password' : <PASSWORD>})
#self.assertEqual(self.user.password, <PASSWORD>_p)
# Wrong new password confirmation
#response = self.c.post('/lfs/change_password/{0}'.format(self.user.id),
# {'old_password' : <PASSWORD>,
# 'new_password' : <PASSWORD>,
# 'confirm_new_password' : '<PASSWORD>'})
#self.assertEqual(self.user.password, <PASSWORD>_p)
# Proper
response = self.c.post('/lfs/change_password/',
{'old_password' : <PASSWORD>,
'new_password1' : <PASSWORD>,
'new_password2' : <PASSWORD>})
self.assertRedirects(response, '/lfs/profile/{0}/'.format(self.user.id))
self.c.logout()
self.assertEqual(self.c.login(username="jdoe", password=<PASSWORD>),
False)
self.assertEqual(self.c.login(username="jdoe", password=<PASSWORD>),
True)
def test_all_modules_viewable_by_user(self):
"""
A user should be able to see all available modules
in order to pick/modify his choices
"""
# Add modules
mods = ["First Module", "Introduction Module", "Random stuff",
"Fillers", "How to teach", "Sustainable energy"]
for t in mods:
Module.objects.create(title=t)
# Assign a random module to user
mod = Module.objects.all()[0]
Takers.objects.create(module=mod, user=self.user)
self.c.login(username="jdoe", password="<PASSWORD>")
response = self.c.get('/lfs/modules/')
self.assertEqual(response.context['is_admin'], False)
self.assertEqual(len(response.context['modules']), len(mods) - 1)
self.assertEqual(len(response.context['modules_taken']), 1)
self.assertEqual(response.context['modules_taken'][0], mod)
def test_user_pick_module(self):
"""
A user should be able to pick modules at will
"""
# Random unassigned module
new_mod = Module.objects.create(title="Unpicked module")
self.c.login(username="jdoe", password="<PASSWORD>")
response = self.c.get('/lfs/pick_module/{0}/'.format(new_mod.id))
self.assertRedirects(response, '/lfs/modules/')
self.assertEqual(Takers.objects.filter(user=self.user,
module=new_mod).exists(), True)
def test_user_drop_module(self):
"""
A user should be able to drop modules at will
"""
# Create a module and assign it to user
user_mod = Module.objects.create(title="Picked module")
Takers.objects.create(user=self.user, module = user_mod)
self.c.login(username="jdoe", password="<PASSWORD>")
response = self.c.get('/lfs/drop_module/{0}/'.format(user_mod.id))
self.assertRedirects(response, '/lfs/modules/')
self.assertEqual(Takers.objects.filter(user=self.user,
module=user_mod).exists(), False)
def test_user_trial(self):
"""
Unregistered users should be able to start a trial
"""
mod = Module.objects.create(title="New Mod")
response = self.c.get('/lfs/trial/')
cont = response.context
# mod is not a trial module -> empty
self.assertEqual(response.status_code, 200)
self.assertEqual(cont['trial'], True)
self.assertEqual(cont['modules_total'], 1)
self.assertEqual(cont['overall_progress'], 0)
self.assertEqual(len(cont['modules_progress']), 0)
mod_t = Module.objects.create(title="Trial Mod", trial=True)
response = self.c.get('/lfs/trial/')
cont = response.context
# mod_t is a trial module
self.assertEqual(response.status_code, 200)
self.assertEqual(cont['trial'], True)
self.assertEqual(cont['modules_total'], 2)
self.assertEqual(cont['overall_progress'], 0)
self.assertEqual(len(cont['modules_progress']), 1)
def test_user_trial_module(self):
"""
Unregistered users should have access to a trial module's content
"""
mod = Module.objects.create(title="Trial mod", trial=True)
Page.objects.create(module=mod,section="no",content="no")
response = self.c.get('/lfs/trial/module/{0}/'.format(mod.id))
cont = response.context
self.assertEqual(response.status_code, 200)
self.assertEqual(cont['module_title'], "Trial mod")
self.assertEqual(cont['user_progress_on_module'], 0)
| en | 0.798145 | On registering, a new user is created, tied to an 'empty' Teacher model # Assure DB is properly populated with correct data Correct registration POST will result in a properly populated DB with both User and Teacher # attempt to send data which does not comply with the Terms and Cond # send terms-compliant data for a dummy user # Should be redirected to login page # Assure DB is properly populated with correct data A user who is in the DB should be able to log in (as a teacher) # send a POST with username/pass # should be at dashboard An existing user should be able to log out if currently logged in # log in (should redirect) A logged in user should see their dashboard, with all modules taken, their per-module and overall progress # populate DB with modules taken by user, with random progress stats # assign two of the modules to the other user (with different progress) # log in # dashboard GET # check modules # check per-module progress # check overall progress A user should be able to see all available modules' contents # Assign random modules to test user # log in # module that our user has taken (not necessary for viewing) # assign some pages to module Vivamus sit amet auctor nisl, in auctor augue. Nullam a purus eu erat semper eleifend quis a ex. Phasellus a tortor quis lectus ultrices vestibulum sit amet ac nunc. In rutrum hendrerit lorem non consequat. Ut malesuada orci ligula, eu dapibus est viverra sed. Fusce lacinia ante non porta cursus. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc pellentesque nec orci dignissim congue. Quisque nec interdum augue. Praesent ultricies felis nec nulla gravida tempor. In consequat aliquam congue. # module GET # check currently viewable module # Should be on first page by default # Should be tabbed on currently viewed Module by default # so all sections of that module should be displayed (navigatable) # all other user taken modules should be visible Users should be able to see a leaderboard of all users using the website # log in # assign some modules to dummy user to check progress # leaderboards GET # check all users are passed with their progress # check progress A registered user should be able to inspect their own as well as others' profiles # log in # inspect own profile # inspect someone else's profile Logged in users should be able to edit their own profiles # log in # test editing own profile # test editing some else's (not allowed) Logged in users should be able to edit their own profiles # log in A user should be able to request change of their password # TODO: enable once GET requests are handled in views # Wrong old password #response = self.c.post('/lfs/change_password/{0}'.format(self.user.id), # {'old_password' : '<PASSWORD>', # 'new_password' : <PASSWORD>, # 'confirm_new_password' : <PASSWORD>}) #self.assertEqual(self.user.password, <PASSWORD>_p) # Wrong new password confirmation #response = self.c.post('/lfs/change_password/{0}'.format(self.user.id), # {'old_password' : <PASSWORD>, # 'new_password' : <PASSWORD>, # 'confirm_new_password' : '<PASSWORD>'}) #self.assertEqual(self.user.password, <PASSWORD>_p) # Proper A user should be able to see all available modules in order to pick/modify his choices # Add modules # Assign a random module to user A user should be able to pick modules at will # Random unassigned module A user should be able to drop modules at will # Create a module and assign it to user Unregistered users should be able to start a trial # mod is not a trial module -> empty # mod_t is 
a trial module Unregistered users should have access to a trial module's content | 2.714148 | 3 |
pout/path.py | Jaymon/pout | 25 | 6633230 | <reponame>Jaymon/pout<filename>pout/path.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import os
import logging
import sys
import site
import inspect
from .compat import String
logger = logging.getLogger(__name__)
class Path(String):
"""Returns a path string relative to the current working directory (if applicable)"""
def __new__(cls, path):
cwd = os.getcwd()
if path.startswith(cwd):
path = path.replace(cwd, "", 1).lstrip(os.sep)
return super(Path, cls).__new__(cls, path)
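# A small illustration of the intended behaviour (hypothetical paths): with the
# current working directory at /home/user/project,
#   Path("/home/user/project/pout/path.py")  -> "pout/path.py"
#   Path("/usr/lib/python3/site.py")         -> "/usr/lib/python3/site.py"  (unchanged)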
class SitePackagesDir(String):
"""Finds the site-packages directory and sets the value of this string to that
path"""
def __new__(cls):
basepath = ""
try:
paths = site.getsitepackages()
basepath = paths[0]
logger.debug(
"Found site-packages directory {} using site.getsitepackages".format(
basepath
)
)
except AttributeError:
# we are probably running this in a virtualenv, so let's try a different
# approach
# try and brute-force discover it since it's not defined where it
# should be defined
sitepath = os.path.join(os.path.dirname(site.__file__), "site-packages")
if os.path.isdir(sitepath):
basepath = sitepath
logger.debug(
"Found site-packages directory {} using site.__file__".format(
basepath
)
)
else:
for path in sys.path:
if path.endswith("site-packages"):
basepath = path
logger.debug(
"Found site-packages directory {} using sys.path".format(
basepath
)
)
break
if not basepath:
for path in sys.path:
if path.endswith("dist-packages"):
basepath = path
logger.debug(
"Found dist-packages directory {} using sys.path".format(
basepath
)
)
break
if not basepath:
raise IOError("Could not find site-packages directory")
return super(SitePackagesDir, cls).__new__(cls, basepath)
class SiteCustomizeFile(String):
"""sets the value of the string to the sitecustomize.py file, and adds handy
helper functions to manipulate it"""
@property
def body(self):
if not self.exists():
return ""
with open(self, mode="r") as fp:
return fp.read()
def __new__(cls):
filepath = ""
if "sitecustomize" in sys.modules:
filepath = ModuleFile("sitecustomize")
if not filepath:
basepath = SitePackagesDir()
filepath = os.path.join(basepath, "sitecustomize.py")
instance = super(SiteCustomizeFile, cls).__new__(cls, filepath)
return instance
def inject(self):
"""inject code into sitecustomize.py that will inject pout into the builtins
so it will be available globally"""
if self.is_injected():
return False
with open(self, mode="a+") as fp:
fp.seek(0)
fp.write("\n".join([
"",
"try:",
" import pout",
"except ImportError:",
" pass",
"else:",
" pout.inject()",
"",
]))
return True
def exists(self):
return os.path.isfile(self)
def is_injected(self):
body = self.body
return "import pout" in body
class ModuleFile(String):
"""Given a module name (eg, foo) find the source file that corresponds to the
module, will be an empty string if modname's filepath can't be found"""
def __new__(cls, modname):
if isinstance(modname, String):
mod = sys.modules[modname]
else:
mod = modname
modname = mod.__name__
try:
# http://stackoverflow.com/questions/6761337/inspect-getfile-vs-inspect-getsourcefile
# first try and get the actual source file
filepath = inspect.getsourcefile(mod)
if not filepath:
# get the raw file since val doesn't have a source file (could be a .pyc or .so file)
filepath = inspect.getfile(mod)
if filepath:
path = os.path.realpath(filepath)
# !!! I have doubts this if block is needed
if filepath and not filepath.endswith(".py"):
filepath = ""
for path in sys.path:
p = os.path.join(path, "{}.py".format(modname))
if os.path.isfile(p):
filepath = p
break
except TypeError as e:
filepath = ""
return super(ModuleFile, cls).__new__(cls, filepath)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import os
import logging
import sys
import site
import inspect
from .compat import String
logger = logging.getLogger(__name__)
class Path(String):
"""Returns a path string relative to the current working directory (if applicable)"""
def __new__(cls, path):
cwd = os.getcwd()
if path.startswith(cwd):
path = path.replace(cwd, "", 1).lstrip(os.sep)
return super(Path, cls).__new__(cls, path)
class SitePackagesDir(String):
"""Finds the site-packages directory and sets the value of this string to that
path"""
def __new__(cls):
basepath = ""
try:
paths = site.getsitepackages()
basepath = paths[0]
logger.debug(
"Found site-packages directory {} using site.getsitepackages".format(
basepath
)
)
except AttributeError:
# we are probably running this in a virtualenv, so let's try a different
# approach
# try and brute-force discover it since it's not defined where it
# should be defined
sitepath = os.path.join(os.path.dirname(site.__file__), "site-packages")
if os.path.isdir(sitepath):
basepath = sitepath
logger.debug(
"Found site-packages directory {} using site.__file__".format(
basepath
)
)
else:
for path in sys.path:
if path.endswith("site-packages"):
basepath = path
logger.debug(
"Found site-packages directory {} using sys.path".format(
basepath
)
)
break
if not basepath:
for path in sys.path:
if path.endswith("dist-packages"):
basepath = path
logger.debug(
"Found dist-packages directory {} using sys.path".format(
basepath
)
)
break
if not basepath:
raise IOError("Could not find site-packages directory")
return super(SitePackagesDir, cls).__new__(cls, basepath)
class SiteCustomizeFile(String):
"""sets the value of the string to the sitecustomize.py file, and adds handy
helper functions to manipulate it"""
@property
def body(self):
if not self.exists():
return ""
with open(self, mode="r") as fp:
return fp.read()
def __new__(cls):
filepath = ""
if "sitecustomize" in sys.modules:
filepath = ModuleFile("sitecustomize")
if not filepath:
basepath = SitePackagesDir()
filepath = os.path.join(basepath, "sitecustomize.py")
instance = super(SiteCustomizeFile, cls).__new__(cls, filepath)
return instance
def inject(self):
"""inject code into sitecustomize.py that will inject pout into the builtins
so it will be available globally"""
if self.is_injected():
return False
with open(self, mode="a+") as fp:
fp.seek(0)
fp.write("\n".join([
"",
"try:",
" import pout",
"except ImportError:",
" pass",
"else:",
" pout.inject()",
"",
]))
return True
def exists(self):
return os.path.isfile(self)
def is_injected(self):
body = self.body
return "import pout" in body
class ModuleFile(String):
"""Given a module name (eg, foo) find the source file that corresponds to the
module, will be an empty string if modname's filepath can't be found"""
def __new__(cls, modname):
if isinstance(modname, String):
mod = sys.modules[modname]
else:
mod = modname
modname = mod.__name__
try:
# http://stackoverflow.com/questions/6761337/inspect-getfile-vs-inspect-getsourcefile
# first try and get the actual source file
filepath = inspect.getsourcefile(mod)
if not filepath:
# get the raw file since val doesn't have a source file (could be a .pyc or .so file)
filepath = inspect.getfile(mod)
if filepath:
path = os.path.realpath(filepath)
# !!! I have doubts this if block is needed
if filepath and not filepath.endswith(".py"):
filepath = ""
for path in sys.path:
p = os.path.join(path, "{}.py".format(modname))
if os.path.isfile(p):
filepath = p
break
except TypeError as e:
filepath = ""
return super(ModuleFile, cls).__new__(cls, filepath) | en | 0.813384 | # -*- coding: utf-8 -*- Returns a path string relative to the current working directory (if applicable) Finds the site-packages directory and sets the value of this string to that path # we are probably running this in a virtualenv, so let's try a different # approach # try and brute-force discover it since it's not defined where it # should be defined sets the value of the string to the sitecustomize.py file, and adds handy helper functions to manipulate it inject code into sitecustomize.py that will inject pout into the builtins so it will be available globally Given a module name (eg, foo) find the source file that corresponds to the module, will be an empty string if modname's filepath can't be found # http://stackoverflow.com/questions/6761337/inspect-getfile-vs-inspect-getsourcefile # first try and get the actual source file # get the raw file since val doesn't have a source file (could be a .pyc or .so file) # !!! I have doubts this if block is needed | 2.350249 | 2 |
examples/gcharttestapp/TestGChart05.py | allbuttonspressed/pyjs | 1 | 6633231 | <gh_stars>1-10
import GChartTestAppUtil
from pyjamas.chart.GChart import GChart
from pyjamas.chart import GChartConsts
from pyjamas.chart import SymbolType
# test that clipping of points to plot area works as expected
class TestGChart05 (GChart):
def __init__(self, testCanvas):
GChart.__init__(self, XChartSize=300,YChartSize=300)
self.setChartTitle(GChartTestAppUtil.getTitle(self))
self.setClipToPlotArea(True)
self.setChartFootnotes("Check: an unclipped point at each corner.<br> No x-ticks.<br>Line clipped at plot area limits<br>Three clipped-off pies visible<br>Every at-least-partly visible symbol labeled.")
self.getXAxis().setHasGridlines(True)
self.getY2Axis().setHasGridlines(True)
self.addCurve()
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
self.getCurve().setYAxis(GChartConsts.Y_AXIS)
self.getCurve().addPoint(0,-95); # clipped
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(0,-90)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(0,0)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(0,5); # clipped
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().setLegendLabel("On Y")
self.addCurve()
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
self.getCurve().setYAxis(GChartConsts.Y2_AXIS)
self.getCurve().addPoint(90,-50); # clipped
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(90,-45)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(90,45)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(90,50); # clipped
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().setLegendLabel("On Y2")
        # continuous line whose edges get clipped off
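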
self.addCurve()
self.getCurve().setLegendLabel("clipped line")
self.getCurve().getSymbol().setBackgroundColor("blue")
self.getCurve().getSymbol().setBorderColor("blue")
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
else:
self.getCurve().getSymbol().setFillSpacing(10)
self.getCurve().getSymbol().setFillThickness(3)
self.getCurve().setYAxis(GChartConsts.Y_AXIS)
# self.getCurve().addPoint(50,-50)
self.getCurve().addPoint(0,-100)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
# self.getCurve().addPoint(50,-50)
self.getCurve().addPoint(100,0)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
# this should be entirely visible
self.addCurve()
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
self.getCurve().setLegendLabel("inside pie")
self.getCurve().getSymbol().setSymbolType(
SymbolType.PIE_SLICE_HORIZONTAL_SHADING)
self.getCurve().getSymbol().setFillThickness(1)
self.getCurve().getSymbol().setWidth(100)
self.getCurve().getSymbol().setHeight(0)
self.getCurve().setYAxis(GChartConsts.Y_AXIS)
self.getCurve().addPoint(45,0)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
# this should be entirely clipped.
self.addCurve()
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
self.getCurve().setLegendLabel("outside right pie")
self.getCurve().getSymbol().setSymbolType(
SymbolType.PIE_SLICE_HATCHED_SHADING)
self.getCurve().getSymbol().setFillThickness(1)
self.getCurve().getSymbol().setWidth(100)
self.getCurve().getSymbol().setHeight(0)
self.getCurve().setYAxis(GChartConsts.Y2_AXIS)
self.getCurve().addPoint(95,0)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
# this should be entirely clipped
self.addCurve()
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
self.getCurve().setLegendLabel("outside bottom pie")
self.getCurve().getSymbol().setSymbolType(
SymbolType.PIE_SLICE_VERTICAL_SHADING)
self.getCurve().getSymbol().setFillThickness(1)
self.getCurve().getSymbol().setWidth(100)
self.getCurve().getSymbol().setHeight(0)
self.getCurve().setYAxis(GChartConsts.Y_AXIS)
self.getCurve().addPoint(45,-95)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getXAxis().setAxisLabel("<small><small><small>X</small></small></small>")
self.getXAxis().setTickCount(0)
self.getXAxis().setAxisMin(0.)
self.getXAxis().setAxisMax(90.)
self.getYAxis().setAxisMin(-90.)
self.getYAxis().setAxisMax(0.)
self.getY2Axis().setAxisMin(-45.)
self.getY2Axis().setAxisMax(45)
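# A rough usage sketch (hypothetical harness code; the real gchart test app
# constructs and attaches the chart itself):
#   chart = TestGChart05(testCanvas=False)  # False -> non-canvas fill spacing
#   RootPanel().add(chart)                  # pyjamas RootPanel, if available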
| import GChartTestAppUtil
from pyjamas.chart.GChart import GChart
from pyjamas.chart import GChartConsts
from pyjamas.chart import SymbolType
# test that clipping of points to plot area works as expected
class TestGChart05 (GChart):
def __init__(self, testCanvas):
GChart.__init__(self, XChartSize=300,YChartSize=300)
self.setChartTitle(GChartTestAppUtil.getTitle(self))
self.setClipToPlotArea(True)
self.setChartFootnotes("Check: an unclipped point at each corner.<br> No x-ticks.<br>Line clipped at plot area limits<br>Three clipped-off pies visible<br>Every at-least-partly visible symbol labeled.")
self.getXAxis().setHasGridlines(True)
self.getY2Axis().setHasGridlines(True)
self.addCurve()
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
self.getCurve().setYAxis(GChartConsts.Y_AXIS)
self.getCurve().addPoint(0,-95); # clipped
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(0,-90)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(0,0)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(0,5); # clipped
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().setLegendLabel("On Y")
self.addCurve()
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
self.getCurve().setYAxis(GChartConsts.Y2_AXIS)
self.getCurve().addPoint(90,-50); # clipped
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(90,-45)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(90,45)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().addPoint(90,50); # clipped
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getCurve().setLegendLabel("On Y2")
        # continuous line whose edges get clipped off
self.addCurve()
self.getCurve().setLegendLabel("clipped line")
self.getCurve().getSymbol().setBackgroundColor("blue")
self.getCurve().getSymbol().setBorderColor("blue")
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
else:
self.getCurve().getSymbol().setFillSpacing(10)
self.getCurve().getSymbol().setFillThickness(3)
self.getCurve().setYAxis(GChartConsts.Y_AXIS)
# self.getCurve().addPoint(50,-50)
self.getCurve().addPoint(0,-100)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
# self.getCurve().addPoint(50,-50)
self.getCurve().addPoint(100,0)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
# this should be entirely visible
self.addCurve()
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
self.getCurve().setLegendLabel("inside pie")
self.getCurve().getSymbol().setSymbolType(
SymbolType.PIE_SLICE_HORIZONTAL_SHADING)
self.getCurve().getSymbol().setFillThickness(1)
self.getCurve().getSymbol().setWidth(100)
self.getCurve().getSymbol().setHeight(0)
self.getCurve().setYAxis(GChartConsts.Y_AXIS)
self.getCurve().addPoint(45,0)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
# this should be entirely clipped.
self.addCurve()
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
self.getCurve().setLegendLabel("outside right pie")
self.getCurve().getSymbol().setSymbolType(
SymbolType.PIE_SLICE_HATCHED_SHADING)
self.getCurve().getSymbol().setFillThickness(1)
self.getCurve().getSymbol().setWidth(100)
self.getCurve().getSymbol().setHeight(0)
self.getCurve().setYAxis(GChartConsts.Y2_AXIS)
self.getCurve().addPoint(95,0)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
# this should be entirely clipped
self.addCurve()
if testCanvas:
self.getCurve().getSymbol().setFillSpacing(0)
self.getCurve().setLegendLabel("outside bottom pie")
self.getCurve().getSymbol().setSymbolType(
SymbolType.PIE_SLICE_VERTICAL_SHADING)
self.getCurve().getSymbol().setFillThickness(1)
self.getCurve().getSymbol().setWidth(100)
self.getCurve().getSymbol().setHeight(0)
self.getCurve().setYAxis(GChartConsts.Y_AXIS)
self.getCurve().addPoint(45,-95)
self.getCurve().getPoint().setAnnotationText(self.getCurve().getPoint().getHovertext())
self.getXAxis().setAxisLabel("<small><small><small>X</small></small></small>")
self.getXAxis().setTickCount(0)
self.getXAxis().setAxisMin(0.)
self.getXAxis().setAxisMax(90.)
self.getYAxis().setAxisMin(-90.)
self.getYAxis().setAxisMax(0.)
self.getY2Axis().setAxisMin(-45.)
self.getY2Axis().setAxisMax(45) | en | 0.871037 | # test that clipping of points to plot area works as expected # clipped # clipped # clipped # clipped # continuous line whose edges self.get clipped off # self.getCurve().addPoint(50,-50) # self.getCurve().addPoint(50,-50) # this should be entirely visible # this should be entirely clipped. # this should be entirely clipped | 2.64535 | 3 |
climatology.py | oet808/PDO_CMIP5 | 0 | 6633232 | #!/usr/bin/python
###############################################################################
# Script that calls CDO
# linux command to calculate the long-term climatology
# (from annual mean data)
###############################################################################
import os
#import sys
#sys.path.append("./modules")
from cmip5 import *
def calc_clim(scen,model,run,v,startyr,endyr,realm=None):
"""Calculates climatology from annual mean data using CDO.
Input variables:
scen,model,run,v: strings indicating the scenario,
model,ensemble member run, and the variable name.
These variables are used to form the netcdf file names
that are processed with cdo.
startyr, endyr: integer numbers for the first and last year.
realm: optional string argument corresponding to the
variable processed that is used for the subfolder structure
of the CMIP5 model.
"""
app="clim" # app is used in the output file name
model_scen=TRANSLATE[scen]['scen']
model_time=TRANSLATE[scen]['time']
# adjust outpath to the subfolder structure
if realm != None:
subdir_out=model_scen+"/"+realm+"/"+v+"/"
else:
subdir_out=model_scen+"/"+v+"/"
infile=model+"_"+model_scen+"_"+v+"_"+model_time+"_"+run+"_ann.nc"
# OUTPATH: Input path and output path are the same.
outfile=model+"_"+model_scen+"_"+v+"_"+model_time+"_"+run+\
"_ann_"+app+".nc"
cdo="cdo -v timmean -selyear,"+str(startyr)+"/"+str(endyr)+" "+\
OUTPATH+subdir_out+infile+" "+OUTPATH+subdir_out+outfile
print(cdo)
os.system(cdo)
print ("Infile: "+infile)
print ("Outfile:"+outfile)
print ("Folder: "+OUTPATH)
return
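# Illustrative call (hypothetical model/run/variable; START, END, OUTPATH and
# TRANSLATE come from the cmip5 module):
#   calc_clim(scen, "CCSM4", "r1i1p1", "tos", startyr=START, endyr=END, realm="ocn")
# which shells out to roughly:
#   cdo -v timmean -selyear,<START>/<END> <OUTPATH>historical/ocn/tos/CCSM4_..._ann.nc \
#       <OUTPATH>historical/ocn/tos/CCSM4_..._ann_clim.nc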
# Loop over scenarios (historical only, usually)
iscen=0
scen=TRANSLATE['historical']['scen']
nmodel=0
for model in MODELLIST:
for run in ENSEMBLELIST:
i=0
for v in VARLIST:
calc_clim(scen,model,run,v,startyr=START,endyr=END,realm="ocn")
i+=1
nmodel+=1
print ("----------------------------------------------------------")
print ("stats for simulations "+scen+" : variable "+v)
print ("models: "+str(nmodel)+" variables: "+str(i))
iscen+=1
| #!/usr/bin/python
###############################################################################
# Script that calls CDO
# linux command to calculate the long-term climatology
# (from annual mean data)
###############################################################################
import os
#import sys
#sys.path.append("./modules")
from cmip5 import *
def calc_clim(scen,model,run,v,startyr,endyr,realm=None):
"""Calculates climatology from annual mean data using CDO.
Input variables:
scen,model,run,v: strings indicating the scenario,
model,ensemble member run, and the variable name.
These variables are used to form the netcdf file names
that are processed with cdo.
startyr, endyr: integer numbers for the first and last year.
realm: optional string argument corresponding to the
variable processed that is used for the subfolder structure
of the CMIP5 model.
"""
app="clim" # app is used in the output file name
model_scen=TRANSLATE[scen]['scen']
model_time=TRANSLATE[scen]['time']
# adjust outpath to the subfolder structure
if realm != None:
subdir_out=model_scen+"/"+realm+"/"+v+"/"
else:
subdir_out=model_scen+"/"+v+"/"
infile=model+"_"+model_scen+"_"+v+"_"+model_time+"_"+run+"_ann.nc"
# OUTPATH: Input path and output path are the same.
outfile=model+"_"+model_scen+"_"+v+"_"+model_time+"_"+run+\
"_ann_"+app+".nc"
cdo="cdo -v timmean -selyear,"+str(startyr)+"/"+str(endyr)+" "+\
OUTPATH+subdir_out+infile+" "+OUTPATH+subdir_out+outfile
print(cdo)
os.system(cdo)
print ("Infile: "+infile)
print ("Outfile:"+outfile)
print ("Folder: "+OUTPATH)
return
# Loop over scenarios (historical only, usually)
iscen=0
scen=TRANSLATE['historical']['scen']
nmodel=0
for model in MODELLIST:
for run in ENSEMBLELIST:
i=0
for v in VARLIST:
calc_clim(scen,model,run,v,startyr=START,endyr=END,realm="ocn")
i+=1
nmodel+=1
print ("----------------------------------------------------------")
print ("stats for simulations "+scen+" : variable "+v)
print ("models: "+str(nmodel)+" variables: "+str(i))
iscen+=1
| en | 0.570109 | #!/usr/bin/python ############################################################################### # Script that calls CDO # linux command to calculate the long-term climatology # (from annual mean data) ############################################################################### #import sys #sys.path.append("./modules") Calculates climatology from annual mean data using CDO. Input variables: scen,model,run,v: strings indicating the scenario, model,ensemble member run, and the variable name. These variables are used to form the netcdf file names that are processed with cdo. startyr, endyr: integer numbers for the first and last year. realm: optional string argument corresponding to the variable processed that is used for the subfolder structure of the CMIP5 model. # app is used in the output file name # adjust outpath to the subfolder structure # OUTPATH: Input path and output path are the same. # Loop over scenarios (historical only, usually) | 2.345797 | 2 |
opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py | pitabwire/opentelemetry-python-contrib | 0 | 6633233 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# type: ignore
"""
OpenTelemetry Base Instrumentor
"""
from abc import ABC, abstractmethod
from logging import getLogger
from typing import Collection, Optional
from opentelemetry.instrumentation.dependencies import (
DependencyConflict,
get_dependency_conflicts,
)
_LOG = getLogger(__name__)
class BaseInstrumentor(ABC):
"""An ABC for instrumentors
Child classes of this ABC should instrument specific third
party libraries or frameworks either by using the
``opentelemetry-instrument`` command or by calling their methods
directly.
Since every third party library or framework is different and has different
instrumentation needs, more methods can be added to the child classes as
needed to provide practical instrumentation to the end user.
"""
_instance = None
_is_instrumented = False
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = object.__new__(cls, *args, **kwargs)
return cls._instance
@abstractmethod
def instrumentation_dependencies(self) -> Collection[str]:
"""Return a list of python packages with versions that the will be instrumented.
The format should be the same as used in requirements.txt or setup.py.
For example, if an instrumentation instruments requests 1.x, this method should look
like:
def instrumentation_dependencies(self) -> Collection[str]:
return ['requests ~= 1.0']
This will ensure that the instrumentation will only be used when the specified library
is present in the environment.
"""
def _instrument(self, **kwargs):
"""Instrument the library"""
@abstractmethod
def _uninstrument(self, **kwargs):
"""Uninstrument the library"""
def _check_dependency_conflicts(self) -> Optional[DependencyConflict]:
dependencies = self.instrumentation_dependencies()
return get_dependency_conflicts(dependencies)
def instrument(self, **kwargs):
"""Instrument the library
This method will be called without any optional arguments by the
``opentelemetry-instrument`` command.
This means that calling this method directly without passing any
optional values should do the very same thing that the
``opentelemetry-instrument`` command does.
"""
if self._is_instrumented:
_LOG.warning("Attempting to instrument while already instrumented")
return None
# check if instrumentor has any missing or conflicting dependencies
skip_dep_check = kwargs.pop("skip_dep_check", False)
if not skip_dep_check:
conflict = self._check_dependency_conflicts()
if conflict:
_LOG.warning(conflict)
return None
result = self._instrument( # pylint: disable=assignment-from-no-return
**kwargs
)
self._is_instrumented = True
return result
def uninstrument(self, **kwargs):
"""Uninstrument the library
See ``BaseInstrumentor.instrument`` for more information regarding the
usage of ``kwargs``.
"""
if self._is_instrumented:
result = self._uninstrument(**kwargs)
self._is_instrumented = False
return result
_LOG.warning("Attempting to uninstrument while already uninstrumented")
return None
__all__ = ["BaseInstrumentor"]
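# A minimal concrete instrumentor, shown only as a sketch; the package name
# "mylib" and the patching details are hypothetical, not part of this module.
class _ExampleInstrumentor(BaseInstrumentor):
    def instrumentation_dependencies(self) -> Collection[str]:
        return ["mylib ~= 1.0"]

    def _instrument(self, **kwargs):
        # a real instrumentation would wrap mylib's entry points with tracing here
        pass

    def _uninstrument(self, **kwargs):
        # and undo that patching here
        pass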
| # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# type: ignore
"""
OpenTelemetry Base Instrumentor
"""
from abc import ABC, abstractmethod
from logging import getLogger
from typing import Collection, Optional
from opentelemetry.instrumentation.dependencies import (
DependencyConflict,
get_dependency_conflicts,
)
_LOG = getLogger(__name__)
class BaseInstrumentor(ABC):
"""An ABC for instrumentors
Child classes of this ABC should instrument specific third
party libraries or frameworks either by using the
``opentelemetry-instrument`` command or by calling their methods
directly.
Since every third party library or framework is different and has different
instrumentation needs, more methods can be added to the child classes as
needed to provide practical instrumentation to the end user.
"""
_instance = None
_is_instrumented = False
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = object.__new__(cls, *args, **kwargs)
return cls._instance
@abstractmethod
def instrumentation_dependencies(self) -> Collection[str]:
"""Return a list of python packages with versions that the will be instrumented.
The format should be the same as used in requirements.txt or setup.py.
For example, if an instrumentation instruments requests 1.x, this method should look
like:
def instrumentation_dependencies(self) -> Collection[str]:
return ['requests ~= 1.0']
This will ensure that the instrumentation will only be used when the specified library
is present in the environment.
"""
def _instrument(self, **kwargs):
"""Instrument the library"""
@abstractmethod
def _uninstrument(self, **kwargs):
"""Uninstrument the library"""
def _check_dependency_conflicts(self) -> Optional[DependencyConflict]:
dependencies = self.instrumentation_dependencies()
return get_dependency_conflicts(dependencies)
def instrument(self, **kwargs):
"""Instrument the library
This method will be called without any optional arguments by the
``opentelemetry-instrument`` command.
This means that calling this method directly without passing any
optional values should do the very same thing that the
``opentelemetry-instrument`` command does.
"""
if self._is_instrumented:
_LOG.warning("Attempting to instrument while already instrumented")
return None
# check if instrumentor has any missing or conflicting dependencies
skip_dep_check = kwargs.pop("skip_dep_check", False)
if not skip_dep_check:
conflict = self._check_dependency_conflicts()
if conflict:
_LOG.warning(conflict)
return None
result = self._instrument( # pylint: disable=assignment-from-no-return
**kwargs
)
self._is_instrumented = True
return result
def uninstrument(self, **kwargs):
"""Uninstrument the library
See ``BaseInstrumentor.instrument`` for more information regarding the
usage of ``kwargs``.
"""
if self._is_instrumented:
result = self._uninstrument(**kwargs)
self._is_instrumented = False
return result
_LOG.warning("Attempting to uninstrument while already uninstrumented")
return None
__all__ = ["BaseInstrumentor"]
| en | 0.803107 | # Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore OpenTelemetry Base Instrumentor An ABC for instrumentors Child classes of this ABC should instrument specific third party libraries or frameworks either by using the ``opentelemetry-instrument`` command or by calling their methods directly. Since every third party library or framework is different and has different instrumentation needs, more methods can be added to the child classes as needed to provide practical instrumentation to the end user. Return a list of python packages with versions that the will be instrumented. The format should be the same as used in requirements.txt or setup.py. For example, if an instrumentation instruments requests 1.x, this method should look like: def instrumentation_dependencies(self) -> Collection[str]: return ['requests ~= 1.0'] This will ensure that the instrumentation will only be used when the specified library is present in the environment. Instrument the library Uninstrument the library Instrument the library This method will be called without any optional arguments by the ``opentelemetry-instrument`` command. This means that calling this method directly without passing any optional values should do the very same thing that the ``opentelemetry-instrument`` command does. # check if instrumentor has any missing or conflicting dependencies # pylint: disable=assignment-from-no-return Uninstrument the library See ``BaseInstrumentor.instrument`` for more information regarding the usage of ``kwargs``. | 2.138265 | 2 |
profiles_api/serializers.py | elolugo/profiles-res-api | 0 | 6633234 | <reponame>elolugo/profiles-res-api
from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
"""
Serializes a name field for testing our APIView.
Converts and validates the inputs to the methods
of an API
"""
name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
""" Serializes an user profile object """
class Meta:
model = models.UserProfile
fields = ('id', 'email', 'name', 'password')
extra_kwargs = { # Making exceptions in the password filed
'password': {
'write_only': True, # can only be writable and not readable
'style': {'input_type': 'password'} # treat as a password field in the input form
}
}
def create(self, validated_data):
"""
After validating the data by the serializer,
Django will run the default create() function.
Overwriting the function create() so that Django
can write the password hashed to the database
"""
user = models.UserProfile.objects.create_user(
email=validated_data['email'],
name=validated_data['name'],
password=validated_data['password']
)
return user
class ProfileFeedItemSerializer(serializers.ModelSerializer):
"""Serializes profile feed items"""
class Meta:
model = models.ProfileFeedItem
        fields = ('id', 'user_profile', 'status_text', 'created_on')  # 'id' and 'created_on' are set automatically
extra_kwargs = {'user_profile': {'read_only': True}}
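# Typical use from a view, sketched with made-up field values (not part of the
# serializer definitions above):
#   serializer = UserProfileSerializer(data={
#       'email': 'user@example.com', 'name': 'User', 'password': 'secret123'})
#   if serializer.is_valid():
#       serializer.save()  # calls create() above, which hashes the password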
| from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
"""
Serializes a name field for testing our APIView.
Converts and validates the inputs to the methods
of an API
"""
name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
""" Serializes an user profile object """
class Meta:
model = models.UserProfile
fields = ('id', 'email', 'name', 'password')
extra_kwargs = { # Making exceptions in the password filed
'password': {
'write_only': True, # can only be writable and not readable
'style': {'input_type': 'password'} # treat as a password field in the input form
}
}
def create(self, validated_data):
"""
After validating the data by the serializer,
Django will run the default create() function.
Overwriting the function create() so that Django
can write the password hashed to the database
"""
user = models.UserProfile.objects.create_user(
email=validated_data['email'],
name=validated_data['name'],
password=validated_data['password']
)
return user
class ProfileFeedItemSerializer(serializers.ModelSerializer):
"""Serializes profile feed items"""
class Meta:
model = models.ProfileFeedItem
        fields = ('id', 'user_profile', 'status_text', 'created_on')  # 'id' and 'created_on' are set automatically
extra_kwargs = {'user_profile': {'read_only': True}} | en | 0.77519 | Serializes a name field for testing our APIView. Converts and validates the inputs to the methods of an API Serializes an user profile object # Making exceptions in the password filed # can only be writable and not readable # treat as a password field in the input form After validating the data by the serializer, Django will run the default create() function. Overwriting the function create() so that Django can write the password hashed to the database Serializes profile feed items #id is automatically set, and created_on | 2.925701 | 3 |
ddi_search_engine/Bio/Mindy/compression.py | dbmi-pitt/DIKB-Evidence-analytics | 3 | 6633235 | import commands, os
_uncompress_table = {
".bz": "bzip2",
".BZ": "bzip2",
".gz": "gzip",
".GZ": "gzip",
".Z": "compress",
}
def open_file(filename, mode = "rb"):
ext = os.path.splitext(filename)[1]
type = _uncompress_table.get(ext)
if type is None:
return open(filename, mode)
if type == "gzip":
import gzip
        return gzip.open(filename, mode)
if type == "bzip2":
cmd = "bzcat --decompress"
cmd += commands.mkarg(filename)
return os.popen(cmd, mode)
if type == "compress":
cmd = "zcat -d"
cmd += commands.mkarg(filename)
return os.popen(cmd, mode)
raise AssertionError("What's a %r?" % type)
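# Example (hypothetical file names): the handle returned is either a plain file
# object, a gzip file object, or a pipe to the decompressing tool.
#   handle = open_file("swissprot.dat.gz")  # handled by gzip
#   handle = open_file("swissprot.dat.bz")  # piped through bzcat
#   handle = open_file("swissprot.dat")     # plain open()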
| import commands, os
_uncompress_table = {
".bz": "bzip2",
".BZ": "bzip2",
".gz": "gzip",
".GZ": "gzip",
".Z": "compress",
}
def open_file(filename, mode = "rb"):
ext = os.path.splitext(filename)[1]
type = _uncompress_table.get(ext)
if type is None:
return open(filename, mode)
if type == "gzip":
import gzip
        return gzip.open(filename, mode)
if type == "bzip2":
cmd = "bzcat --decompress"
cmd += commands.mkarg(filename)
return os.popen(cmd, mode)
if type == "compress":
cmd = "zcat -d"
cmd += commands.mkarg(filename)
return os.popen(cmd, mode)
raise AssertionError("What's a %r?" % type)
| none | 1 | 3.077736 | 3 |
|
google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/managed_instance_groups_utils.py | KaranToor/MA450 | 1 | 6633236 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions and classes for dealing with managed instances groups."""
import random
import re
import string
import sys
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import path_simplifier
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions
_ALLOWED_UTILIZATION_TARGET_TYPES = [
'DELTA_PER_MINUTE', 'DELTA_PER_SECOND', 'GAUGE']
_MAX_AUTOSCALER_NAME_LENGTH = 63
# 4 character chosen from between lowercase letters and numbers give >1.6M
# possibilities with no more than 100 Autoscalers in one Zone and Project
# so probability that adding an autoscaler will fail because of name conflict
# is about 6e-5.
_NUM_RANDOM_CHARACTERS_IN_AS_NAME = 4
CLOUD_PUB_SUB_VALID_RESOURCE_RE = r'^[A-Za-z][A-Za-z0-9-_.~+%]{2,}$'
class ResourceNotFoundException(exceptions.ToolException):
pass
def ArgsSupportQueueScaling(args):
return 'queue_scaling_acceptable_backlog_per_instance' in args
def AddAutoscalerArgs(parser, queue_scaling_enabled=False):
"""Adds commandline arguments to parser."""
parser.add_argument('--cool-down-period', type=arg_parsers.Duration(),
help='Number of seconds Autoscaler will wait between '
'resizing collection. Note: The Autoscaler waits '
'10 minutes before scaling down, the value entered here '
'is in addition to the initial 10 minute period.')
parser.add_argument('--description', help='Notes about Autoscaler.')
parser.add_argument('--min-num-replicas',
type=arg_parsers.BoundedInt(0, sys.maxint),
help='Minimum number of replicas Autoscaler will set.')
parser.add_argument('--max-num-replicas',
type=arg_parsers.BoundedInt(0, sys.maxint), required=True,
help='Maximum number of replicas Autoscaler will set.')
parser.add_argument('--scale-based-on-cpu',
action='store_true',
help='Autoscaler will be based on CPU utilization.')
parser.add_argument('--scale-based-on-load-balancing',
action='store_true',
help=('Use autoscaling based on load balancing '
'utilization.'))
parser.add_argument('--target-cpu-utilization',
type=arg_parsers.BoundedFloat(0.0, 1.0),
help='Autoscaler will aim to maintain CPU utilization at '
'target level (0.0 to 1.0).')
parser.add_argument('--target-load-balancing-utilization',
type=arg_parsers.BoundedFloat(0.0, None),
help='Autoscaler will aim to maintain the load balancing '
'utilization level (greater than 0.0).')
custom_metric_utilization = parser.add_argument(
'--custom-metric-utilization',
type=arg_parsers.ArgDict(
spec={
'metric': str,
'utilization-target': float,
'utilization-target-type': str,
},
),
action='append',
help=('Autoscaler will maintain the target value of a Google Cloud '
'Monitoring metric.'),
)
custom_metric_utilization.detailed_help = """
Adds a target metric value for the to the Autoscaler.
*metric*::: Protocol-free URL of a Google Cloud Monitoring metric.
*utilization-target*::: Value of the metric Autoscaler will aim to maintain
(greater than 0.0).
*utilization-target-type*::: How target is expressed. Valid values: {0}.
""".format(', '.join(_ALLOWED_UTILIZATION_TARGET_TYPES))
if queue_scaling_enabled:
cloud_pub_sub_spec = parser.add_argument(
'--queue-scaling-cloud-pub-sub',
type=arg_parsers.ArgDict(
spec={
'topic': str,
'subscription': str,
},
),
help='Scaling based on Cloud Pub/Sub queuing system.',
)
cloud_pub_sub_spec.detailed_help = """
Specifies queue-based scaling based on a Cloud Pub/Sub queuing system.
Both topic and subscription are required.
*topic*::: Topic specification. Can be just a name or a partial URL
(starting with "projects/..."). Topic must belong to the same project as
Autoscaler.
*subscription*::: Subscription specification. Can be just a name or a
partial URL (starting with "projects/..."). Subscription must belong to the
same project as Autoscaler and must be connected to the specified topic.
"""
parser.add_argument('--queue-scaling-acceptable-backlog-per-instance',
type=arg_parsers.BoundedFloat(0.0, None),
help='Queue-based scaling target: autoscaler will aim '
'to assure that average number of tasks in the queue '
'is no greater than this value.',)
parser.add_argument('--queue-scaling-single-worker-throughput',
type=arg_parsers.BoundedFloat(0.0, None),
help='Hint the autoscaler for queue-based scaling on '
'how much throughput a single worker instance is able '
'to consume.')
def _ValidateCloudPubSubResource(pubsub_spec_dict, expected_resource_type):
"""Validate Cloud Pub/Sub resource spec format."""
def RaiseInvalidArgument(message):
raise exceptions.InvalidArgumentException(
'--queue-scaling-cloud-pub-sub:{0}'.format(expected_resource_type),
message)
if expected_resource_type not in pubsub_spec_dict:
raise exceptions.ToolException(
'Both topic and subscription are required for Cloud Pub/Sub '
'queue scaling specification.')
split_resource = pubsub_spec_dict[expected_resource_type].split('/')
if len(split_resource) == 1:
resource_name = split_resource[0]
elif len(split_resource) == 4:
(project_prefix, unused_project_name,
resource_prefix, resource_name) = split_resource
if project_prefix != 'projects':
RaiseInvalidArgument(
'partial-URL format for Cloud PubSub resource does not start with '
'"projects/"')
if resource_prefix != '{0}s'.format(expected_resource_type):
RaiseInvalidArgument('not in valid resource types: topic, subscription.')
else:
RaiseInvalidArgument(
'Cloud PubSub resource must either be just a name or a partial '
'URL (starting with "projects/").')
if not re.match(CLOUD_PUB_SUB_VALID_RESOURCE_RE, resource_name):
RaiseInvalidArgument('resource name not valid.')
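# Accepted Cloud Pub/Sub specs, sketched with hypothetical names:
#   {'topic': 'my-topic', 'subscription': 'my-sub'}
#   {'topic': 'projects/my-project/topics/my-topic',
#    'subscription': 'projects/my-project/subscriptions/my-sub'}
# Anything else (missing key, wrong prefix, invalid characters) raises an error above.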
def ValidateAutoscalerArgs(args):
"""Validates args."""
if args.min_num_replicas and args.max_num_replicas:
if args.min_num_replicas > args.max_num_replicas:
raise exceptions.InvalidArgumentException(
'--max-num-replicas', 'can\'t be less than min num replicas.')
if args.custom_metric_utilization:
for custom_metric_utilization in args.custom_metric_utilization:
for field in ('utilization-target', 'metric', 'utilization-target-type'):
if field not in custom_metric_utilization:
raise exceptions.InvalidArgumentException(
'--custom-metric-utilization', field + ' not present.')
if custom_metric_utilization['utilization-target'] < 0:
raise exceptions.InvalidArgumentException(
'--custom-metric-utilization utilization-target', 'less than 0.')
if ArgsSupportQueueScaling(args):
queue_spec_found = False
queue_target_found = False
if args.queue_scaling_cloud_pub_sub:
_ValidateCloudPubSubResource(
args.queue_scaling_cloud_pub_sub, 'topic')
_ValidateCloudPubSubResource(
args.queue_scaling_cloud_pub_sub, 'subscription')
queue_spec_found = True
if args.queue_scaling_acceptable_backlog_per_instance is not None:
queue_target_found = True
if queue_spec_found != queue_target_found:
raise exceptions.ToolException(
'Both queue specification and queue scaling target must be provided '
'for queue-based autoscaling.')
def GetInstanceGroupManagerOrThrow(igm_ref, project, compute,
http, batch_url):
"""Retrieves the given Instance Group Manager if possible.
Args:
igm_ref: reference to the Instance Group Manager.
project: project owning resources.
compute: module representing compute api.
http: communication channel.
batch_url: batch url.
Returns:
Instance Group Manager object.
"""
if hasattr(igm_ref, 'region'):
service = compute.regionInstanceGroupManagers
request = service.GetRequestType('Get')(project=project)
request.region = igm_ref.region
if hasattr(igm_ref, 'zone'):
service = compute.instanceGroupManagers
request = service.GetRequestType('Get')(project=project)
request.zone = igm_ref.zone
request.instanceGroupManager = igm_ref.Name()
errors = []
  # Run through the generator to actually make the requests and get potential
# errors.
igm_details = list(request_helper.MakeRequests(
requests=[(service, 'Get', request)],
http=http,
batch_url=batch_url,
errors=errors
))
if errors or len(igm_details) != 1:
utils.RaiseException(errors, ResourceNotFoundException,
error_message='Could not fetch resource:')
return igm_details[0]
def AutoscalersForZones(zones, project, compute, http, batch_url,
fail_when_api_not_supported=True):
"""Finds all Autoscalers defined for a given project and zones."""
return AutoscalersForLocations(
zones=zones,
regions=None,
project=project,
compute=compute,
http=http,
batch_url=batch_url,
fail_when_api_not_supported=fail_when_api_not_supported)
def AutoscalersForLocations(zones, regions,
project, compute, http, batch_url,
fail_when_api_not_supported=True):
"""Finds all Autoscalers defined for a given project and locations.
Args:
zones: target zones
regions: target regions
project: project owning resources.
compute: module representing compute api.
http: communication channel.
batch_url: batch url.
fail_when_api_not_supported: If true, raise tool exception if API does not
support autoscaling.
Returns:
A list of Autoscaler objects.
"""
# Errors is passed through library calls and modified with
# (ERROR_CODE, ERROR_MESSAGE) tuples.
errors = []
# Explicit list() is required to unwind the generator and make sure errors
# are detected at this level.
requests = []
if zones:
requests += lister.FormatListRequests(
service=compute.autoscalers,
project=project,
scopes=zones,
scope_name='zone',
filter_expr=None)
if regions:
if hasattr(compute, 'regionAutoscalers'):
requests += lister.FormatListRequests(
service=compute.regionAutoscalers,
project=project,
scopes=regions,
scope_name='region',
filter_expr=None)
else:
if fail_when_api_not_supported:
errors.append((None, 'API does not support regional autoscaling'))
autoscalers = list(request_helper.MakeRequests(
requests=requests,
http=http,
batch_url=batch_url,
errors=errors))
if errors:
utils.RaiseToolException(
errors,
error_message='Could not check if the Managed Instance Group is '
'Autoscaled.')
return autoscalers
def AutoscalersForMigs(migs, autoscalers, project):
"""Finds Autoscalers with target amongst given IGMs.
Args:
migs: List of triples (IGM name, scope type, scope name).
autoscalers: A list of Autoscalers to search among.
project: Project owning resources.
Returns:
A list of all Autoscalers with target on mig_names list.
"""
igm_url_regexes = []
for (name, scope_type, scope_name) in migs:
igm_url_regexes.append(
'/projects/{project}/{scopeType}/{scopeName}/'
'instanceGroupManagers/{name}$'
.format(project=project,
scopeType=(scope_type + 's'),
scopeName=scope_name,
name=name))
igm_url_regex = re.compile('(' + ')|('.join(igm_url_regexes) + ')')
result = [
autoscaler for autoscaler in autoscalers
if igm_url_regex.search(autoscaler.target)
]
return result
def AutoscalerForMig(mig_name, autoscalers, project, scope_name, scope_type):
"""Finds Autoscaler targetting given IGM.
Args:
    mig_name: Name of MIG targeted by Autoscaler.
autoscalers: A list of Autoscalers to search among.
project: Project owning resources.
scope_name: Target scope.
scope_type: Target scope type.
Returns:
Autoscaler object for autoscaling the given Instance Group Manager or None
when such Autoscaler does not exist.
"""
autoscalers = AutoscalersForMigs(
[(mig_name, scope_type, scope_name)], autoscalers, project)
if autoscalers:
# For each Instance Group Manager there can be at most one Autoscaler having
# the Manager as a target, so when one is found it can be returned as it is
# the only one.
if len(autoscalers) == 1:
return autoscalers[0]
else:
raise exceptions.ToolException(
          'More than one Autoscaler with the given target.')
return None
def AddAutoscalersToMigs(migs_iterator, project, compute, http,
batch_url, fail_when_api_not_supported=True):
"""Add Autoscaler to each IGM object if autoscaling is enabled for it."""
migs = list(migs_iterator)
zone_names = set([path_simplifier.Name(mig['zone'])
for mig in migs if 'zone' in mig])
region_names = set([path_simplifier.Name(mig['region'])
for mig in migs if 'region' in mig])
autoscalers = {}
all_autoscalers = AutoscalersForLocations(
zones=zone_names,
regions=region_names,
project=project,
compute=compute,
http=http,
batch_url=batch_url,
fail_when_api_not_supported=fail_when_api_not_supported)
for scope_name in list(zone_names) + list(region_names):
autoscalers[scope_name] = []
for autoscaler in all_autoscalers:
autoscaler_scope = None
if autoscaler.zone is not None:
autoscaler_scope = path_simplifier.Name(autoscaler.zone)
if hasattr(autoscaler, 'region') and autoscaler.region is not None:
autoscaler_scope = path_simplifier.Name(autoscaler.region)
if autoscaler_scope is not None:
autoscalers.setdefault(autoscaler_scope, [])
autoscalers[autoscaler_scope].append(autoscaler)
for mig in migs:
scope_name = None
scope_type = None
if 'region' in mig:
scope_name = path_simplifier.Name(mig['region'])
scope_type = 'region'
elif 'zone' in mig:
scope_name = path_simplifier.Name(mig['zone'])
scope_type = 'zone'
autoscaler = None
if scope_name and scope_type:
autoscaler = AutoscalerForMig(
mig_name=mig['name'],
autoscalers=autoscalers[scope_name],
project=project,
scope_name=scope_name,
scope_type=scope_type)
if autoscaler:
mig['autoscaler'] = autoscaler
yield mig
def _BuildCpuUtilization(args, messages):
if args.target_cpu_utilization:
return messages.AutoscalingPolicyCpuUtilization(
utilizationTarget=args.target_cpu_utilization,
)
if args.scale_based_on_cpu:
return messages.AutoscalingPolicyCpuUtilization()
return None
def _BuildCustomMetricUtilizations(args, messages):
"""Builds custom metric utilization policy list from args.
Args:
args: command line arguments.
messages: module containing message classes.
Returns:
AutoscalingPolicyCustomMetricUtilization list.
"""
result = []
if args.custom_metric_utilization:
for custom_metric_utilization in args.custom_metric_utilization:
result.append(
messages.AutoscalingPolicyCustomMetricUtilization(
utilizationTarget=custom_metric_utilization[
'utilization-target'],
metric=custom_metric_utilization['metric'],
utilizationTargetType=(
messages
.AutoscalingPolicyCustomMetricUtilization
.UtilizationTargetTypeValueValuesEnum(
custom_metric_utilization['utilization-target-type'],
)
),
)
)
return result
def _BuildLoadBalancingUtilization(args, messages):
if args.target_load_balancing_utilization:
return messages.AutoscalingPolicyLoadBalancingUtilization(
utilizationTarget=args.target_load_balancing_utilization,
)
if args.scale_based_on_load_balancing:
return messages.AutoscalingPolicyLoadBalancingUtilization()
return None
def _BuildQueueBasedScaling(args, messages):
"""Builds queue based scaling policy from args.
Args:
args: command line arguments.
messages: module containing message classes.
Returns:
AutoscalingPolicyQueueBasedScaling message object or None.
"""
if not ArgsSupportQueueScaling(args):
return None
queue_policy_dict = {}
if args.queue_scaling_cloud_pub_sub:
queue_policy_dict['cloudPubSub'] = (
messages.AutoscalingPolicyQueueBasedScalingCloudPubSub(
topic=args.queue_scaling_cloud_pub_sub['topic'],
subscription=args.queue_scaling_cloud_pub_sub['subscription']))
else:
return None # No queue spec.
if args.queue_scaling_acceptable_backlog_per_instance is not None:
queue_policy_dict['acceptableBacklogPerInstance'] = (
args.queue_scaling_acceptable_backlog_per_instance)
else:
return None # No queue target.
if args.queue_scaling_single_worker_throughput is not None:
queue_policy_dict['singleWorkerThroughputPerSec'] = (
args.queue_scaling_single_worker_throughput)
return messages.AutoscalingPolicyQueueBasedScaling(**queue_policy_dict)
def _BuildAutoscalerPolicy(args, messages):
"""Builds AutoscalingPolicy from args.
Args:
args: command line arguments.
messages: module containing message classes.
Returns:
AutoscalingPolicy message object.
"""
policy_dict = {
'coolDownPeriodSec': args.cool_down_period,
'cpuUtilization': _BuildCpuUtilization(args, messages),
'customMetricUtilizations': _BuildCustomMetricUtilizations(args,
messages),
'loadBalancingUtilization': _BuildLoadBalancingUtilization(args,
messages),
'queueBasedScaling': _BuildQueueBasedScaling(args, messages),
'maxNumReplicas': args.max_num_replicas,
'minNumReplicas': args.min_num_replicas,
}
return messages.AutoscalingPolicy(
**dict((key, value) for key, value in policy_dict.iteritems()
if value is not None)) # Filter out None values.
def AdjustAutoscalerNameForCreation(autoscaler_resource):
trimmed_name = autoscaler_resource.name[
0:(_MAX_AUTOSCALER_NAME_LENGTH - _NUM_RANDOM_CHARACTERS_IN_AS_NAME - 1)]
random_characters = [
random.choice(string.lowercase + string.digits)
for _ in range(_NUM_RANDOM_CHARACTERS_IN_AS_NAME)
]
random_suffix = ''.join(random_characters)
new_name = '{0}-{1}'.format(trimmed_name, random_suffix)
autoscaler_resource.name = new_name
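# For example (the suffix is random; shown purely as an illustration), a
# 70-character name is trimmed to 58 characters and becomes "<trimmed-name>-x7k2",
# staying within the 63-character limit.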
def BuildAutoscaler(args, messages, igm_ref, name, zone=None, region=None):
"""Builds autoscaler message protocol buffer."""
autoscaler = messages.Autoscaler(
autoscalingPolicy=_BuildAutoscalerPolicy(args, messages),
description=args.description,
name=name,
target=igm_ref.SelfLink(),
)
if zone:
autoscaler.zone = zone
if region:
autoscaler.region = region
return autoscaler
def AddAutohealingArgs(parser):
"""Adds autohealing-related commandline arguments to parser."""
health_check_group = parser.add_mutually_exclusive_group()
health_check_group.add_argument(
'--http-health-check',
help=('Specifies the HTTP health check object used for autohealing '
'instances in this group.'))
health_check_group.add_argument(
'--https-health-check',
help=('Specifies the HTTPS health check object used for autohealing '
'instances in this group.'))
initial_delay = parser.add_argument(
'--initial-delay',
type=arg_parsers.Duration(),
help=('Specifies the length of the period during which the instance '
'is known to be initializing and should not be autohealed even '
'if unhealthy.'))
initial_delay.detailed_help = """\
Specifies the length of the period during which the instance is known to
be initializing and should not be autohealed even if unhealthy.
Valid units for this flag are ``s'' for seconds, ``m'' for minutes and
``h'' for hours. If no unit is specified, seconds is assumed. This value
cannot be greater than 1 hour.
"""
def CreateAutohealingPolicies(resources, messages, args):
"""Creates autohealing policy list from args."""
if hasattr(args, 'http_health_check'): # alpha or beta
if args.http_health_check or args.https_health_check or args.initial_delay:
policy = messages.InstanceGroupManagerAutoHealingPolicy()
if args.http_health_check:
health_check_ref = resources.Parse(
args.http_health_check,
collection='compute.httpHealthChecks')
policy.healthCheck = health_check_ref.SelfLink()
elif args.https_health_check:
health_check_ref = resources.Parse(
args.https_health_check,
collection='compute.httpsHealthChecks')
policy.healthCheck = health_check_ref.SelfLink()
if args.initial_delay:
policy.initialDelaySec = args.initial_delay
return [policy]
return []
def _GetInstanceTemplatesSet(*versions_lists):
versions_set = set()
for versions_list in versions_lists:
versions_set.update(versions_list)
return versions_set
def ValidateVersions(igm_info, new_versions, force=False):
"""Validates whether versions provided by user are consistent.
Args:
igm_info: instance group manager resource.
new_versions: list of new versions.
force: if true, we allow any combination of instance templates, as long as
they are different. If false, only the following transitions are allowed:
X -> Y, X -> (X, Y), (X, Y) -> X, (X, Y) -> Y, (X, Y) -> (X, Y)
"""
if (len(new_versions) == 2
and new_versions[0].instanceTemplate == new_versions[1].instanceTemplate):
raise exceptions.ToolException(
'Provided instance templates must be different.')
if force:
return
# Only X -> Y, X -> (X, Y), (X, Y) -> X, (X, Y) -> Y, (X, Y) -> (X, Y)
# are allowed in gcloud (unless --force)
# Equivalently, at most two versions in old and new versions set union
if igm_info.versions:
igm_templates = [version.instanceTemplate for version in igm_info.versions]
elif igm_info.instanceTemplate:
igm_templates = [igm_info.instanceTemplate]
else:
raise exceptions.ToolException(
'Either versions or instance template must be specified for '
'managed instance group.')
new_templates = [version.instanceTemplate for version in new_versions]
version_count = len(_GetInstanceTemplatesSet(igm_templates, new_templates))
if version_count > 2:
raise exceptions.ToolException(
'Update inconsistent with current state. '
'The only allowed transitions between versions are: '
'X -> Y, X -> (X, Y), (X, Y) -> X, (X, Y) -> Y, (X, Y) -> (X, Y). '
'Please check versions templates or use --force.')
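# Illustrative sketch (added for clarity, not part of the original module):
# a worked example of the transition rule enforced by ValidateVersions. The
# namedtuple stand-ins and template URLs are assumptions made only for this
# example; real callers pass API message objects.
def _ExampleValidateVersions():
  """Example only: (X, Y) -> Y is allowed, (X, Y) -> Z is rejected."""
  import collections
  Version = collections.namedtuple('Version', ['instanceTemplate'])
  IgmInfo = collections.namedtuple('IgmInfo', ['versions', 'instanceTemplate'])
  igm_info = IgmInfo(versions=[Version('url/X'), Version('url/Y')],
                     instanceTemplate=None)
  # Allowed: the union of old and new templates is still {X, Y}.
  ValidateVersions(igm_info, [Version('url/Y')])
  # Rejected: the union {X, Y, Z} contains three distinct templates.
  try:
    ValidateVersions(igm_info, [Version('url/Z')])
  except exceptions.ToolException:
    pass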
| # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions and classes for dealing with managed instances groups."""
import random
import re
import string
import sys
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import path_simplifier
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions
_ALLOWED_UTILIZATION_TARGET_TYPES = [
'DELTA_PER_MINUTE', 'DELTA_PER_SECOND', 'GAUGE']
_MAX_AUTOSCALER_NAME_LENGTH = 63
# 4 character chosen from between lowercase letters and numbers give >1.6M
# possibilities with no more than 100 Autoscalers in one Zone and Project
# so probability that adding an autoscaler will fail because of name conflict
# is about 6e-5.
_NUM_RANDOM_CHARACTERS_IN_AS_NAME = 4
CLOUD_PUB_SUB_VALID_RESOURCE_RE = r'^[A-Za-z][A-Za-z0-9-_.~+%]{2,}$'
class ResourceNotFoundException(exceptions.ToolException):
pass
def ArgsSupportQueueScaling(args):
return 'queue_scaling_acceptable_backlog_per_instance' in args
def AddAutoscalerArgs(parser, queue_scaling_enabled=False):
"""Adds commandline arguments to parser."""
parser.add_argument('--cool-down-period', type=arg_parsers.Duration(),
help='Number of seconds Autoscaler will wait between '
'resizing collection. Note: The Autoscaler waits '
'10 minutes before scaling down, the value entered here '
'is in addition to the initial 10 minute period.')
parser.add_argument('--description', help='Notes about Autoscaler.')
parser.add_argument('--min-num-replicas',
type=arg_parsers.BoundedInt(0, sys.maxint),
help='Minimum number of replicas Autoscaler will set.')
parser.add_argument('--max-num-replicas',
type=arg_parsers.BoundedInt(0, sys.maxint), required=True,
help='Maximum number of replicas Autoscaler will set.')
parser.add_argument('--scale-based-on-cpu',
action='store_true',
help='Autoscaler will be based on CPU utilization.')
parser.add_argument('--scale-based-on-load-balancing',
action='store_true',
help=('Use autoscaling based on load balancing '
'utilization.'))
parser.add_argument('--target-cpu-utilization',
type=arg_parsers.BoundedFloat(0.0, 1.0),
help='Autoscaler will aim to maintain CPU utilization at '
'target level (0.0 to 1.0).')
parser.add_argument('--target-load-balancing-utilization',
type=arg_parsers.BoundedFloat(0.0, None),
help='Autoscaler will aim to maintain the load balancing '
'utilization level (greater than 0.0).')
custom_metric_utilization = parser.add_argument(
'--custom-metric-utilization',
type=arg_parsers.ArgDict(
spec={
'metric': str,
'utilization-target': float,
'utilization-target-type': str,
},
),
action='append',
help=('Autoscaler will maintain the target value of a Google Cloud '
'Monitoring metric.'),
)
custom_metric_utilization.detailed_help = """
  Adds a target metric value for the Autoscaler.
*metric*::: Protocol-free URL of a Google Cloud Monitoring metric.
*utilization-target*::: Value of the metric Autoscaler will aim to maintain
(greater than 0.0).
*utilization-target-type*::: How target is expressed. Valid values: {0}.
""".format(', '.join(_ALLOWED_UTILIZATION_TARGET_TYPES))
if queue_scaling_enabled:
cloud_pub_sub_spec = parser.add_argument(
'--queue-scaling-cloud-pub-sub',
type=arg_parsers.ArgDict(
spec={
'topic': str,
'subscription': str,
},
),
help='Scaling based on Cloud Pub/Sub queuing system.',
)
cloud_pub_sub_spec.detailed_help = """
Specifies queue-based scaling based on a Cloud Pub/Sub queuing system.
Both topic and subscription are required.
*topic*::: Topic specification. Can be just a name or a partial URL
(starting with "projects/..."). Topic must belong to the same project as
Autoscaler.
*subscription*::: Subscription specification. Can be just a name or a
partial URL (starting with "projects/..."). Subscription must belong to the
same project as Autoscaler and must be connected to the specified topic.
"""
parser.add_argument('--queue-scaling-acceptable-backlog-per-instance',
type=arg_parsers.BoundedFloat(0.0, None),
help='Queue-based scaling target: autoscaler will aim '
'to assure that average number of tasks in the queue '
'is no greater than this value.',)
parser.add_argument('--queue-scaling-single-worker-throughput',
type=arg_parsers.BoundedFloat(0.0, None),
                      help='Hint to the autoscaler for queue-based scaling on '
'how much throughput a single worker instance is able '
'to consume.')
def _ValidateCloudPubSubResource(pubsub_spec_dict, expected_resource_type):
"""Validate Cloud Pub/Sub resource spec format."""
def RaiseInvalidArgument(message):
raise exceptions.InvalidArgumentException(
'--queue-scaling-cloud-pub-sub:{0}'.format(expected_resource_type),
message)
if expected_resource_type not in pubsub_spec_dict:
raise exceptions.ToolException(
'Both topic and subscription are required for Cloud Pub/Sub '
'queue scaling specification.')
split_resource = pubsub_spec_dict[expected_resource_type].split('/')
if len(split_resource) == 1:
resource_name = split_resource[0]
elif len(split_resource) == 4:
(project_prefix, unused_project_name,
resource_prefix, resource_name) = split_resource
if project_prefix != 'projects':
RaiseInvalidArgument(
'partial-URL format for Cloud PubSub resource does not start with '
'"projects/"')
if resource_prefix != '{0}s'.format(expected_resource_type):
RaiseInvalidArgument('not in valid resource types: topic, subscription.')
else:
RaiseInvalidArgument(
'Cloud PubSub resource must either be just a name or a partial '
'URL (starting with "projects/").')
if not re.match(CLOUD_PUB_SUB_VALID_RESOURCE_RE, resource_name):
RaiseInvalidArgument('resource name not valid.')
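# Illustrative sketch (added for clarity, not part of the original module):
# the two Cloud Pub/Sub spec formats the validator above accepts. The
# resource names used here are made up for the example.
def _ExampleValidateCloudPubSubSpecs():
  """Example only: a bare name and a partial URL both pass validation."""
  _ValidateCloudPubSubResource(
      {'topic': 'my-topic', 'subscription': 'my-subscription'}, 'topic')
  _ValidateCloudPubSubResource(
      {'topic': 'projects/my-project/topics/my-topic',
       'subscription': 'my-subscription'}, 'topic')
  # Any other shape (for example 'my-project/my-topic') is rejected via
  # RaiseInvalidArgument.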
def ValidateAutoscalerArgs(args):
"""Validates args."""
if args.min_num_replicas and args.max_num_replicas:
if args.min_num_replicas > args.max_num_replicas:
raise exceptions.InvalidArgumentException(
'--max-num-replicas', 'can\'t be less than min num replicas.')
if args.custom_metric_utilization:
for custom_metric_utilization in args.custom_metric_utilization:
for field in ('utilization-target', 'metric', 'utilization-target-type'):
if field not in custom_metric_utilization:
raise exceptions.InvalidArgumentException(
'--custom-metric-utilization', field + ' not present.')
if custom_metric_utilization['utilization-target'] < 0:
raise exceptions.InvalidArgumentException(
'--custom-metric-utilization utilization-target', 'less than 0.')
if ArgsSupportQueueScaling(args):
queue_spec_found = False
queue_target_found = False
if args.queue_scaling_cloud_pub_sub:
_ValidateCloudPubSubResource(
args.queue_scaling_cloud_pub_sub, 'topic')
_ValidateCloudPubSubResource(
args.queue_scaling_cloud_pub_sub, 'subscription')
queue_spec_found = True
if args.queue_scaling_acceptable_backlog_per_instance is not None:
queue_target_found = True
if queue_spec_found != queue_target_found:
raise exceptions.ToolException(
'Both queue specification and queue scaling target must be provided '
'for queue-based autoscaling.')
def GetInstanceGroupManagerOrThrow(igm_ref, project, compute,
http, batch_url):
"""Retrieves the given Instance Group Manager if possible.
Args:
igm_ref: reference to the Instance Group Manager.
project: project owning resources.
compute: module representing compute api.
http: communication channel.
batch_url: batch url.
Returns:
Instance Group Manager object.
"""
if hasattr(igm_ref, 'region'):
service = compute.regionInstanceGroupManagers
request = service.GetRequestType('Get')(project=project)
request.region = igm_ref.region
if hasattr(igm_ref, 'zone'):
service = compute.instanceGroupManagers
request = service.GetRequestType('Get')(project=project)
request.zone = igm_ref.zone
request.instanceGroupManager = igm_ref.Name()
errors = []
  # Run through the generator to actually make the requests and get potential
# errors.
igm_details = list(request_helper.MakeRequests(
requests=[(service, 'Get', request)],
http=http,
batch_url=batch_url,
errors=errors
))
if errors or len(igm_details) != 1:
utils.RaiseException(errors, ResourceNotFoundException,
error_message='Could not fetch resource:')
return igm_details[0]
def AutoscalersForZones(zones, project, compute, http, batch_url,
fail_when_api_not_supported=True):
"""Finds all Autoscalers defined for a given project and zones."""
return AutoscalersForLocations(
zones=zones,
regions=None,
project=project,
compute=compute,
http=http,
batch_url=batch_url,
fail_when_api_not_supported=fail_when_api_not_supported)
def AutoscalersForLocations(zones, regions,
project, compute, http, batch_url,
fail_when_api_not_supported=True):
"""Finds all Autoscalers defined for a given project and locations.
Args:
zones: target zones
regions: target regions
project: project owning resources.
compute: module representing compute api.
http: communication channel.
batch_url: batch url.
fail_when_api_not_supported: If true, raise tool exception if API does not
support autoscaling.
Returns:
A list of Autoscaler objects.
"""
# Errors is passed through library calls and modified with
# (ERROR_CODE, ERROR_MESSAGE) tuples.
errors = []
# Explicit list() is required to unwind the generator and make sure errors
# are detected at this level.
requests = []
if zones:
requests += lister.FormatListRequests(
service=compute.autoscalers,
project=project,
scopes=zones,
scope_name='zone',
filter_expr=None)
if regions:
if hasattr(compute, 'regionAutoscalers'):
requests += lister.FormatListRequests(
service=compute.regionAutoscalers,
project=project,
scopes=regions,
scope_name='region',
filter_expr=None)
else:
if fail_when_api_not_supported:
errors.append((None, 'API does not support regional autoscaling'))
autoscalers = list(request_helper.MakeRequests(
requests=requests,
http=http,
batch_url=batch_url,
errors=errors))
if errors:
utils.RaiseToolException(
errors,
error_message='Could not check if the Managed Instance Group is '
'Autoscaled.')
return autoscalers
def AutoscalersForMigs(migs, autoscalers, project):
"""Finds Autoscalers with target amongst given IGMs.
Args:
migs: List of triples (IGM name, scope type, scope name).
autoscalers: A list of Autoscalers to search among.
project: Project owning resources.
Returns:
A list of all Autoscalers with target on mig_names list.
"""
igm_url_regexes = []
for (name, scope_type, scope_name) in migs:
igm_url_regexes.append(
'/projects/{project}/{scopeType}/{scopeName}/'
'instanceGroupManagers/{name}$'
.format(project=project,
scopeType=(scope_type + 's'),
scopeName=scope_name,
name=name))
igm_url_regex = re.compile('(' + ')|('.join(igm_url_regexes) + ')')
result = [
autoscaler for autoscaler in autoscalers
if igm_url_regex.search(autoscaler.target)
]
return result
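# Illustrative sketch (added for clarity, not part of the original module):
# the kind of autoscaler target URL the regular expression above is matched
# against. The project, zone and group names are made up for the example.
def _ExampleMigTargetUrl():
  """Example only: a zonal IGM self-link that AutoscalersForMigs matches."""
  return ('https://www.googleapis.com/compute/v1/projects/my-project/'
          'zones/us-central1-a/instanceGroupManagers/my-group')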
def AutoscalerForMig(mig_name, autoscalers, project, scope_name, scope_type):
"""Finds Autoscaler targetting given IGM.
Args:
mig_name: Name of MIG targetted by Autoscaler.
autoscalers: A list of Autoscalers to search among.
project: Project owning resources.
scope_name: Target scope.
scope_type: Target scope type.
Returns:
Autoscaler object for autoscaling the given Instance Group Manager or None
when such Autoscaler does not exist.
"""
autoscalers = AutoscalersForMigs(
[(mig_name, scope_type, scope_name)], autoscalers, project)
if autoscalers:
# For each Instance Group Manager there can be at most one Autoscaler having
# the Manager as a target, so when one is found it can be returned as it is
# the only one.
if len(autoscalers) == 1:
return autoscalers[0]
else:
raise exceptions.ToolException(
          'More than one Autoscaler with given target.')
return None
def AddAutoscalersToMigs(migs_iterator, project, compute, http,
batch_url, fail_when_api_not_supported=True):
"""Add Autoscaler to each IGM object if autoscaling is enabled for it."""
migs = list(migs_iterator)
zone_names = set([path_simplifier.Name(mig['zone'])
for mig in migs if 'zone' in mig])
region_names = set([path_simplifier.Name(mig['region'])
for mig in migs if 'region' in mig])
autoscalers = {}
all_autoscalers = AutoscalersForLocations(
zones=zone_names,
regions=region_names,
project=project,
compute=compute,
http=http,
batch_url=batch_url,
fail_when_api_not_supported=fail_when_api_not_supported)
for scope_name in list(zone_names) + list(region_names):
autoscalers[scope_name] = []
for autoscaler in all_autoscalers:
autoscaler_scope = None
if autoscaler.zone is not None:
autoscaler_scope = path_simplifier.Name(autoscaler.zone)
if hasattr(autoscaler, 'region') and autoscaler.region is not None:
autoscaler_scope = path_simplifier.Name(autoscaler.region)
if autoscaler_scope is not None:
autoscalers.setdefault(autoscaler_scope, [])
autoscalers[autoscaler_scope].append(autoscaler)
for mig in migs:
scope_name = None
scope_type = None
if 'region' in mig:
scope_name = path_simplifier.Name(mig['region'])
scope_type = 'region'
elif 'zone' in mig:
scope_name = path_simplifier.Name(mig['zone'])
scope_type = 'zone'
autoscaler = None
if scope_name and scope_type:
autoscaler = AutoscalerForMig(
mig_name=mig['name'],
autoscalers=autoscalers[scope_name],
project=project,
scope_name=scope_name,
scope_type=scope_type)
if autoscaler:
mig['autoscaler'] = autoscaler
yield mig
def _BuildCpuUtilization(args, messages):
if args.target_cpu_utilization:
return messages.AutoscalingPolicyCpuUtilization(
utilizationTarget=args.target_cpu_utilization,
)
if args.scale_based_on_cpu:
return messages.AutoscalingPolicyCpuUtilization()
return None
def _BuildCustomMetricUtilizations(args, messages):
"""Builds custom metric utilization policy list from args.
Args:
args: command line arguments.
messages: module containing message classes.
Returns:
AutoscalingPolicyCustomMetricUtilization list.
"""
result = []
if args.custom_metric_utilization:
for custom_metric_utilization in args.custom_metric_utilization:
result.append(
messages.AutoscalingPolicyCustomMetricUtilization(
utilizationTarget=custom_metric_utilization[
'utilization-target'],
metric=custom_metric_utilization['metric'],
utilizationTargetType=(
messages
.AutoscalingPolicyCustomMetricUtilization
.UtilizationTargetTypeValueValuesEnum(
custom_metric_utilization['utilization-target-type'],
)
),
)
)
return result
def _BuildLoadBalancingUtilization(args, messages):
if args.target_load_balancing_utilization:
return messages.AutoscalingPolicyLoadBalancingUtilization(
utilizationTarget=args.target_load_balancing_utilization,
)
if args.scale_based_on_load_balancing:
return messages.AutoscalingPolicyLoadBalancingUtilization()
return None
def _BuildQueueBasedScaling(args, messages):
"""Builds queue based scaling policy from args.
Args:
args: command line arguments.
messages: module containing message classes.
Returns:
AutoscalingPolicyQueueBasedScaling message object or None.
"""
if not ArgsSupportQueueScaling(args):
return None
queue_policy_dict = {}
if args.queue_scaling_cloud_pub_sub:
queue_policy_dict['cloudPubSub'] = (
messages.AutoscalingPolicyQueueBasedScalingCloudPubSub(
topic=args.queue_scaling_cloud_pub_sub['topic'],
subscription=args.queue_scaling_cloud_pub_sub['subscription']))
else:
return None # No queue spec.
if args.queue_scaling_acceptable_backlog_per_instance is not None:
queue_policy_dict['acceptableBacklogPerInstance'] = (
args.queue_scaling_acceptable_backlog_per_instance)
else:
return None # No queue target.
if args.queue_scaling_single_worker_throughput is not None:
queue_policy_dict['singleWorkerThroughputPerSec'] = (
args.queue_scaling_single_worker_throughput)
return messages.AutoscalingPolicyQueueBasedScaling(**queue_policy_dict)
def _BuildAutoscalerPolicy(args, messages):
"""Builds AutoscalingPolicy from args.
Args:
args: command line arguments.
messages: module containing message classes.
Returns:
AutoscalingPolicy message object.
"""
policy_dict = {
'coolDownPeriodSec': args.cool_down_period,
'cpuUtilization': _BuildCpuUtilization(args, messages),
'customMetricUtilizations': _BuildCustomMetricUtilizations(args,
messages),
'loadBalancingUtilization': _BuildLoadBalancingUtilization(args,
messages),
'queueBasedScaling': _BuildQueueBasedScaling(args, messages),
'maxNumReplicas': args.max_num_replicas,
'minNumReplicas': args.min_num_replicas,
}
return messages.AutoscalingPolicy(
**dict((key, value) for key, value in policy_dict.iteritems()
if value is not None)) # Filter out None values.
def AdjustAutoscalerNameForCreation(autoscaler_resource):
trimmed_name = autoscaler_resource.name[
0:(_MAX_AUTOSCALER_NAME_LENGTH - _NUM_RANDOM_CHARACTERS_IN_AS_NAME - 1)]
random_characters = [
random.choice(string.lowercase + string.digits)
for _ in range(_NUM_RANDOM_CHARACTERS_IN_AS_NAME)
]
random_suffix = ''.join(random_characters)
new_name = '{0}-{1}'.format(trimmed_name, random_suffix)
autoscaler_resource.name = new_name
def BuildAutoscaler(args, messages, igm_ref, name, zone=None, region=None):
"""Builds autoscaler message protocol buffer."""
autoscaler = messages.Autoscaler(
autoscalingPolicy=_BuildAutoscalerPolicy(args, messages),
description=args.description,
name=name,
target=igm_ref.SelfLink(),
)
if zone:
autoscaler.zone = zone
if region:
autoscaler.region = region
return autoscaler
def AddAutohealingArgs(parser):
"""Adds autohealing-related commandline arguments to parser."""
health_check_group = parser.add_mutually_exclusive_group()
health_check_group.add_argument(
'--http-health-check',
help=('Specifies the HTTP health check object used for autohealing '
'instances in this group.'))
health_check_group.add_argument(
'--https-health-check',
help=('Specifies the HTTPS health check object used for autohealing '
'instances in this group.'))
initial_delay = parser.add_argument(
'--initial-delay',
type=arg_parsers.Duration(),
help=('Specifies the length of the period during which the instance '
'is known to be initializing and should not be autohealed even '
'if unhealthy.'))
initial_delay.detailed_help = """\
Specifies the length of the period during which the instance is known to
be initializing and should not be autohealed even if unhealthy.
Valid units for this flag are ``s'' for seconds, ``m'' for minutes and
``h'' for hours. If no unit is specified, seconds is assumed. This value
cannot be greater than 1 hour.
"""
def CreateAutohealingPolicies(resources, messages, args):
"""Creates autohealing policy list from args."""
if hasattr(args, 'http_health_check'): # alpha or beta
if args.http_health_check or args.https_health_check or args.initial_delay:
policy = messages.InstanceGroupManagerAutoHealingPolicy()
if args.http_health_check:
health_check_ref = resources.Parse(
args.http_health_check,
collection='compute.httpHealthChecks')
policy.healthCheck = health_check_ref.SelfLink()
elif args.https_health_check:
health_check_ref = resources.Parse(
args.https_health_check,
collection='compute.httpsHealthChecks')
policy.healthCheck = health_check_ref.SelfLink()
if args.initial_delay:
policy.initialDelaySec = args.initial_delay
return [policy]
return []
def _GetInstanceTemplatesSet(*versions_lists):
versions_set = set()
for versions_list in versions_lists:
versions_set.update(versions_list)
return versions_set
def ValidateVersions(igm_info, new_versions, force=False):
"""Validates whether versions provided by user are consistent.
Args:
igm_info: instance group manager resource.
new_versions: list of new versions.
force: if true, we allow any combination of instance templates, as long as
they are different. If false, only the following transitions are allowed:
X -> Y, X -> (X, Y), (X, Y) -> X, (X, Y) -> Y, (X, Y) -> (X, Y)
"""
if (len(new_versions) == 2
and new_versions[0].instanceTemplate == new_versions[1].instanceTemplate):
raise exceptions.ToolException(
'Provided instance templates must be different.')
if force:
return
# Only X -> Y, X -> (X, Y), (X, Y) -> X, (X, Y) -> Y, (X, Y) -> (X, Y)
# are allowed in gcloud (unless --force)
# Equivalently, at most two versions in old and new versions set union
if igm_info.versions:
igm_templates = [version.instanceTemplate for version in igm_info.versions]
elif igm_info.instanceTemplate:
igm_templates = [igm_info.instanceTemplate]
else:
raise exceptions.ToolException(
'Either versions or instance template must be specified for '
'managed instance group.')
new_templates = [version.instanceTemplate for version in new_versions]
version_count = len(_GetInstanceTemplatesSet(igm_templates, new_templates))
if version_count > 2:
raise exceptions.ToolException(
'Update inconsistent with current state. '
'The only allowed transitions between versions are: '
'X -> Y, X -> (X, Y), (X, Y) -> X, (X, Y) -> Y, (X, Y) -> (X, Y). '
'Please check versions templates or use --force.')
| en | 0.759545 | # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Common functions and classes for dealing with managed instances groups. # 4 character chosen from between lowercase letters and numbers give >1.6M # possibilities with no more than 100 Autoscalers in one Zone and Project # so probability that adding an autoscaler will fail because of name conflict # is about 6e-5. Adds commandline arguments to parser. Adds a target metric value for the to the Autoscaler. *metric*::: Protocol-free URL of a Google Cloud Monitoring metric. *utilization-target*::: Value of the metric Autoscaler will aim to maintain (greater than 0.0). *utilization-target-type*::: How target is expressed. Valid values: {0}. Specifies queue-based scaling based on a Cloud Pub/Sub queuing system. Both topic and subscription are required. *topic*::: Topic specification. Can be just a name or a partial URL (starting with "projects/..."). Topic must belong to the same project as Autoscaler. *subscription*::: Subscription specification. Can be just a name or a partial URL (starting with "projects/..."). Subscription must belong to the same project as Autoscaler and must be connected to the specified topic. Validate Cloud Pub/Sub resource spec format. Validates args. Retrieves the given Instance Group Manager if possible. Args: igm_ref: reference to the Instance Group Manager. project: project owning resources. compute: module representing compute api. http: communication channel. batch_url: batch url. Returns: Instance Group Manager object. # Run throught the generator to actually make the requests and get potential # errors. Finds all Autoscalers defined for a given project and zones. Finds all Autoscalers defined for a given project and locations. Args: zones: target zones regions: target regions project: project owning resources. compute: module representing compute api. http: communication channel. batch_url: batch url. fail_when_api_not_supported: If true, raise tool exception if API does not support autoscaling. Returns: A list of Autoscaler objects. # Errors is passed through library calls and modified with # (ERROR_CODE, ERROR_MESSAGE) tuples. # Explicit list() is required to unwind the generator and make sure errors # are detected at this level. Finds Autoscalers with target amongst given IGMs. Args: migs: List of triples (IGM name, scope type, scope name). autoscalers: A list of Autoscalers to search among. project: Project owning resources. Returns: A list of all Autoscalers with target on mig_names list. Finds Autoscaler targetting given IGM. Args: mig_name: Name of MIG targetted by Autoscaler. autoscalers: A list of Autoscalers to search among. project: Project owning resources. scope_name: Target scope. scope_type: Target scope type. Returns: Autoscaler object for autoscaling the given Instance Group Manager or None when such Autoscaler does not exist. 
# For each Instance Group Manager there can be at most one Autoscaler having # the Manager as a target, so when one is found it can be returned as it is # the only one. Add Autoscaler to each IGM object if autoscaling is enabled for it. Builds custom metric utilization policy list from args. Args: args: command line arguments. messages: module containing message classes. Returns: AutoscalingPolicyCustomMetricUtilization list. Builds queue based scaling policy from args. Args: args: command line arguments. messages: module containing message classes. Returns: AutoscalingPolicyQueueBasedScaling message object or None. # No queue spec. # No queue target. Builds AutoscalingPolicy from args. Args: args: command line arguments. messages: module containing message classes. Returns: AutoscalingPolicy message object. # Filter out None values. Builds autoscaler message protocol buffer. Adds autohealing-related commandline arguments to parser. \ Specifies the length of the period during which the instance is known to be initializing and should not be autohealed even if unhealthy. Valid units for this flag are ``s'' for seconds, ``m'' for minutes and ``h'' for hours. If no unit is specified, seconds is assumed. This value cannot be greater than 1 hour. Creates autohealing policy list from args. # alpha or beta Validates whether versions provided by user are consistent. Args: igm_info: instance group manager resource. new_versions: list of new versions. force: if true, we allow any combination of instance templates, as long as they are different. If false, only the following transitions are allowed: X -> Y, X -> (X, Y), (X, Y) -> X, (X, Y) -> Y, (X, Y) -> (X, Y) # Only X -> Y, X -> (X, Y), (X, Y) -> X, (X, Y) -> Y, (X, Y) -> (X, Y) # are allowed in gcloud (unless --force) # Equivalently, at most two versions in old and new versions set union | 1.70726 | 2 |
project_version/constants.py | dmytrostriletskyi/project-version | 7 | 6633237 | """
Provide constants for the command line interface.
"""
SUCCESSFUL_EXIT_CODE = 0
FAILED_EXIT_CODE = 1
GIT_HUB_PROVIDER = 'GitHub'
SUPPORTED_PROVIDERS = [GIT_HUB_PROVIDER]
| """
Provide constants for the command line interface.
"""
SUCCESSFUL_EXIT_CODE = 0
FAILED_EXIT_CODE = 1
GIT_HUB_PROVIDER = 'GitHub'
SUPPORTED_PROVIDERS = [GIT_HUB_PROVIDER]
| en | 0.852776 | Provide constant for command line interface. | 1.035439 | 1 |
tony-examples/horovod-on-tony/horovod_debug_driver.py | ashahab/TonY | 645 | 6633238 | <filename>tony-examples/horovod-on-tony/horovod_debug_driver.py
#
# Copyright 2021 LinkedIn Corporation. All rights reserved. Licensed under the
# BSD-2 Clause license. See LICENSE in the project root for license information.
#
import os
import logging
import time
from optparse import OptionParser
import sys
import signal
import json
try:
import horovod.tensorflow as hvd
from horovod.runner import gloo_run
from horovod.runner.http.http_server import RendezvousServer
from horovod.runner.common.util.hosts import get_host_assignments, parse_hosts
from horovod.runner.elastic import discovery
from horovod.runner.elastic.rendezvous import create_rendezvous_handler
from horovod.runner.elastic.driver import ElasticDriver
except Exception as e:
logging.error("Horovod is not installed. See README for instructions to install it")
pass
PORT_FILE_NAME_SUFFIX = "____HOROVOD_RENDEZVOUS_SERVER____"
default_worker_list = os.getenv("CLUSTER_WORKER_LIST")
print("Print worker_list:")
print(default_worker_list)
default_output_path = os.getenv("DRIVER_OUTPUT_PATH")
print("Print output path:")
print(default_output_path)
def elastic_driver_fn():
pass
def static_driver_fn():
global_rendezv = RendezvousServer(verbose=1)
global_rendezv_port = global_rendezv.start()
print("Rendezvous server started, port: " + str(global_rendezv_port))
# worker_list = "localhost:1"
hosts = parse_hosts(worker_list)
host_alloc_plan = get_host_assignments(hosts, 1)
global_rendezv.init(host_alloc_plan)
return (global_rendezv_port, host_alloc_plan)
def _build_fake_host_plan():
hostname = worker_list.split(":")[0]
return [
{
"hostname": hostname,
"rank": "0",
"localRank": "0",
"crossRank": "0",
"size": "2",
"localSize": "2",
"crossSize": "1"
},
{
"hostname": hostname,
"rank": "1",
"localRank": "1",
"crossRank": "1",
"size": "2",
"localSize": "2",
"crossSize": "1"
}
]
def _get_host_plan_json(host_alloc_plan):
if host_alloc_plan == None:
return json.dumps(_build_fake_host_plan())
hosts = []
for plan in host_alloc_plan:
hosts.append({
"hostname": plan.hostname,
"rank": plan.rank,
"localRank": plan.local_rank,
"crossRank": plan.cross_rank,
"size": plan.size,
"localSize": plan.local_size,
"crossSize": plan.cross_size
})
print("Host alloc plan: \n" + json.dumps(hosts))
return json.dumps(hosts)
def set_option():
parser = OptionParser()
parser.add_option(
"-a", "--num_proc", dest="num_process", type="str", help="number process of training", default="1")
parser.add_option(
"-w", "--worker_list", dest="worker_list", type="str", help="worker list", default=default_worker_list
)
parser.add_option(
"-e", action="store_true", help="enable elastic training.", dest="enable_elastic", default=False
)
parser.add_option(
"-t", action="store_true", help="is in test mode", dest="is_in_test_mode", default=False
)
parser.add_option(
"-p", "--fake_port", dest="fake_port", type="str", help="fake server port for TonY unit test"
)
parser.add_option(
"-f", action="store_true", help="fast fail in test mode for TonY unit test", dest="is_fast_fail", default=False
)
(options, args) = parser.parse_args(sys.argv)
global worker_list
worker_list = options.worker_list
global enable_elastic
enable_elastic = options.enable_elastic
print("Enable elastic: " + str(enable_elastic))
global is_in_test_mode
is_in_test_mode = options.is_in_test_mode
global fake_server_port
global is_fast_fail
is_fast_fail = False
if is_in_test_mode:
fake_server_port = options.fake_port
is_fast_fail = options.is_fast_fail
def __port_file_path(port):
path_dir = default_output_path
port_file_path = os.path.join(path_dir, str(port) + PORT_FILE_NAME_SUFFIX)
return port_file_path
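# Illustrative sketch (added for clarity, not part of the original script):
# how the rendezvous port ends up encoded in the port file name. The output
# directory and port number below are assumptions for the example only.
def _example_port_file_path():
    """Example only: with DRIVER_OUTPUT_PATH=/tmp/out and port 29999 the
    driver would write /tmp/out/29999____HOROVOD_RENDEZVOUS_SERVER____."""
    return os.path.join("/tmp/out", str(29999) + PORT_FILE_NAME_SUFFIX)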
def create_port_file(port, host_alloc_plan):
port_file = __port_file_path(port)
logging.info("Creating port file %s", port_file)
with open(__port_file_path(port), 'w') as fo:
fo.write(_get_host_plan_json(host_alloc_plan))
logging.info("Port file for %s created", port_file)
pass
def delete_port_file(port):
port_file = __port_file_path(port)
logging.info("Deleting port file %s", port_file)
try:
os.remove(__port_file_path(port))
logging.info("Port file %s deleted", port_file)
except OSError:
pass
def handle_exit(*args):
try:
logging.info("Closing rendezvous server...")
# todo: Close rendezvous server.
logging.info("Closed rendezvous server")
delete_port_file(port)
except:
logging.exception("Failed to close rendezvous server")
sys.exit(0)
if __name__ == '__main__':
set_option()
# Just for Unit Test
if is_fast_fail:
sys.exit(1)
try:
global port
if enable_elastic:
elastic_driver_fn()
else:
if is_in_test_mode:
print("In unit test mode. fake port: " + fake_server_port)
(port, host_alloc_plan) = (fake_server_port, None)
else:
(port, host_alloc_plan) = static_driver_fn()
create_port_file(port, host_alloc_plan)
except:
logging.exception("Errors on starting horovod rendezvous server.")
handle_exit()
signal.signal(signal.SIGTERM, handle_exit)
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGILL, handle_exit)
while True:
time.sleep(10)
| <filename>tony-examples/horovod-on-tony/horovod_debug_driver.py
#
# Copyright 2021 LinkedIn Corporation. All rights reserved. Licensed under the
# BSD-2 Clause license. See LICENSE in the project root for license information.
#
import os
import logging
import time
from optparse import OptionParser
import sys
import signal
import json
try:
import horovod.tensorflow as hvd
from horovod.runner import gloo_run
from horovod.runner.http.http_server import RendezvousServer
from horovod.runner.common.util.hosts import get_host_assignments, parse_hosts
from horovod.runner.elastic import discovery
from horovod.runner.elastic.rendezvous import create_rendezvous_handler
from horovod.runner.elastic.driver import ElasticDriver
except Exception as e:
logging.error("Horovod is not installed. See README for instructions to install it")
pass
PORT_FILE_NAME_SUFFIX = "____HOROVOD_RENDEZVOUS_SERVER____"
default_worker_list = os.getenv("CLUSTER_WORKER_LIST")
print("Print worker_list:")
print(default_worker_list)
default_output_path = os.getenv("DRIVER_OUTPUT_PATH")
print("Print output path:")
print(default_output_path)
def elastic_driver_fn():
pass
def static_driver_fn():
global_rendezv = RendezvousServer(verbose=1)
global_rendezv_port = global_rendezv.start()
print("Rendezvous server started, port: " + str(global_rendezv_port))
# worker_list = "localhost:1"
hosts = parse_hosts(worker_list)
host_alloc_plan = get_host_assignments(hosts, 1)
global_rendezv.init(host_alloc_plan)
return (global_rendezv_port, host_alloc_plan)
def _build_fake_host_plan():
hostname = worker_list.split(":")[0]
return [
{
"hostname": hostname,
"rank": "0",
"localRank": "0",
"crossRank": "0",
"size": "2",
"localSize": "2",
"crossSize": "1"
},
{
"hostname": hostname,
"rank": "1",
"localRank": "1",
"crossRank": "1",
"size": "2",
"localSize": "2",
"crossSize": "1"
}
]
def _get_host_plan_json(host_alloc_plan):
if host_alloc_plan == None:
return json.dumps(_build_fake_host_plan())
hosts = []
for plan in host_alloc_plan:
hosts.append({
"hostname": plan.hostname,
"rank": plan.rank,
"localRank": plan.local_rank,
"crossRank": plan.cross_rank,
"size": plan.size,
"localSize": plan.local_size,
"crossSize": plan.cross_size
})
print("Host alloc plan: \n" + json.dumps(hosts))
return json.dumps(hosts)
def set_option():
parser = OptionParser()
parser.add_option(
"-a", "--num_proc", dest="num_process", type="str", help="number process of training", default="1")
parser.add_option(
"-w", "--worker_list", dest="worker_list", type="str", help="worker list", default=default_worker_list
)
parser.add_option(
"-e", action="store_true", help="enable elastic training.", dest="enable_elastic", default=False
)
parser.add_option(
"-t", action="store_true", help="is in test mode", dest="is_in_test_mode", default=False
)
parser.add_option(
"-p", "--fake_port", dest="fake_port", type="str", help="fake server port for TonY unit test"
)
parser.add_option(
"-f", action="store_true", help="fast fail in test mode for TonY unit test", dest="is_fast_fail", default=False
)
(options, args) = parser.parse_args(sys.argv)
global worker_list
worker_list = options.worker_list
global enable_elastic
enable_elastic = options.enable_elastic
print("Enable elastic: " + str(enable_elastic))
global is_in_test_mode
is_in_test_mode = options.is_in_test_mode
global fake_server_port
global is_fast_fail
is_fast_fail = False
if is_in_test_mode:
fake_server_port = options.fake_port
is_fast_fail = options.is_fast_fail
def __port_file_path(port):
path_dir = default_output_path
port_file_path = os.path.join(path_dir, str(port) + PORT_FILE_NAME_SUFFIX)
return port_file_path
def create_port_file(port, host_alloc_plan):
port_file = __port_file_path(port)
logging.info("Creating port file %s", port_file)
with open(__port_file_path(port), 'w') as fo:
fo.write(_get_host_plan_json(host_alloc_plan))
logging.info("Port file for %s created", port_file)
pass
def delete_port_file(port):
port_file = __port_file_path(port)
logging.info("Deleting port file %s", port_file)
try:
os.remove(__port_file_path(port))
logging.info("Port file %s deleted", port_file)
except OSError:
pass
def handle_exit(*args):
try:
logging.info("Closing rendezvous server...")
# todo: Close rendezvous server.
logging.info("Closed rendezvous server")
delete_port_file(port)
except:
logging.exception("Failed to close rendezvous server")
sys.exit(0)
if __name__ == '__main__':
set_option()
# Just for Unit Test
if is_fast_fail:
sys.exit(1)
try:
global port
if enable_elastic:
elastic_driver_fn()
else:
if is_in_test_mode:
print("In unit test mode. fake port: " + fake_server_port)
(port, host_alloc_plan) = (fake_server_port, None)
else:
(port, host_alloc_plan) = static_driver_fn()
create_port_file(port, host_alloc_plan)
except:
logging.exception("Errors on starting horovod rendezvous server.")
handle_exit()
signal.signal(signal.SIGTERM, handle_exit)
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGILL, handle_exit)
while True:
time.sleep(10)
| en | 0.751865 | # # Copyright 2021 LinkedIn Corporation. All rights reserved. Licensed under the # BSD-2 Clause license. See LICENSE in the project root for license information. # # worker_list = "localhost:1" # todo: Close rendezvous server. # Just for Unit Test | 2.036106 | 2 |
pyro/contrib/epidemiology/__init__.py | garrett-bernstein/pyro | 0 | 6633239 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from .compartmental import CompartmentalModel
from .distributions import beta_binomial_dist, binomial_dist, infection_dist
__all__ = [
"CompartmentalModel",
"beta_binomial_dist",
"binomial_dist",
"infection_dist",
]
| # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from .compartmental import CompartmentalModel
from .distributions import beta_binomial_dist, binomial_dist, infection_dist
__all__ = [
"CompartmentalModel",
"beta_binomial_dist",
"binomial_dist",
"infection_dist",
]
| en | 0.459 | # Copyright Contributors to the Pyro project. # SPDX-License-Identifier: Apache-2.0 | 1.145195 | 1 |
src/ext/api/__init__.py | uesleicarvalhoo/FlaskBoilerplate | 0 | 6633240 | from flask import Flask
def init_app(app: Flask) -> None:
pass
| from flask import Flask
def init_app(app: Flask) -> None:
pass
| none | 1 | 1.089447 | 1 |
|
setup.py | tgadf/movies | 0 | 6633241 | from distutils.core import setup
import setuptools
setup(
name = 'movies',
py_modules = ['AACTA',
'filmsite',
'razzies',
'BAFTA',
'flops',
'rollingstone',
'SAG',
'goldenglobes',
'rottentomatoes',
'amc',
'movieDB',
'setup',
'boxofficemojo',
'movieRenames',
'ultimatemovierankings',
'canada',
'movies',
'wikifilm',
'combine',
'mymovies',
'wikipedia',
'films101',
'oscar'],
version = '0.0.1',
description = 'My Movie Parser',
long_description = open('README.md').read(),
author = '<NAME>',
author_email = '<EMAIL>',
license = "MIT",
url = 'https://github.com/tgadf/movies',
keywords = ['movies'],
classifiers = [
'Development Status :: 3',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
],
install_requires=['utils==0.0.1'],
dependency_links=['git+ssh://git@github.com/tgadf/utils.git#egg=utils-0.0.1']
)
| from distutils.core import setup
import setuptools
setup(
name = 'movies',
py_modules = ['AACTA',
'filmsite',
'razzies',
'BAFTA',
'flops',
'rollingstone',
'SAG',
'goldenglobes',
'rottentomatoes',
'amc',
'movieDB',
'setup',
'boxofficemojo',
'movieRenames',
'ultimatemovierankings',
'canada',
'movies',
'wikifilm',
'combine',
'mymovies',
'wikipedia',
'films101',
'oscar'],
version = '0.0.1',
description = 'My Movie Parser',
long_description = open('README.md').read(),
author = '<NAME>',
author_email = '<EMAIL>',
license = "MIT",
url = 'https://github.com/tgadf/movies',
keywords = ['movies'],
classifiers = [
'Development Status :: 3',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
],
install_requires=['utils==0.0.1'],
dependency_links=['git+ssh://git@github.com/tgadf/utils.git#egg=utils-0.0.1']
)
| zh | 0.347785 | #egg=utils-0.0.1'] | 1.232881 | 1 |
tests/run_py27.py | radluz/fakear | 2 | 6633242 | import subprocess
class PopenAdapter:
def __init__(self, stdout, stderr, retcode):
self.stdout = stdout
self.stderr = stderr
self.returncode = retcode
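# Illustrative sketch (added for clarity, not part of the original module):
# together with run() below, this adapter mimics the subprocess.run()
# interface that Python 2.7 lacks. The echo command is only an example.
def _example_usage():
    result = run(["echo", "hello"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # result.stdout holds the captured output, result.returncode the exit status.
    return result.stdout, result.returncode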
def run(*args, **kwargs):
p = subprocess.Popen(*args, **kwargs)
out, err = p.communicate()
return PopenAdapter(out, err, p.returncode) | import subprocess
class PopenAdapter:
def __init__(self, stdout, stderr, retcode):
self.stdout = stdout
self.stderr = stderr
self.returncode = retcode
def run(*args, **kwargs):
p = subprocess.Popen(*args, **kwargs)
out, err = p.communicate()
return PopenAdapter(out, err, p.returncode) | none | 1 | 2.62185 | 3 |
|
SpectroscoPy/Spectrum.py | faradaymahe/Phonopy-Spectroscopy | 94 | 6633243 | <filename>SpectroscoPy/Spectrum.py
# SpectroscoPy/Spectrum.py
# ---------
# Docstring
# ---------
""" Core routines for simulating spectra. """
# -------
# Imports
# -------
import math;
import numpy as np;
# ---------
# Constants
# ---------
""" To ensure the maximum is included in grids, np.arange() is called as np.arange(min, max + RangeStepMultiplier * step, step). """
RangeStepMultiplier = 1.0e-5;
""" A "safe" multiplier of the linewidth (e.g. sigma for Gaussian and gamma for Lorentzian functions) for padding grids used to evaluate peak functions. """
SpectrumPaddingMultiplier = 5.0;
""" Minimum number of points for automatically determining spectrum resolutions. """
SpectrumResolutionMinPoints = 1000;
# ---------
# Functions
# ---------
def Gaussian(x, i, mu, sigma):
""" Return G(x) = (i / (sigma * sqrt(2 * pi))) * exp(-1 * (x - mu) ** 2 / (2 * sigma ** 2)). """
# Definition of the Gaussian function with unit area taken from http://mathworld.wolfram.com/GaussianFunction.html.
return (i / (sigma * math.sqrt(2.0 * math.pi))) * np.exp(-1.0 * (x - mu) ** 2 / (2 * sigma ** 2));
def Lorentzian(x, i, x0, gamma):
""" Return L(x) = (i / pi) * ((0.5 * gamma) / ((x - x_0) ** 2) + (0.5 * gamma) ** 2). """
# Definition of the Lorentzian function with unit area taken from http://mathworld.wolfram.com/LorentzianFunction.html.
return (i / math.pi) * ((0.5 * gamma) / ((x - x0) ** 2 + (0.5 * gamma) ** 2));
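# Illustrative sketch (added for clarity, not part of the original module):
# both peak functions integrate to (approximately) the supplied intensity i,
# which can be checked numerically on a wide, fine grid.
def _ExamplePeakAreas():
    """ Example only: numerical areas of unit-intensity Gaussian and Lorentzian peaks. """
    x = np.arange(-100.0, 100.0, 0.01);
    gaussianArea = np.trapz(Gaussian(x, 1.0, 0.0, 2.0), x);
    lorentzianArea = np.trapz(Lorentzian(x, 1.0, 0.0, 2.0), x);
    # Both areas come out close to 1.0; the Lorentzian converges more slowly because of its heavy tails.
    return (gaussianArea, lorentzianArea);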
def SimulateSpectrum(
frequencies, intensities, linewidths,
spectrumRange = None, spectrumResolution = None,
instrumentBroadening = None, instrumentBroadeningShape = 'gaussian'
):
"""
Given a set of mode frequencies and intensities plus a nominal linewidth or set of linewidths, simulate a spectrum.
Optionally, apply an instrument broadening by convolving with a fixed-width Gaussian or Lorentzian function.
Arguments:
frequencies -- mode frequencies.
intensities -- mode spectroscopic intensities.
linewidths -- nominal mode linewidth or list of mode linewidths, taken to be the full-width at half-maxima (FWHM) of Lorentzian peak profiles.
Keyword arguments:
spectrumRange -- range of frequencies over which to simulate the spectrum (defaults to approx. min(frequencies) - SpectrumPaddingMultiplier * max(linewidths) -> max(frequencies) + SpectrumPaddingMultiplier * max(linewidths)).
spectrumResolution -- frequency resolution of the spectrum (default: adjust to give at least SpectrumResolutionMinPoints data points, or a minimum resolution of 1, whichever is larger).
instrumentBroadening -- instrument broadening width (default: no instrument broadening).
instrumentBroadeningShape -- shape of instrument broadening ('gaussian' or 'lorentzian'; default: 'gaussian').
Return value:
A tuple of (spectrumX, spectrumY, spectrumYNorm) data.
Notes:
    If a min and max are specified with spectrumRange, they may be modified to "align" with the resolution (i.e. so that the frequency axis starts and finishes on an integer multiple of the resolution).
"""
numModes = len(frequencies);
if len(intensities) != numModes:
raise Exception("Error: The lengths of frequencies and intensities are inconsistent.");
try:
# Assume linewidths is a scalar linewidth and repeat it to a list.
linewidth = float(linewidths);
linewidths = [linewidth] * numModes;
except TypeError:
# If linewidths is a list or multi-element NumPy array, casting to a float will raise a TypeError.
if len(linewidths) != numModes:
raise Exception("Error: The lengths of frequencies and linewidths are inconsistent.");
# Set a minimum and maximum.
spectrumMin, spectrumMax = None, None;
if spectrumRange == None:
maxLinewidth = max(linewidths);
if numModes == 1 and maxLinewidth == 0.0:
raise Exception("Error: Cannot determine a spectrum range automatically - please specify manually using the spectrumRange argument.");
# +/- 5 sigma/gamma should be sufficient for this.
spectrumMin = min(frequencies) - SpectrumPaddingMultiplier * maxLinewidth;
spectrumMax = max(frequencies) + SpectrumPaddingMultiplier * maxLinewidth;
else:
spectrumMin, spectrumMax = spectrumRange;
if spectrumMin == spectrumMax:
raise Exception("Error: The min and max specified in spectrumRange are the same.");
if spectrumMax < spectrumMin:
spectrumMax, spectrumMin = spectrumMin, spectrumMax;
# Set a resolution if required.
if spectrumResolution == None:
nominalResolution = math.pow(
10.0, math.floor(math.log10(math.ceil(spectrumMax - spectrumMin) / SpectrumResolutionMinPoints))
);
spectrumResolution = min(nominalResolution, 1.0);
# If the spectrum range is being automatically determined, make sure it is "aligned" with the resolution.
if spectrumRange == None:
spectrumMin = spectrumResolution * math.floor(spectrumMin / spectrumResolution);
spectrumMax = spectrumResolution * math.ceil(spectrumMax / spectrumResolution);
# If applying instrument broadening, calculate the convolution kernel.
    # Also expand the range of the spectrum so that the convolution kernel does not produce boundary effects inside the selected region.
convNumPoints, convKernel = None, None;
if instrumentBroadening != None:
# Calculating the convolution kernel to +/- 5 sigma/gamma should be sufficient.
# According to https://en.wikipedia.org/wiki/Gaussian_blur, +/- 3 sigma is considered enough for Gaussian blurring kernels.
convNumPoints = int(
math.ceil(SpectrumPaddingMultiplier * instrumentBroadening / spectrumResolution)
);
convX = np.arange(
-1.0 * convNumPoints * spectrumResolution, (convNumPoints + 1.0e-5) * spectrumResolution, spectrumResolution
);
if instrumentBroadeningShape == 'gaussian':
convKernel = Gaussian(convX, 1.0, 0.0, instrumentBroadening);
elif instrumentBroadeningShape == 'lorentzian':
convKernel = Lorentzian(convX, 1.0, 0.0, instrumentBroadening);
else:
raise Exception("Error: Unrecognised instrumentBroadeningShape '{0}'.".format(instrumentBroadeningShape));
convKernel /= convKernel.sum();
spectrumMin = spectrumMin - convNumPoints * spectrumResolution;
spectrumMax = spectrumMax + convNumPoints * spectrumResolution;
# Simulate spectrum.
spectrumX = np.arange(spectrumMin, spectrumMax + 1.0e-5 * spectrumResolution, spectrumResolution, dtype = np.float64);
spectrumY = np.zeros_like(spectrumX, dtype = np.float64);
for frequency, intensity, linewidth in zip(frequencies, intensities, linewidths):
spectrumY += Lorentzian(spectrumX, intensity, frequency, linewidth);
# Apply instrument broadening if required.
if convKernel is not None:
# mode = 'valid' will cause np.convolve() to trim the first and last convNumPoints data points from the spectrum.
spectrumX = spectrumX[convNumPoints:-convNumPoints];
spectrumY = np.convolve(spectrumY, convKernel, mode = 'valid');
# Just in case my maths went wrong somewhere...
assert len(spectrumX) == len(spectrumY);
# Normalise spectrum.
spectrumYNorm = spectrumY / math.fabs(spectrumY.max())
# Return simulated spectrum.
return (spectrumX, spectrumY, spectrumYNorm);
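# Illustrative sketch (added for clarity, not part of the original module):
# a minimal call to SimulateSpectrum() for three made-up modes with a common
# linewidth of 10 and a Gaussian instrument broadening of 5 (frequency units).
def _ExampleSimulateSpectrum():
    """ Example only: simulate and normalise a three-peak spectrum. """
    frequencies = [520.0, 1350.0, 1580.0];
    intensities = [1.0, 0.5, 2.0];
    spectrumX, spectrumY, spectrumYNorm = SimulateSpectrum(
        frequencies, intensities, 10.0,
        instrumentBroadening = 5.0, instrumentBroadeningShape = 'gaussian'
        );
    # spectrumYNorm has a maximum of 1.0 by construction.
    return (spectrumX, spectrumYNorm);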
| <filename>SpectroscoPy/Spectrum.py
# SpectroscoPy/Spectrum.py
# ---------
# Docstring
# ---------
""" Core routines for simulating spectra. """
# -------
# Imports
# -------
import math;
import numpy as np;
# ---------
# Constants
# ---------
""" To ensure the maximum is included in grids, np.arange() is called as np.arange(min, max + RangeStepMultiplier * step, step). """
RangeStepMultiplier = 1.0e-5;
""" A "safe" multiplier of the linewidth (e.g. sigma for Gaussian and gamma for Lorentzian functions) for padding grids used to evaluate peak functions. """
SpectrumPaddingMultiplier = 5.0;
""" Minimum number of points for automatically determining spectrum resolutions. """
SpectrumResolutionMinPoints = 1000;
# ---------
# Functions
# ---------
def Gaussian(x, i, mu, sigma):
""" Return G(x) = (i / (sigma * sqrt(2 * pi))) * exp(-1 * (x - mu) ** 2 / (2 * sigma ** 2)). """
# Definition of the Gaussian function with unit area taken from http://mathworld.wolfram.com/GaussianFunction.html.
return (i / (sigma * math.sqrt(2.0 * math.pi))) * np.exp(-1.0 * (x - mu) ** 2 / (2 * sigma ** 2));
def Lorentzian(x, i, x0, gamma):
""" Return L(x) = (i / pi) * ((0.5 * gamma) / ((x - x_0) ** 2) + (0.5 * gamma) ** 2). """
# Definition of the Lorentzian function with unit area taken from http://mathworld.wolfram.com/LorentzianFunction.html.
return (i / math.pi) * ((0.5 * gamma) / ((x - x0) ** 2 + (0.5 * gamma) ** 2));
def SimulateSpectrum(
frequencies, intensities, linewidths,
spectrumRange = None, spectrumResolution = None,
instrumentBroadening = None, instrumentBroadeningShape = 'gaussian'
):
"""
Given a set of mode frequencies and intensities plus a nominal linewidth or set of linewidths, simulate a spectrum.
Optionally, apply an instrument broadening by convolving with a fixed-width Gaussian or Lorentzian function.
Arguments:
frequencies -- mode frequencies.
intensities -- mode spectroscopic intensities.
linewidths -- nominal mode linewidth or list of mode linewidths, taken to be the full-width at half-maxima (FWHM) of Lorentzian peak profiles.
Keyword arguments:
spectrumRange -- range of frequencies over which to simulate the spectrum (defaults to approx. min(frequencies) - SpectrumPaddingMultiplier * max(linewidths) -> max(frequencies) + SpectrumPaddingMultiplier * max(linewidths)).
spectrumResolution -- frequency resolution of the spectrum (default: adjust to give at least SpectrumResolutionMinPoints data points, or a minimum resolution of 1, whichever is larger).
instrumentBroadening -- instrument broadening width (default: no instrument broadening).
instrumentBroadeningShape -- shape of instrument broadening ('gaussian' or 'lorentzian'; default: 'gaussian').
Return value:
A tuple of (spectrumX, spectrumY, spectrumYNorm) data.
Notes:
    If a min and max are specified with spectrumRange, they may be modified to "align" with the resolution (i.e. so that the frequency axis starts and finishes on an integer multiple of the resolution).
"""
numModes = len(frequencies);
if len(intensities) != numModes:
raise Exception("Error: The lengths of frequencies and intensities are inconsistent.");
try:
# Assume linewidths is a scalar linewidth and repeat it to a list.
linewidth = float(linewidths);
linewidths = [linewidth] * numModes;
except TypeError:
# If linewidths is a list or multi-element NumPy array, casting to a float will raise a TypeError.
if len(linewidths) != numModes:
raise Exception("Error: The lengths of frequencies and linewidths are inconsistent.");
# Set a minimum and maximum.
spectrumMin, spectrumMax = None, None;
if spectrumRange == None:
maxLinewidth = max(linewidths);
if numModes == 1 and maxLinewidth == 0.0:
raise Exception("Error: Cannot determine a spectrum range automatically - please specify manually using the spectrumRange argument.");
# +/- 5 sigma/gamma should be sufficient for this.
spectrumMin = min(frequencies) - SpectrumPaddingMultiplier * maxLinewidth;
spectrumMax = max(frequencies) + SpectrumPaddingMultiplier * maxLinewidth;
else:
spectrumMin, spectrumMax = spectrumRange;
if spectrumMin == spectrumMax:
raise Exception("Error: The min and max specified in spectrumRange are the same.");
if spectrumMax < spectrumMin:
spectrumMax, spectrumMin = spectrumMin, spectrumMax;
# Set a resolution if required.
if spectrumResolution == None:
nominalResolution = math.pow(
10.0, math.floor(math.log10(math.ceil(spectrumMax - spectrumMin) / SpectrumResolutionMinPoints))
);
spectrumResolution = min(nominalResolution, 1.0);
# If the spectrum range is being automatically determined, make sure it is "aligned" with the resolution.
if spectrumRange == None:
spectrumMin = spectrumResolution * math.floor(spectrumMin / spectrumResolution);
spectrumMax = spectrumResolution * math.ceil(spectrumMax / spectrumResolution);
# If applying instrument broadening, calculate the convolution kernel.
    # Also expand the range of the spectrum so that the convolution kernel does not produce boundary effects inside the selected region.
convNumPoints, convKernel = None, None;
if instrumentBroadening != None:
# Calculating the convolution kernel to +/- 5 sigma/gamma should be sufficient.
# According to https://en.wikipedia.org/wiki/Gaussian_blur, +/- 3 sigma is considered enough for Gaussian blurring kernels.
convNumPoints = int(
math.ceil(SpectrumPaddingMultiplier * instrumentBroadening / spectrumResolution)
);
convX = np.arange(
-1.0 * convNumPoints * spectrumResolution, (convNumPoints + 1.0e-5) * spectrumResolution, spectrumResolution
);
if instrumentBroadeningShape == 'gaussian':
convKernel = Gaussian(convX, 1.0, 0.0, instrumentBroadening);
elif instrumentBroadeningShape == 'lorentzian':
convKernel = Lorentzian(convX, 1.0, 0.0, instrumentBroadening);
else:
raise Exception("Error: Unrecognised instrumentBroadeningShape '{0}'.".format(instrumentBroadeningShape));
convKernel /= convKernel.sum();
spectrumMin = spectrumMin - convNumPoints * spectrumResolution;
spectrumMax = spectrumMax + convNumPoints * spectrumResolution;
# Simulate spectrum.
spectrumX = np.arange(spectrumMin, spectrumMax + 1.0e-5 * spectrumResolution, spectrumResolution, dtype = np.float64);
spectrumY = np.zeros_like(spectrumX, dtype = np.float64);
for frequency, intensity, linewidth in zip(frequencies, intensities, linewidths):
spectrumY += Lorentzian(spectrumX, intensity, frequency, linewidth);
# Apply instrument broadening if required.
if convKernel is not None:
# mode = 'valid' will cause np.convolve() to trim the first and last convNumPoints data points from the spectrum.
spectrumX = spectrumX[convNumPoints:-convNumPoints];
spectrumY = np.convolve(spectrumY, convKernel, mode = 'valid');
# Just in case my maths went wrong somewhere...
assert len(spectrumX) == len(spectrumY);
# Normalise spectrum.
spectrumYNorm = spectrumY / math.fabs(spectrumY.max())
# Return simulated spectrum.
return (spectrumX, spectrumY, spectrumYNorm);
| en | 0.672855 | # SpectroscoPy/Spectrum.py # --------- # Docstring # --------- Core routines for simulating spectra. # ------- # Imports # ------- # --------- # Constants # --------- To ensure the maximum is included in grids, np.arange() is called as np.arange(min, max + RangeStepMultiplier * step, step). A "safe" multiplier of the linewidth (e.g. sigma for Gaussian and gamma for Lorentzian functions) for padding grids used to evaluate peak functions. Minimum number of points for automatically determining spectrum resolutions. # --------- # Functions # --------- Return G(x) = (i / (sigma * sqrt(2 * pi))) * exp(-1 * (x - mu) ** 2 / (2 * sigma ** 2)). # Definition of the Gaussian function with unit area taken from http://mathworld.wolfram.com/GaussianFunction.html. Return L(x) = (i / pi) * ((0.5 * gamma) / ((x - x_0) ** 2) + (0.5 * gamma) ** 2). # Definition of the Lorentzian function with unit area taken from http://mathworld.wolfram.com/LorentzianFunction.html. Given a set of mode frequencies and intensities plus a nominal linewidth or set of linewidths, simulate a spectrum. Optionally, apply an instrument broadening by convolving with a fixed-width Gaussian or Lorentzian function. Arguments: frequencies -- mode frequencies. intensities -- mode spectroscopic intensities. linewidths -- nominal mode linewidth or list of mode linewidths, taken to be the full-width at half-maxima (FWHM) of Lorentzian peak profiles. Keyword arguments: spectrumRange -- range of frequencies over which to simulate the spectrum (defaults to approx. min(frequencies) - SpectrumPaddingMultiplier * max(linewidths) -> max(frequencies) + SpectrumPaddingMultiplier * max(linewidths)). spectrumResolution -- frequency resolution of the spectrum (default: adjust to give at least SpectrumResolutionMinPoints data points, or a minimum resolution of 1, whichever is larger). instrumentBroadening -- instrument broadening width (default: no instrument broadening). instrumentBroadeningShape -- shape of instrument broadening ('gaussian' or 'lorentzian'; default: 'gaussian'). Return value: A tuple of (spectrumX, spectrumY, spectrumYNorm) data. Notes: If a min and max are specified with spectrumRange, they may be modified to "align" with the resolution (i.e. so that the frequency axis starts and finished on an integer multiple of the resolution). # Assume linewidths is a scalar linewidth and repeat it to a list. # If linewidths is a list or multi-element NumPy array, casting to a float will raise a TypeError. # Set a minimum and maximum. # +/- 5 sigma/gamma should be sufficient for this. # Set a resolution if required. # If the spectrum range is being automatically determined, make sure it is "aligned" with the resolution. # If applying instrument broadening, calculate the convolution kernel. # Also expand the range of the spectrum so the the convolution kernel does not produce boundary effects inside the selected region. # Calculating the convolution kernel to +/- 5 sigma/gamma should be sufficient. # According to https://en.wikipedia.org/wiki/Gaussian_blur, +/- 3 sigma is considered enough for Gaussian blurring kernels. # Simulate spectrum. # Apply instrument broadening if required. # mode = 'valid' will cause np.convolve() to trim the first and last convNumPoints data points from the spectrum. # Just in case my maths went wrong somewhere... # Normalise spectrum. # Return simulated spectrum. | 2.480201 | 2 |
tests/unit/services/shop/article/test_article_availability.py | homeworkprod/byceps | 23 | 6633244 | """
:Copyright: 2014-2022 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime
from decimal import Decimal
from typing import Optional
from freezegun import freeze_time
import pytest
from byceps.database import generate_uuid
from byceps.services.shop.article.transfer.models import (
Article,
ArticleID,
ArticleNumber,
ArticleType,
)
from byceps.services.shop.article.service import is_article_available_now
from byceps.services.shop.shop.transfer.models import ShopID
@pytest.mark.parametrize(
'now, expected',
[
(datetime(2014, 4, 8, 12, 0, 0), False),
(datetime(2014, 9, 15, 17, 59, 59), False),
(datetime(2014, 9, 15, 18, 0, 0), True ),
(datetime(2014, 9, 19, 15, 0, 0), True ),
(datetime(2014, 9, 23, 17, 59, 59), True ),
(datetime(2014, 9, 23, 18, 0, 0), False),
(datetime(2014, 11, 4, 12, 0, 0), False),
],
)
def test_is_available_with_start_and_end(now, expected):
article = create_article(
datetime(2014, 9, 15, 18, 0, 0),
datetime(2014, 9, 23, 18, 0, 0),
)
with freeze_time(now):
assert is_article_available_now(article) == expected
@pytest.mark.parametrize(
'now, expected',
[
(datetime(2014, 4, 8, 12, 0, 0), False),
(datetime(2014, 9, 15, 17, 59, 59), False),
(datetime(2014, 9, 15, 18, 0, 0), True ),
(datetime(2014, 9, 19, 15, 0, 0), True ),
(datetime(2014, 9, 23, 17, 59, 59), True ),
(datetime(2014, 9, 23, 18, 0, 0), True ),
(datetime(2014, 11, 4, 12, 0, 0), True ),
],
)
def test_is_available_with_start_and_without_end(now, expected):
article = create_article(
datetime(2014, 9, 15, 18, 0, 0),
None,
)
with freeze_time(now):
assert is_article_available_now(article) == expected
@pytest.mark.parametrize(
'now, expected',
[
(datetime(2014, 4, 8, 12, 0, 0), True ),
(datetime(2014, 9, 15, 17, 59, 59), True ),
(datetime(2014, 9, 15, 18, 0, 0), True ),
(datetime(2014, 9, 19, 15, 0, 0), True ),
(datetime(2014, 9, 23, 17, 59, 59), True ),
(datetime(2014, 9, 23, 18, 0, 0), False),
(datetime(2014, 11, 4, 12, 0, 0), False),
],
)
def test_is_available_without_start_and_with_end(now, expected):
article = create_article(
None,
datetime(2014, 9, 23, 18, 0, 0),
)
with freeze_time(now):
assert is_article_available_now(article) == expected
@pytest.mark.parametrize(
'now, expected',
[
(datetime(2014, 4, 8, 12, 0, 0), True ),
(datetime(2014, 9, 15, 17, 59, 59), True ),
(datetime(2014, 9, 15, 18, 0, 0), True ),
(datetime(2014, 9, 19, 15, 0, 0), True ),
(datetime(2014, 9, 23, 17, 59, 59), True ),
(datetime(2014, 9, 23, 18, 0, 0), True ),
(datetime(2014, 11, 4, 12, 0, 0), True ),
],
)
def test_is_available_without_start_and_without_end(now, expected):
article = create_article(
None,
None,
)
with freeze_time(now):
assert is_article_available_now(article) == expected
def create_article(
available_from: Optional[datetime], available_until: Optional[datetime]
) -> Article:
return Article(
id=ArticleID(generate_uuid()),
shop_id=ShopID('any-shop'),
item_number=ArticleNumber('article-123'),
type_=ArticleType.other,
type_params={},
description='Cool thing',
price=Decimal('1.99'),
tax_rate=Decimal('0.19'),
available_from=available_from,
available_until=available_until,
total_quantity=1,
quantity=1,
max_quantity_per_order=1,
not_directly_orderable=False,
separate_order_required=False,
processing_required=False,
)
| """
:Copyright: 2014-2022 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime
from decimal import Decimal
from typing import Optional
from freezegun import freeze_time
import pytest
from byceps.database import generate_uuid
from byceps.services.shop.article.transfer.models import (
Article,
ArticleID,
ArticleNumber,
ArticleType,
)
from byceps.services.shop.article.service import is_article_available_now
from byceps.services.shop.shop.transfer.models import ShopID
@pytest.mark.parametrize(
'now, expected',
[
(datetime(2014, 4, 8, 12, 0, 0), False),
(datetime(2014, 9, 15, 17, 59, 59), False),
(datetime(2014, 9, 15, 18, 0, 0), True ),
(datetime(2014, 9, 19, 15, 0, 0), True ),
(datetime(2014, 9, 23, 17, 59, 59), True ),
(datetime(2014, 9, 23, 18, 0, 0), False),
(datetime(2014, 11, 4, 12, 0, 0), False),
],
)
def test_is_available_with_start_and_end(now, expected):
article = create_article(
datetime(2014, 9, 15, 18, 0, 0),
datetime(2014, 9, 23, 18, 0, 0),
)
with freeze_time(now):
assert is_article_available_now(article) == expected
@pytest.mark.parametrize(
'now, expected',
[
(datetime(2014, 4, 8, 12, 0, 0), False),
(datetime(2014, 9, 15, 17, 59, 59), False),
(datetime(2014, 9, 15, 18, 0, 0), True ),
(datetime(2014, 9, 19, 15, 0, 0), True ),
(datetime(2014, 9, 23, 17, 59, 59), True ),
(datetime(2014, 9, 23, 18, 0, 0), True ),
(datetime(2014, 11, 4, 12, 0, 0), True ),
],
)
def test_is_available_with_start_and_without_end(now, expected):
article = create_article(
datetime(2014, 9, 15, 18, 0, 0),
None,
)
with freeze_time(now):
assert is_article_available_now(article) == expected
@pytest.mark.parametrize(
'now, expected',
[
(datetime(2014, 4, 8, 12, 0, 0), True ),
(datetime(2014, 9, 15, 17, 59, 59), True ),
(datetime(2014, 9, 15, 18, 0, 0), True ),
(datetime(2014, 9, 19, 15, 0, 0), True ),
(datetime(2014, 9, 23, 17, 59, 59), True ),
(datetime(2014, 9, 23, 18, 0, 0), False),
(datetime(2014, 11, 4, 12, 0, 0), False),
],
)
def test_is_available_without_start_and_with_end(now, expected):
article = create_article(
None,
datetime(2014, 9, 23, 18, 0, 0),
)
with freeze_time(now):
assert is_article_available_now(article) == expected
@pytest.mark.parametrize(
'now, expected',
[
(datetime(2014, 4, 8, 12, 0, 0), True ),
(datetime(2014, 9, 15, 17, 59, 59), True ),
(datetime(2014, 9, 15, 18, 0, 0), True ),
(datetime(2014, 9, 19, 15, 0, 0), True ),
(datetime(2014, 9, 23, 17, 59, 59), True ),
(datetime(2014, 9, 23, 18, 0, 0), True ),
(datetime(2014, 11, 4, 12, 0, 0), True ),
],
)
def test_is_available_without_start_and_without_end(now, expected):
article = create_article(
None,
None,
)
with freeze_time(now):
assert is_article_available_now(article) == expected
def create_article(
available_from: Optional[datetime], available_until: Optional[datetime]
) -> Article:
return Article(
id=ArticleID(generate_uuid()),
shop_id=ShopID('any-shop'),
item_number=ArticleNumber('article-123'),
type_=ArticleType.other,
type_params={},
description='Cool thing',
price=Decimal('1.99'),
tax_rate=Decimal('0.19'),
available_from=available_from,
available_until=available_until,
total_quantity=1,
quantity=1,
max_quantity_per_order=1,
not_directly_orderable=False,
separate_order_required=False,
processing_required=False,
)
| en | 0.713704 | :Copyright: 2014-2022 <NAME> :License: Revised BSD (see `LICENSE` file for details) | 2.124643 | 2 |
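The parametrised cases above fix the availability semantics: the start bound is inclusive, the end bound is exclusive, and a missing bound leaves that side open. A sketch consistent with that truth table, reduced to the two datetimes for brevity (the real byceps service operates on the Article record and may differ in detail):
from datetime import datetime
from typing import Optional
def is_available(available_from: Optional[datetime], available_until: Optional[datetime], now: datetime) -> bool:
    # Start inclusive, end exclusive; None means unbounded on that side.
    if available_from is not None and now < available_from:
        return False
    if available_until is not None and now >= available_until:
        return False
    return True
# Mirrors two rows of the parametrised table: exactly at the start -> available, exactly at the end -> not.
assert is_available(datetime(2014, 9, 15, 18), datetime(2014, 9, 23, 18), datetime(2014, 9, 15, 18))
assert not is_available(datetime(2014, 9, 15, 18), datetime(2014, 9, 23, 18), datetime(2014, 9, 23, 18))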
alive.py | konsolxnxx/Petercord-Userbotilham | 0 | 6633245 | <filename>alive.py
from pyrogram import Filters
from userbot import bot
@bot.on_message(Filters.regex("^.alive"))
def amialivedad(event):
chat = event.chat.id
message = " 𝐏𝐨𝐜𝐨𝐧𝐠 𝐎𝐧𝐥𝐞𝐧 User :)"
bot.edit_message_text(chat_id=chat, message_id="me", text=message)
| <filename>alive.py
from pyrogram import Filters
from userbot import bot
@bot.on_message(Filters.regex("^.alive"))
def amialivedad(event):
chat = event.chat.id
message = " 𝐏𝐨𝐜𝐨𝐧𝐠 𝐎𝐧𝐥𝐞𝐧 User :)"
bot.edit_message_text(chat_id=chat, message_id="me", text=message)
| none | 1 | 2.356373 | 2 |
|
Artificial Intelligence/Natural Language Processing/Compute the Perplexity.py | rahamath2009/git-github.com-nishant-sethi-HackerRank | 76 | 6633246 | cross_entropy = 9.91
print (int(2 **cross_entropy )) | cross_entropy = 9.91
print (int(2 **cross_entropy )) | none | 1 | 1.627618 | 2 |
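The two-line snippet above applies the relation perplexity = 2 ** cross_entropy, which holds when the cross-entropy is measured in bits. A slightly fuller sketch with a hypothetical list of per-token model probabilities:
import math
token_probs = [0.1, 0.25, 0.05, 0.5]  # hypothetical probabilities the model assigned to each observed token
cross_entropy = -sum(math.log2(p) for p in token_probs) / len(token_probs)  # average bits per token
perplexity = 2 ** cross_entropy
print(round(perplexity, 2))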
|
FinalProject/MachineLearning/Perceptron/perceptron.py | CKPalk/MachineLearning | 0 | 6633247 | <filename>FinalProject/MachineLearning/Perceptron/perceptron.py
''' Work of <NAME> '''
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def main( argv ):
try:
training_filename = argv[ 1 ]
testing_filename = argv[ 2 ]
output_filename = argv[ 3 ]
except IndexError:
print( "Error, usage: \"python3 {} <training> <testing> <output>\"".format( argv[ 0 ] ) )
return
Training_DataFrame = pd.read_csv( training_filename )
X = Training_DataFrame.ix[:,0:-1]
Y = Training_DataFrame.ix[:,-1]
Testing_DataFrame = pd.read_csv( testing_filename )
testing_X = Testing_DataFrame.ix[:,0:-1]
testing_Y = Testing_DataFrame.ix[:,-1]
'''
Perceptron
'''
from sklearn.linear_model import Perceptron
# Hyper Parameters:
alpha = 0.0001
n_iter = 20
# Fit Classifier
print( "{} Started training".format( str( datetime.now() ) ) )
P_classifier = Perceptron( alpha = alpha, n_iter = n_iter )
P_classifier.fit( X, Y )
print( "{} Stopped training".format( str( datetime.now() ) ) )
# Report results
P_score = P_classifier.score( testing_X, testing_Y )
print( "\nPerceptron Accuracy:", P_score )
#
if __name__=='__main__':
main( sys.argv )
| <filename>FinalProject/MachineLearning/Perceptron/perceptron.py
''' Work of <NAME> '''
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def main( argv ):
try:
training_filename = argv[ 1 ]
testing_filename = argv[ 2 ]
output_filename = argv[ 3 ]
except IndexError:
print( "Error, usage: \"python3 {} <training> <testing> <output>\"".format( argv[ 0 ] ) )
return
Training_DataFrame = pd.read_csv( training_filename )
X = Training_DataFrame.ix[:,0:-1]
Y = Training_DataFrame.ix[:,-1]
Testing_DataFrame = pd.read_csv( testing_filename )
testing_X = Testing_DataFrame.ix[:,0:-1]
testing_Y = Testing_DataFrame.ix[:,-1]
'''
Perceptron
'''
from sklearn.linear_model import Perceptron
# Hyper Parameters:
alpha = 0.0001
n_iter = 20
# Fit Classifier
print( "{} Started training".format( str( datetime.now() ) ) )
P_classifier = Perceptron( alpha = alpha, n_iter = n_iter )
P_classifier.fit( X, Y )
print( "{} Stopped training".format( str( datetime.now() ) ) )
# Report results
P_score = P_classifier.score( testing_X, testing_Y )
print( "\nPerceptron Accuracy:", P_score )
#
if __name__=='__main__':
main( sys.argv )
| en | 0.52164 | Work of <NAME> Perceptron # Hyper Parameters: # Fit Classifier # Report results # | 3.466509 | 3 |
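The perceptron script above relies on two legacy APIs: DataFrame.ix (removed in pandas 1.0) and Perceptron(n_iter=...) (replaced by max_iter and tol around scikit-learn 0.21). A rough modern equivalent of the data slicing and classifier setup, keeping the same hyperparameter intent; the CSV path is a placeholder:
import pandas as pd
from sklearn.linear_model import Perceptron
Training_DataFrame = pd.read_csv("training.csv")   # placeholder path
X = Training_DataFrame.iloc[:, 0:-1]               # .iloc replaces the removed .ix for positional slicing
Y = Training_DataFrame.iloc[:, -1]
# n_iter was split into max_iter (epoch cap) and tol (stopping tolerance); tol=None runs all epochs.
P_classifier = Perceptron(alpha=0.0001, max_iter=20, tol=None)
P_classifier.fit(X, Y)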
iogt_users/middlewares.py | unicef/io | 20 | 6633248 | <reponame>unicef/io
from django.shortcuts import redirect
from django.urls import resolve
from home.models import SiteSettings
class RegistrationSurveyRedirectMiddleware:
"""
The purpose of this middleware is to make the registration survey form
mandatory. See https://github.com/unicef/iogt/issues/113 for details
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
site_settings = SiteSettings.for_request(request)
if site_settings.registration_survey:
is_registration_survey_url = request.path_info == site_settings.registration_survey.localized.url
else:
is_registration_survey_url = False
allowed_url_names = ['account_logout']
current_url = resolve(request.path_info).url_name
is_url_allowed = current_url in allowed_url_names or is_registration_survey_url
is_registered_user = not request.user.is_anonymous
if is_registered_user and not request.user.has_filled_registration_survey \
and not is_url_allowed and site_settings.registration_survey:
site_settings = SiteSettings.for_request(request)
return redirect(site_settings.registration_survey.localized.url)
return self.get_response(request)
| from django.shortcuts import redirect
from django.urls import resolve
from home.models import SiteSettings
class RegistrationSurveyRedirectMiddleware:
"""
The purpose of this middleware is to make the registration survey form
mandatory. See https://github.com/unicef/iogt/issues/113 for details
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
site_settings = SiteSettings.for_request(request)
if site_settings.registration_survey:
is_registration_survey_url = request.path_info == site_settings.registration_survey.localized.url
else:
is_registration_survey_url = False
allowed_url_names = ['account_logout']
current_url = resolve(request.path_info).url_name
is_url_allowed = current_url in allowed_url_names or is_registration_survey_url
is_registered_user = not request.user.is_anonymous
if is_registered_user and not request.user.has_filled_registration_survey \
and not is_url_allowed and site_settings.registration_survey:
site_settings = SiteSettings.for_request(request)
return redirect(site_settings.registration_survey.localized.url)
return self.get_response(request) | en | 0.86099 | The purpose of this middleware is to make the registration survey form mandatory. See https://github.com/unicef/iogt/issues/113 for details | 2.262813 | 2 |
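Middleware like the class above only takes effect once it is registered in the project's settings. A hedged sketch of that entry follows; the settings module location and the surrounding ordering are assumptions, and the dotted path simply mirrors the file path iogt_users/middlewares.py:
# settings.py (hypothetical excerpt) -- placed after AuthenticationMiddleware so request.user is populated
# before the registration-survey check runs.
MIDDLEWARE = [
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "iogt_users.middlewares.RegistrationSurveyRedirectMiddleware",
]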
maju/conector/mysql/__init__.py | ymussi/maju-playlist | 0 | 6633249 | import os
from sqlalchemy import create_engine
from maju.config import read_config
from maju.conector.sql import SQLDBContext
def mysql_engine(schema, pool_size=1, max_overflow=25):
dbname = read_config().get("schema", schema)
con_str = read_config().get("database", dbname)
engine = create_engine("{}/{}".format(con_str, schema),
pool_size=pool_size, max_overflow=max_overflow, pool_recycle=30 * 60)
return engine
class CadastroDBContext(SQLDBContext):
def __init__(self, engine):
super().__init__(engine) | import os
from sqlalchemy import create_engine
from maju.config import read_config
from maju.conector.sql import SQLDBContext
def mysql_engine(schema, pool_size=1, max_overflow=25):
dbname = read_config().get("schema", schema)
con_str = read_config().get("database", dbname)
engine = create_engine("{}/{}".format(con_str, schema),
pool_size=pool_size, max_overflow=max_overflow, pool_recycle=30 * 60)
return engine
class CadastroDBContext(SQLDBContext):
def __init__(self, engine):
super().__init__(engine) | none | 1 | 2.693702 | 3 |
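A brief usage sketch of the factory and context above; the schema name and config contents are hypothetical, since the snippet only shows that read_config() maps a schema to a database name and that name to a connection-string prefix:
# Hypothetical usage: build a pooled engine for one schema and wrap it in the SQL context.
engine = mysql_engine("cadastro", pool_size=5, max_overflow=10)
ctx = CadastroDBContext(engine)
# pool_recycle=30*60 in mysql_engine recycles idle connections before MySQL's wait_timeout can drop them.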
|
tests/unit/modules/test_cmci_delete.py | vera-chan/ibm_zos_cics | 0 | 6633250 | <reponame>vera-chan/ibm_zos_cics
# -*- coding: utf-8 -*-
# Copyright (c) IBM Corporation 2020
# Apache License, Version 2.0 (see https://opensource.org/licenses/Apache-2.0)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.ibm.ibm_zos_cics.plugins.modules import cmci_delete
from ansible_collections.ibm.ibm_zos_cics.tests.unit.helpers.cmci_helper import (
HOST, PORT, CONTEXT, cmci_module, CMCITestHelper
)
def test_delete_context(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_delete('cicsdefinitionbundle', 1)
cmci_module.expect(
result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/cicsdefinitionbundle/CICSEX56/',
1
)
)
cmci_module.run(cmci_delete, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'type': 'cicsdefinitionbundle'
})
def test_delete_context_scope(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_delete('cicsdefinitionbundle', 1, scope='IYCWEMW2')
cmci_module.expect(
result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/cicsdefinitionbundle/CICSEX56/IYCWEMW2',
1
)
)
cmci_module.run(cmci_delete, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'scope': 'IYCWEMW2',
'type': 'cicsdefinitionbundle'
})
def test_delete_criteria(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_delete('cicsdefinitionbundle', 1, parameters='?CRITERIA=%28FOO%3D%27BAR%27%29')
cmci_module.expect(
result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/'
'cicsdefinitionbundle/CICSEX56/?CRITERIA=%28FOO%3D%27BAR%27%29',
1
)
)
cmci_module.run(cmci_delete, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'type': 'cicsdefinitionbundle',
'resources': {
'filter': {
'FOO': 'BAR'
}
}
})
def test_delete_parameter(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_delete('cicsdefinitionbundle', 1, parameters='?PARAMETER=CSDGROUP%28%2A%29')
cmci_module.expect(
result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/'
'cicsdefinitionbundle/CICSEX56/?PARAMETER=CSDGROUP%28%2A%29',
1
)
)
cmci_module.run(cmci_delete, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'type': 'cicsdefinitionbundle',
'resources': {
'parameter': 'CSDGROUP(*)'
}
})
def test_delete_criteria_parameter(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_delete(
'cicsdefinitionbundle',
1,
parameters='?CRITERIA=%28FOO%3D%27BAR%27%29&PARAMETER=CSDGROUP%28%2A%29'
)
cmci_module.expect(
result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/'
'cicsdefinitionbundle/CICSEX56/?CRITERIA=%28FOO%3D%27BAR%27%29&PARAMETER=CSDGROUP%28%2A%29',
1
)
)
cmci_module.run(cmci_delete, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'type': 'cicsdefinitionbundle',
'resources': {
'filter': {
'FOO': 'BAR'
},
'parameter': 'CSDGROUP(*)'
}
})
def result(url, success_count):
return {
'changed': True,
'connect_version': '0560',
'cpsm_reason': '',
'cpsm_reason_code': 0,
'cpsm_response': 'OK',
'cpsm_response_code': 1024,
'http_status': 'OK',
'http_status_code': 200,
'record_count': 1,
'request': {
'url': url,
'method': 'DELETE',
'body': None
},
'success_count': success_count
}
| # -*- coding: utf-8 -*-
# Copyright (c) IBM Corporation 2020
# Apache License, Version 2.0 (see https://opensource.org/licenses/Apache-2.0)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.ibm.ibm_zos_cics.plugins.modules import cmci_delete
from ansible_collections.ibm.ibm_zos_cics.tests.unit.helpers.cmci_helper import (
HOST, PORT, CONTEXT, cmci_module, CMCITestHelper
)
def test_delete_context(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_delete('cicsdefinitionbundle', 1)
cmci_module.expect(
result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/cicsdefinitionbundle/CICSEX56/',
1
)
)
cmci_module.run(cmci_delete, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'type': 'cicsdefinitionbundle'
})
def test_delete_context_scope(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_delete('cicsdefinitionbundle', 1, scope='IYCWEMW2')
cmci_module.expect(
result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/cicsdefinitionbundle/CICSEX56/IYCWEMW2',
1
)
)
cmci_module.run(cmci_delete, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'scope': 'IYCWEMW2',
'type': 'cicsdefinitionbundle'
})
def test_delete_criteria(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_delete('cicsdefinitionbundle', 1, parameters='?CRITERIA=%28FOO%3D%27BAR%27%29')
cmci_module.expect(
result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/'
'cicsdefinitionbundle/CICSEX56/?CRITERIA=%28FOO%3D%27BAR%27%29',
1
)
)
cmci_module.run(cmci_delete, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'type': 'cicsdefinitionbundle',
'resources': {
'filter': {
'FOO': 'BAR'
}
}
})
def test_delete_parameter(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_delete('cicsdefinitionbundle', 1, parameters='?PARAMETER=CSDGROUP%28%2A%29')
cmci_module.expect(
result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/'
'cicsdefinitionbundle/CICSEX56/?PARAMETER=CSDGROUP%28%2A%29',
1
)
)
cmci_module.run(cmci_delete, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'type': 'cicsdefinitionbundle',
'resources': {
'parameter': 'CSDGROUP(*)'
}
})
def test_delete_criteria_parameter(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_delete(
'cicsdefinitionbundle',
1,
parameters='?CRITERIA=%28FOO%3D%27BAR%27%29&PARAMETER=CSDGROUP%28%2A%29'
)
cmci_module.expect(
result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/'
'cicsdefinitionbundle/CICSEX56/?CRITERIA=%28FOO%3D%27BAR%27%29&PARAMETER=CSDGROUP%28%2A%29',
1
)
)
cmci_module.run(cmci_delete, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'type': 'cicsdefinitionbundle',
'resources': {
'filter': {
'FOO': 'BAR'
},
'parameter': 'CSDGROUP(*)'
}
})
def result(url, success_count):
return {
'changed': True,
'connect_version': '0560',
'cpsm_reason': '',
'cpsm_reason_code': 0,
'cpsm_response': 'OK',
'cpsm_response_code': 1024,
'http_status': 'OK',
'http_status_code': 200,
'record_count': 1,
'request': {
'url': url,
'method': 'DELETE',
'body': None
},
'success_count': success_count
} | en | 0.464769 | # -*- coding: utf-8 -*- # Copyright (c) IBM Corporation 2020 # Apache License, Version 2.0 (see https://opensource.org/licenses/Apache-2.0) # type: (CMCITestHelper) -> None # type: (CMCITestHelper) -> None # type: (CMCITestHelper) -> None # type: (CMCITestHelper) -> None # type: (CMCITestHelper) -> None | 1.783035 | 2 |
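The expected URLs in these tests embed percent-encoded CMCI query strings. For illustration, the encoded CRITERIA and PARAMETER values can be reproduced with the standard library (this is not necessarily how the Ansible module itself builds them):
from urllib.parse import quote
print("?CRITERIA=" + quote("(FOO='BAR')", safe=""))    # ?CRITERIA=%28FOO%3D%27BAR%27%29
print("?PARAMETER=" + quote("CSDGROUP(*)", safe=""))   # ?PARAMETER=CSDGROUP%28%2A%29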