""" Module providing unit-testing for `~halotools.utils.value_added_halo_table_functions`.
"""
from __future__ import (absolute_import, division, print_function)
from copy import deepcopy
from collections import Counter
import numpy as np
import pytest
from astropy.extern.six.moves import xrange as range
from ..value_added_halo_table_functions import broadcast_host_halo_property, add_halo_hostid
from ..crossmatch import crossmatch
from ...sim_manager import FakeSim
from ...custom_exceptions import HalotoolsError
__all__ = ('test_broadcast_host_halo_mass1', )
def test_broadcast_host_halo_mass1():
"""
"""
fake_sim = FakeSim()
t = fake_sim.halo_table
broadcast_host_halo_property(t, 'halo_mvir', delete_possibly_existing_column=True)
assert 'halo_mvir_host_halo' in list(t.keys())
hostmask = t['halo_hostid'] == t['halo_id']
assert np.all(t['halo_mvir_host_halo'][hostmask] == t['halo_mvir'][hostmask])
assert np.any(t['halo_mvir_host_halo'][~hostmask] != t['halo_mvir'][~hostmask])
# Verify that broadcast_host_halo_property and the crossmatch function
# give identical results for the calculation of host halo mass
idx_table1, idx_table2 = crossmatch(t['halo_hostid'], t['halo_id'])
t['tmp'] = np.zeros(len(t), dtype=t['halo_mvir'].dtype)
t['tmp'][idx_table1] = t['halo_mvir'][idx_table2]
assert np.all(t['tmp'] == t['halo_mvir_host_halo'])
data = Counter(t['halo_hostid'])
frequency_analysis = data.most_common()
for igroup in range(0, 10):
idx = np.where(t['halo_hostid'] == frequency_analysis[igroup][0])[0]
idx_host = np.where(t['halo_id'] == frequency_analysis[igroup][0])[0]
assert np.all(t['halo_mvir_host_halo'][idx] == t['halo_mvir'][idx_host])
for igroup in range(-10, 0):  # the 10 least-common groups; range(-10, -1) would skip the last entry
idx = np.where(t['halo_hostid'] == frequency_analysis[igroup][0])[0]
idx_host = np.where(t['halo_id'] == frequency_analysis[igroup][0])[0]
assert np.all(t['halo_mvir_host_halo'][idx] == t['halo_mvir'][idx_host])
del t
def test_broadcast_host_halo_mass2():
"""
"""
fake_sim = FakeSim()
with pytest.raises(HalotoolsError) as err:
broadcast_host_halo_property(4, 'xxx')
substr = "The input ``table`` must be an Astropy `~astropy.table.Table` object"
assert substr in err.value.args[0]
def test_broadcast_host_halo_mass3():
"""
"""
fake_sim = FakeSim()
t = fake_sim.halo_table
with pytest.raises(HalotoolsError) as err:
broadcast_host_halo_property(t, 'xxx')
substr = "The input table does not have the input ``halo_property_key``"
assert substr in err.value.args[0]
def test_broadcast_host_halo_mass4():
"""
"""
fake_sim = FakeSim()
t = fake_sim.halo_table
with pytest.raises(HalotoolsError) as err:
broadcast_host_halo_property(t, 'halo_mvir')
substr = "Your input table already has an existing new_colname column name."
assert substr in err.value.args[0]
broadcast_host_halo_property(t, 'halo_mvir', delete_possibly_existing_column=True)
def test_add_halo_hostid1():
"""
"""
with pytest.raises(HalotoolsError) as err:
add_halo_hostid(5, delete_possibly_existing_column=False)
substr = "The input ``table`` must be an Astropy `~astropy.table.Table` object"
assert substr in err.value.args[0]
def test_add_halo_hostid2():
"""
"""
fake_sim = FakeSim()
t = fake_sim.halo_table
del t['halo_id']
with pytest.raises(HalotoolsError) as err:
add_halo_hostid(t, delete_possibly_existing_column=False)
substr = "The input table must have ``halo_upid`` and ``halo_id`` keys"
assert substr in err.value.args[0]
def test_add_halo_hostid3():
"""
"""
fake_sim = FakeSim()
t = fake_sim.halo_table
with pytest.raises(HalotoolsError) as err:
add_halo_hostid(t, delete_possibly_existing_column=False)
substr = "Your input table already has an existing ``halo_hostid`` column name."
assert substr in err.value.args[0]
existing_halo_hostid = deepcopy(t['halo_hostid'].data)
del t['halo_hostid']
add_halo_hostid(t, delete_possibly_existing_column=False)
assert np.all(t['halo_hostid'] == existing_halo_hostid)
add_halo_hostid(t, delete_possibly_existing_column=True)
assert np.all(t['halo_hostid'] == existing_halo_hostid)
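def _example_value_added_usage():
    """
    Hedged usage sketch, not one of the unit tests above: shows how the two
    helpers under test are typically applied to a FakeSim halo table. The
    function name is illustrative only.
    """
    fake_sim = FakeSim()
    t = fake_sim.halo_table
    # Attach each (sub)halo's host mass as a new ``halo_mvir_host_halo`` column.
    broadcast_host_halo_property(t, 'halo_mvir', delete_possibly_existing_column=True)
    # Recompute ``halo_hostid`` from the existing ``halo_upid``/``halo_id`` columns.
    add_halo_hostid(t, delete_possibly_existing_column=True)
    return t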
import logging
from typing import Callable, Union
from tags_model import (TagCategory, TagCategoryBase, TagCategoryBaseItem, TagItem)
tag_configuration: list[TagCategoryBase] = list()
def load_tag_configuration(config_file_name: str) -> None:
with open(config_file_name, mode='r', encoding='utf-8-sig') as f:
is_heading: bool = True
current_heading_index: int = -1
for line in f:
tag_line: str = line.strip()
if not tag_line:
is_heading = True
# An empty line marks the end of a category;
# the next non-empty line begins a new category.
continue
if is_heading:
tag_configuration.append(TagCategoryBase((tag_line, None)))
current_heading_index += 1
is_heading = False
else:
tag_configuration[current_heading_index].add_item(tag_line)
log_tags('Loaded configuration:', tag_configuration)
def load_tag_category(loaded_categories: list[TagCategory], tag_config: TagCategoryBase,
included_predicate: Callable[[TagItem], bool]) -> TagCategory:
def initialize_tag(tag_category: TagCategory, tag_config: TagCategoryBaseItem,
included_predicate: Callable[[TagItem], bool]) -> TagItem:
result: TagItem = TagItem((tag_config.name, tag_category))
# Use a predicate or an included property initializer?
result.included = included_predicate(result)
return result
result: TagCategory = TagCategory((tag_config.name, None))
loaded_categories.append(result)
result.items = [initialize_tag(result, tag, included_predicate) for tag in tag_config.items]
return result
def load_tags(tags_file_name: str) -> list[TagCategory]:
def load_current_tags() -> set[str]:
with open(tags_file_name, mode='r', encoding='utf-8-sig') as f:
# Skip <!DOCTYPE html> header line
next(f)
# strip '<div>' from left and '</div>\n' from right for the tag name
result: set[str] = {get_tag_key(line[5:-7]) for line in f}
return result
def get_tag_key(tag_name: str) -> str:
return tag_name.upper()
def unregister_tag(tag: str) -> bool:
result: bool = tag in current_tags
if result:
current_tags.remove(tag)
return result
current_tags: set[str] = load_current_tags()
result: list[TagCategory] = list()
for tag_category in tag_configuration:
load_tag_category(result, tag_category, lambda tag: unregister_tag(get_tag_key(tag.name)))
if len(current_tags):
additional: TagCategoryBase = TagCategoryBase(('Additional tags', None))
additional.items = [TagCategoryBaseItem((tag_name, additional)) for tag_name in current_tags]
load_tag_category(result, additional, lambda t: True)
log_tags('Loaded file tags:', result)
return result
def save_tags(tags_file_name: str, tag_categories: list[str]) -> None:
with open(tags_file_name, mode='w', encoding='utf-8-sig') as f:
f.write('<!DOCTYPE html>\n')
for tag in tag_categories:
_ = f.write(f'<div>{tag}</div>\n')
def log_tags(list_description: str, tag_list: Union[list[TagCategoryBase], list[TagCategory]]) -> None:
logging.debug(list_description)
for category in tag_list:
[logging.debug(f'{category.name} : {tag.__dict__}') for tag in category.items]
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
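# Hedged usage sketch (illustrative only): the file names below are placeholders,
# and it assumes TagItem exposes ``name``/``included`` as set by the loaders above.
if __name__ == '__main__':
    load_tag_configuration('tag_categories.txt')   # hypothetical configuration file
    categories = load_tags('tags.html')            # hypothetical tags file
    included_names = [tag.name for category in categories
                      for tag in category.items if tag.included]
    save_tags('tags.html', included_names)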
# pkgs/ops-pkg/src/genie/libs/ops/lldp/iosxr/tests/lldp_output.py
'''LLDP Genie Ops Object Outputs for IOSXR.'''
class LldpOutput(object):
ShowLldp = {
"hello_timer": 30,
"enabled": True,
"hold_timer": 120,
"status": "active",
"reinit_delay": 2
}
ShowLldpEntry = {
'interfaces': {
'GigabitEthernet0/0/0/0': {
'port_id': {
'GigabitEthernet2': {
'neighbors': {
'R1_csr1000v.openstacklocal': {
'chassis_id': '001e.49f7.2c00',
'port_description': 'GigabitEthernet2',
'system_name': 'R1_csr1000v.openstacklocal',
'neighbor_id': 'R1_csr1000v.openstacklocal',
'system_description': 'Cisco IOS Software [Everest], Virtual XE Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 16.6.1, RELEASE SOFTWARE (fc2)\nTechnical Support: http://www.cisco.com/techsupport\nCopyright (c) 1986-2017 by Cisco Systems, Inc.\nCompiled Sat 22-Jul-17 05:51 by',
'time_remaining': 117,
'hold_time': 120,
'capabilities': {
'bridge': {
'system': True,
},
'router': {
'system': True,
'enabled': True,
},
},
'management_address': '10.1.2.1',
},
},
},
},
},
'GigabitEthernet0/0/0/1': {
'port_id': {
'Ethernet1/2': {
'neighbors': {
'R3_n9kv': {
'chassis_id': '5e00.8002.0009',
'port_description': 'Ethernet1/2',
'system_name': 'R3_n9kv',
'neighbor_id': 'R3_n9kv',
'system_description': 'Cisco Nexus Operating System (NX-OS) Software 7.0(3)I7(1)\nTAC support: http://www.cisco.com/tac\nCopyright (c) 2002-2017, Cisco Systems, Inc. All rights reserved.\n',
'time_remaining': 103,
'hold_time': 120,
'capabilities': {
'bridge': {
'system': True,
'enabled': True,
},
'router': {
'system': True,
'enabled': True,
},
},
},
},
},
},
},
},
'total_entries': 2,
}
ShowLldpNeighborsDetail = {
'interfaces': {
'GigabitEthernet0/0/0/0': {
'port_id': {
'GigabitEthernet2': {
'neighbors': {
'R1_csr1000v.openstacklocal': {
'chassis_id': '001e.49f7.2c00',
'port_description': 'GigabitEthernet2',
'system_name': 'R1_csr1000v.openstacklocal',
'neighbor_id': 'R1_csr1000v.openstacklocal',
'system_description': 'Cisco IOS Software [Everest], Virtual XE Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 16.6.1, RELEASE SOFTWARE (fc2)\nTechnical Support: http://www.cisco.com/techsupport\nCopyright (c) 1986-2017 by Cisco Systems, Inc.\nCompiled Sat 22-Jul-17 05:51 by',
'time_remaining': 90,
'hold_time': 120,
'capabilities': {
'bridge': {
'system': True,
},
'router': {
'system': True,
'enabled': True,
},
},
'management_address': '10.1.2.1',
},
},
},
},
},
'GigabitEthernet0/0/0/1': {
'port_id': {
'Ethernet1/2': {
'neighbors': {
'R3_n9kv': {
'chassis_id': '5e00.8002.0009',
'port_description': 'Ethernet1/2',
'system_name': 'R3_n9kv',
'neighbor_id': 'R3_n9kv',
'system_description': 'Cisco Nexus Operating System (NX-OS) Software 7.0(3)I7(1)\nTAC support: http://www.cisco.com/tac\nCopyright (c) 2002-2017, Cisco Systems, Inc. All rights reserved.\n',
'time_remaining': 106,
'hold_time': 120,
'capabilities': {
'bridge': {
'system': True,
'enabled': True,
},
'router': {
'system': True,
'enabled': True,
},
},
},
},
},
},
},
},
'total_entries': 2,
}
ShowLldpTraffic = {
"counters": {
"frame_in": 399,
"frame_out": 588,
"frame_error_in": 0,
"frame_discard": 0,
"tlv_discard": 119,
'tlv_unknown': 119,
'entries_aged_out': 0
}
}
ShowLldpInterface = {
'interfaces': {
'GigabitEthernet0/0/0/0': {
'tx': 'enabled',
'rx': 'enabled',
'tx_state': 'idle',
'rx_state': 'wait for frame',
},
'GigabitEthernet0/0/0/1': {
'tx': 'enabled',
'rx': 'enabled',
'tx_state': 'idle',
'rx_state': 'wait for frame',
},
}
}
lldpOutput = {
'enabled': True,
'hello_timer': 30,
'hold_timer': 120,
'interfaces': {
'GigabitEthernet0/0/0/1': {
'port_id': {
'Ethernet1/2': {
'neighbors': {
'R3_n9kv': {
'neighbor_id': 'R3_n9kv',
'system_name': 'R3_n9kv',
'system_description': 'Cisco Nexus Operating System (NX-OS) Software 7.0(3)I7(1)\nTAC support: http://www.cisco.com/tac\nCopyright (c) 2002-2017, Cisco Systems, Inc. All rights reserved.\n',
'chassis_id': '5e00.8002.0009',
'port_description': 'Ethernet1/2',
'capabilities': {
'router': {
'enabled': True,
},
'bridge': {
'enabled': True,
},
},
},
},
},
},
'enabled': True,
},
'GigabitEthernet0/0/0/0': {
'port_id': {
'GigabitEthernet2': {
'neighbors': {
'R1_csr1000v.openstacklocal': {
'neighbor_id': 'R1_csr1000v.openstacklocal',
'system_name': 'R1_csr1000v.openstacklocal',
'system_description': 'Cisco IOS Software [Everest], Virtual XE Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 16.6.1, RELEASE SOFTWARE (fc2)\nTechnical Support: http://www.cisco.com/techsupport\nCopyright (c) 1986-2017 by Cisco Systems, Inc.\nCompiled Sat 22-Jul-17 05:51 by',
'chassis_id': '001e.49f7.2c00',
'port_description': 'GigabitEthernet2',
'management_address': '10.1.2.1',
'capabilities': {
'router': {
'enabled': True,
},
},
},
},
},
},
'enabled': True,
},
},
'counters': {
'frame_in': 399,
'frame_out': 588,
'frame_error_in': 0,
'frame_discard': 0,
'tlv_discard': 119,
'tlv_unknown': 119,
'entries_aged_out': 0,
},
}
"""secp256k1 elliptic curve cryptography interface."""
# The process for using SECP256k1 is complex and more involved than ED25519.
#
# See https://xrpl.org/cryptographic-keys.html#secp256k1-key-derivation
# for an overview of the algorithm.
from __future__ import annotations
from hashlib import sha256
from typing import Callable, Tuple, Type, cast
from ecpy.curves import Curve # type: ignore
from ecpy.ecdsa import ECDSA # type: ignore
from ecpy.keys import ECPrivateKey, ECPublicKey # type: ignore
from typing_extensions import Final, Literal
from xrpl.core.keypairs.crypto_implementation import CryptoImplementation
from xrpl.core.keypairs.exceptions import XRPLKeypairsException
from xrpl.core.keypairs.helpers import sha512_first_half
_CURVE: Final[Curve] = Curve.get_curve("secp256k1")
_GROUP_ORDER: Final[int] = _CURVE.order
_SIGNER: Final[ECDSA] = ECDSA("DER")
# String keys must be _KEY_LENGTH long
_KEY_LENGTH: Final[int] = 66
# Pad string keys with _PADDING_PREFIX to reach _KEY_LENGTH
_PADDING_PREFIX: Final[str] = "0"
# Generated sequence values are _SEQUENCE_SIZE bytes unsigned big-endian
_SEQUENCE_SIZE: Final[int] = 4
_SEQUENCE_MAX: Final[int] = 256 ** _SEQUENCE_SIZE
# Intermediate private keys are always padded with 4 bytes of zeros
_INTERMEDIATE_KEYPAIR_PADDING: Final[bytes] = (0).to_bytes(
4,
byteorder="big",
signed=False,
)
class SECP256K1(CryptoImplementation):
"""
Methods for using the ECDSA cryptographic system with the secp256k1
elliptic curve.
"""
@classmethod
def derive_keypair(
cls: Type[SECP256K1], decoded_seed: bytes, is_validator: bool
) -> Tuple[str, str]:
"""
Derive the public and private secp256k1 keys from a given seed value.
Args:
decoded_seed: The secp256k1 seed to derive a key pair from, as bytes.
is_validator: Whether to derive a validator keypair.
Returns:
A (public key, private key) pair derived from the given seed.
"""
root_public, root_private = cls._do_derive_part(decoded_seed, "root")
# validator keys just stop at the first pass
if is_validator:
return cls._format_keys(root_public, root_private)
mid_public, mid_private = cls._do_derive_part(
cls._public_key_to_bytes(root_public),
"mid",
)
final_public, final_private = cls._derive_final_pair(
root_public,
root_private,
mid_public,
mid_private,
)
return cls._format_keys(final_public, final_private)
@classmethod
def sign(cls: Type[SECP256K1], message: bytes, private_key: str) -> bytes:
"""
Signs a message using a given secp256k1 private key.
Args:
message: The message to sign, as bytes.
private_key: The private key to use to sign the message.
Returns:
The signature of the message, as bytes.
"""
wrapped_private = ECPrivateKey(int(private_key, 16), _CURVE)
return cast(
bytes,
_SIGNER.sign_rfc6979(
sha512_first_half(message),
wrapped_private,
sha256,
canonical=True,
),
)
@classmethod
def is_valid_message(
cls: Type[SECP256K1], message: bytes, signature: bytes, public_key: str
) -> bool:
"""
Verifies the signature on a given message.
Args:
message: The message to validate.
signature: The signature of the message.
public_key: The public key to use to verify the message and
signature.
Returns:
Whether the message is valid for the given signature and public key.
"""
public_key_point = _CURVE.decode_point(bytes.fromhex(public_key))
wrapped_public = ECPublicKey(public_key_point)
return cast(
bool,
_SIGNER.verify(sha512_first_half(message), signature, wrapped_public),
)
@classmethod
def _format_keys(
cls: Type[SECP256K1], public: ECPublicKey, private: ECPrivateKey
) -> Tuple[str, str]:
return (
cls._format_key(cls._public_key_to_str(public)),
cls._format_key(cls._private_key_to_str(private)),
)
@classmethod
def _format_key(cls: Type[SECP256K1], keystr: str) -> str:
return keystr.rjust(_KEY_LENGTH, _PADDING_PREFIX).upper()
@classmethod
def _public_key_to_bytes(cls: Type[SECP256K1], key: ECPublicKey) -> bytes:
return bytes(_CURVE.encode_point(key.W, compressed=True))
@classmethod
def _public_key_to_str(cls: Type[SECP256K1], key: ECPublicKey) -> str:
return cls._public_key_to_bytes(key).hex()
@classmethod
def _do_derive_part(
cls: Type[SECP256K1], bytes_input: bytes, phase: Literal["root", "mid"]
) -> Tuple[ECPublicKey, ECPrivateKey]:
"""
Given bytes_input determine public/private keypair for a given phase of
this algorithm. The difference between generating the root and
intermediate keypairs is just what bytes are input by the caller and that
the intermediate keypair needs to inject _INTERMEDIATE_KEYPAIR_PADDING
into the value to hash to get the raw private key.
"""
def _candidate_merger(candidate: bytes) -> bytes:
if phase == "root":
return bytes_input + candidate
return bytes_input + _INTERMEDIATE_KEYPAIR_PADDING + candidate
raw_private = cls._get_secret(_candidate_merger)
wrapped_private = ECPrivateKey(int.from_bytes(raw_private, "big"), _CURVE)
return wrapped_private.get_public_key(), wrapped_private
@classmethod
def _derive_final_pair(
cls: Type[SECP256K1],
root_public: ECPublicKey,
root_private: ECPrivateKey,
mid_public: ECPublicKey,
mid_private: ECPrivateKey,
) -> Tuple[ECPublicKey, ECPrivateKey]:
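# In the XRPL secp256k1 scheme the final private key is the sum of the root and
# intermediate scalars modulo the group order, and the matching public key is the
# sum of the corresponding curve points, since point addition mirrors scalar addition.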
raw_private = (root_private.d + mid_private.d) % _GROUP_ORDER
wrapped_private = ECPrivateKey(raw_private, _CURVE)
wrapped_public = ECPublicKey(_CURVE.add_point(root_public.W, mid_public.W))
return wrapped_public, wrapped_private
@classmethod
def _get_secret(
cls: Type[SECP256K1], candidate_merger: Callable[[bytes], bytes]
) -> bytes:
"""
Given a function `candidate_merger` that knows how
to prepare a sequence candidate bytestring into
a possible full candidate secret, returns the first sequence
value that is valid. If none are valid, an exception is raised; in practice
this is so exceedingly rare that it can safely be ignored.
"""
for raw_root in range(_SEQUENCE_MAX):
root = raw_root.to_bytes(
_SEQUENCE_SIZE,
byteorder="big",
signed=False,
)
candidate = sha512_first_half(candidate_merger(root))
if cls._is_secret_valid(candidate):
return candidate
raise XRPLKeypairsException(
"""Could not determine a key pair.
This is extremely improbable. Please try again.""",
)
@classmethod
def _is_secret_valid(cls: Type[SECP256K1], secret: bytes) -> bool:
numerical_secret = int.from_bytes(secret, "big")
return numerical_secret in range(1, _GROUP_ORDER)
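# Hedged usage sketch (not part of the library): round-trips a signature through
# the class above. The seed bytes are an arbitrary illustration, not a real XRPL
# seed, and the helper name is hypothetical.
def _example_secp256k1_roundtrip() -> bool:
    seed = bytes(range(16))  # placeholder 16-byte seed, for illustration only
    public, private = SECP256K1.derive_keypair(seed, is_validator=False)
    signature = SECP256K1.sign(b"example message", private)
    return SECP256K1.is_valid_message(b"example message", signature, public)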
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for validation.py.
"""
import json
import os
import tempfile
import unittest
from app.executor import validation
class ValidationTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
self.repo_dir = self.tmp_dir.name
def tearDown(self):
self.tmp_dir.cleanup()
def test_import_targets_valid_absolute_names(self):
manifest_path = os.path.join(self.repo_dir,
'scripts/us_fed/manifest.json')
os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
with open(manifest_path, 'w+') as manifest:
manifest.write(
json.dumps(
{'import_specifications': [{
'import_name': 'treasury'
}]}))
manifest_path = os.path.join(self.repo_dir, 'us_bls/cpi/manifest.json')
os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
with open(manifest_path, 'w+') as manifest:
manifest.write(
json.dumps(
{'import_specifications': [{
'import_name': 'cpi_u'
}]}))
validation.are_import_targets_valid(
['scripts/us_fed:treasury', 'us_bls/cpi:cpi_u'],
['utils/template.py'], self.repo_dir, 'manifest.json')
def test_import_targets_valid_name_not_exist(self):
manifest_path = os.path.join(self.repo_dir,
'scripts/us_fed/manifest.json')
os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
with open(manifest_path, 'w+') as manifest:
manifest.write(
json.dumps(
{'import_specifications': [{
'import_name': 'treasury'
}]}))
with self.assertRaises(ValueError) as context:
validation.are_import_targets_valid(['scripts/us_fed:treasuryyy'],
['utils/template.py'],
self.repo_dir, 'manifest.json')
self.assertIn('treasuryyy not found', str(context.exception))
def test_import_targets_valid_manifest_not_exist(self):
with self.assertRaises(ValueError) as context:
validation.are_import_targets_valid(
['scripts/us_fed:treasury', 'us_bls/cpi:cpi_u'],
['utils/template.py'], self.repo_dir, 'manifest.json')
self.assertIn('manifest.json does not exist',
str(context.exception))
def test_import_targets_valid_relative_names(self):
manifest_path = os.path.join(self.repo_dir,
'scripts/us_fed/manifest.json')
os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
with open(manifest_path, 'w+') as file:
manifest = {
'import_specifications': [{
'import_name': 'treasury1'
}, {
'import_name': 'treasury2'
}]
}
file.write(json.dumps(manifest))
validation.are_import_targets_valid(['treasury1', 'treasury2'],
['scripts/us_fed'], self.repo_dir,
'manifest.json')
def test_import_targets_valid_relative_names_multiple_dirs(self):
manifest_path = os.path.join(self.repo_dir,
'scripts/us_fed/manifest.json')
os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
with open(manifest_path, 'w+') as file:
manifest = {
'import_specifications': [{
'import_name': 'treasury1'
}, {
'import_name': 'treasury2'
}]
}
file.write(json.dumps(manifest))
with self.assertRaises(ValueError) as context:
validation.are_import_targets_valid(['treasury1', 'treasury2'],
['scripts/us_fed', 'foo/bar'],
self.repo_dir, 'manifest.json')
self.assertIn('relative import names', str(context.exception))
def test_import_spec_valid(self):
import_dir = 'scripts/us_fed'
os.makedirs(os.path.join(self.repo_dir, import_dir, 'dir'),
exist_ok=True)
script_path = os.path.join(self.repo_dir, import_dir, 'dir/foo.py')
print(script_path)
with open(script_path, 'w+') as script:
script.write('line\n')
script.flush()
script_path = os.path.join(self.repo_dir, import_dir, 'bar.py')
with open(script_path, 'w+') as script:
script.write('line\n')
script.flush()
spec = {
'import_name': 'treausry',
'provenance_url': 'url',
'provenance_description': 'description',
'curator_emails': 'curator',
'scripts': ['dir/foo.py', 'dir/../bar.py']
}
validation._is_import_spec_valid(spec, self.repo_dir, import_dir)
def test_import_spec_valid_fields_absent(self):
spec = {
'import_name': 'treausry',
'scripts': ['dir/foo.py', 'dir/../bar.py']
}
with self.assertRaises(ValueError) as context:
validation._is_import_spec_valid(spec, self.repo_dir,
'scripts/us_fed')
self.assertIn(
'provenance_url, provenance_description, curator_emails',
str(context.exception))
def test_import_spec_valid_script_not_exist(self):
spec = {
'import_name': 'treausry',
'provenance_url': 'url',
'provenance_description': 'description',
'curator_emails': 'curator',
'scripts': ['dir/foo.py', 'dir/../bar.py']
}
with self.assertRaises(ValueError) as context:
validation._is_import_spec_valid(spec, self.repo_dir,
'scripts/us_fed')
self.assertIn('dir/foo.py, dir/../bar.py', str(context.exception))
def test_manifest_valid_fields_absent(self):
with self.assertRaises(ValueError) as context:
validation.is_manifest_valid({}, self.repo_dir, 'scripts/us_fed')
self.assertIn('import_specifications not found',
str(context.exception))
import math

import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon
# math, numpy and pandas are used below (math.ceil, np.*, pd.cut) and may not be
# re-exported by the wildcard imports, so they are imported explicitly here.
from utils import *
from analysis import *
from SpikeVidUtils import *
def tidy_axis(ax, top=False, right=False, left=False, bottom=False):
ax.spines['top'].set_visible(top)
ax.spines['right'].set_visible(right)
ax.spines['left'].set_visible(left)
ax.spines['bottom'].set_visible(bottom)
ax.xaxis.set_tick_params(top='off', direction='out', width=1)
ax.yaxis.set_tick_params(right='off', left='off', direction='out', width=1)
def plot_neurons(ax, df, neurons, color_map):
# print(df.head())
for id_ in neurons:
df_id = df[df['ID'] == id_]
if len(df_id) == 0:
break
color = color_map[id_]
ax.scatter(df_id['Time'], df_id['ID'], color=color, marker="|", s=150, label='Simulated')
# ax.set_ylim(0, len(neurons))
xlim = int(max(df['Interval']))
ax.set_xlim(0, xlim)
ax.set_xticks(np.linspace(0, xlim, num=3))
ax.tick_params(axis='y', labelsize=15)
ax.tick_params(axis='x', labelsize=15)
def plot_raster_trial(df1, df2, trials, neurons):
color_labels = neurons
rgb_values = sns.color_palette("bright", len(neurons))
color_map = dict(zip(color_labels, rgb_values))
fig, ax = plt.subplots(nrows=len(trials), ncols=2, figsize=(12,10), squeeze=False)
for n, trial in enumerate(trials):
df1_trial_n = df1[df1['Trial'] == trial]
df2_trial_n = df2[df2['Trial'] == trial]
ax[n][0].set_ylabel(f'Trial {trial}')
plot_neurons(ax[n][0], df1_trial_n, neurons, color_map)
plot_neurons(ax[n][1], df2_trial_n, neurons, color_map)
# ax[0][n].get_shared_x_axes().join(ax[0][0], ax[0][n])
# ax[1][n].get_shared_x_axes().join(ax[0][0], ax[1][n])
plt.setp(ax, yticks=neurons, yticklabels=neurons)
ax[0][0].set_title('True')
ax[0][1].set_title('Predicted')
fig.supxlabel('Time (S)')
fig.supylabel('Neuron ID')
plt.tight_layout()
def get_id_intervals(df, n_id, intervals):
id_intervals = np.zeros(len(intervals))
interval_counts = df[df['ID'] == n_id].groupby(df['Interval']).size()
id_intervals[interval_counts.index.astype(int).tolist()] = interval_counts.index.astype(int).tolist()
return id_intervals.tolist()
def plot_var(ax, df, variable, values, color_map, m_s=150, l_w=1):
for value in values:
color = color_map[value]
data = df[df[variable] == value]
data[variable] = data[variable].astype('str')
ax.scatter(data['Time'], data[variable], color=color, # c=data[variable].map(color_map),
marker="|", s=m_s, linewidth=l_w)
# ax.xaxis.set_tick_params(top='off', direction='out', width=1)
ax.yaxis.set_tick_params(right='off', left='off', direction='out', width=1)
ax.set_ylim(0, len(values))
xlim = int(max(df['Interval']))
ax.set_xlim(0, xlim)
ax.set_xticks(np.linspace(0, xlim, num=3))
ax.tick_params(axis='y', labelsize=10)
ax.tick_params(axis='x', labelsize=10)
# ax.spines['top'].set_visible(False)
# ax.spines['right'].set_visible(False)
# ax.spines['left'].set_visible(False)
ax.xaxis.set_tick_params(top='off', direction='out', width=1)
# ax.yaxis.set_tick_params(right='off', direction='out', width=1)
ms_firing = 25
line_width = 0.75
lw_scatter = 0.1
def plot_firing_comparison(df_1, df_2, id, trials, intervals, figure_name=None):
'''
get trial averaged spikes (PSTH)
'''
id_ = id
true = df_1[(df_1['Trial'].isin(trials)) & (df_1['ID'] == id_)].reset_index(drop=True)
pred = df_2[(df_2['Trial'].isin(trials)) & (df_2['ID'] == id_)].reset_index(drop=True)
rates_1_id = get_rates(true, [id_], intervals)[id_]
rates_2_id = get_rates(pred, [id_], intervals)[id_]
left, width = 0.15, 0.85
bottom, height = 0.1, 0.1
spacing = 0.005
height_hist = 0.10
rect_scatter_1 = [left, bottom*4, width, height]
rect_scatter_2 = [left, bottom*3, width, height]
rect_hist1 = [left, bottom*2, width, height_hist]
# rect_hist2 = [left, bottom*1, width, height_hist]
# rect_histy = [left + width + spacing, bottom, 0.2, height]
if figure_name is None:
fig = plt.figure(figsize=(10, 10))
else:
fig = figure_name
# ax_rast_1 = fig.add_subaxes(rect_scatter_1)
# ax_rast_2 = fig.add_axes(rect_scatter_2, sharex=ax_rast_1)
# ax_hist_1 = fig.add_axes(rect_hist1, sharex=ax_rast_1)
# ax_hist_2 = fig.add_axes(rect_hist2, sharex=ax_rast_1)
tidy_axis(fig)
no_top_right_ticks(fig)
fig.set_yticks([])
fig.set_yticklabels([])
fig.axis('off')
ax_rast_1 = fig.inset_axes(rect_scatter_1)
ax_rast_2 = fig.inset_axes(rect_scatter_2, sharex=ax_rast_1)
ax_hist_1 = fig.inset_axes(rect_hist1, sharex=ax_rast_1)
ax_rast_2.axis('off')
ax_rast_1.axis('off')
axes_list = [ax_rast_1, ax_rast_2, ax_hist_1]
# colors = sns.color_palette("gist_ncar_r", 2)
colors = ['black', 'red']
def plot_raster_scatter(ax, data, color, label):
ax.scatter(data['Interval'], data['ID'], c=color, s=ms_firing, linewidth=lw_scatter, marker='|', label=label)
ax.set_xlabel(label)
# ax.scatter(true['Interval'], true['ID'].astype('str'), color='#069AF3', marker='|')
plot_raster_scatter(ax_rast_2, pred, colors[0], 'Simulated')
plot_raster_scatter(ax_rast_1, true, colors[1], 'True')
# sns.distplot(true['Interval'], hist=False)
# sns.distplot(pred['Interval'], hist=False)
sns.kdeplot(pred['Interval'], ax=ax_hist_1, bw_adjust=.25, color=colors[0], lw=line_width, alpha=0.7) #plot(np.array(intervals), rates_1_id, color=colors[0], lw=3)
sns.kdeplot(true['Interval'], ax=ax_hist_1, bw_adjust=.25, color=colors[1], lw=line_width, alpha=0.7) #plot(np.array(intervals), rates_2_id, color=colors[1], lw=3)
ax_hist_1.set_ylabel('')
ax_hist_1.set_yticks([])
sns.despine(top=True, left=True)
# tidy_axis(ax_hist_1, bottom=True)
# tidy_axis(ax_hist_2, bottom=True)
ax_hist_1.set_xlabel('')  # clear the label with an empty string (a list is not a valid label)
# ax_hist_1.spines['bottom'].set_visible(False)
# ax_rast_1.spines['bottom'].set_visible(False)
# ax_rast_2.spines['bottom'].set_visible(False)
# ax_hist_1.spines['top'].set_visible(False)
# ax_hist_2.spines['top'].set_visible(False)
# xlabels = np.arange(0, max(intervals) + 1, 60)
# xticks, xlabels = xlabels, xlabels
max_intervals = math.ceil(df_1['Interval'].max())
# max_intervals = max(intervals)
xticks, xlabels = [0,max_intervals // 2, max_intervals], [0,max_intervals // 2, max_intervals]
yticks, ylabels = np.arange(len(trials)), list(map(str, trials))
for ax in axes_list:
tidy_axis(ax, bottom=True)
no_top_right_ticks(ax)
ax.set_xlim(0, max(intervals))
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels)
ax.set_yticks([])
ax.set_yticklabels([])
# ax_hist_1.set_xlabel('Time (s)', fontsize=20)
ax_hist_1.set_xlabel('', fontsize=20)
legend = fig.legend(bbox_to_anchor=(0.25, 0.01), ncol=3, frameon=True, fontsize=17.5) # bbox_to_anchor=(0.75, 0.55)
ax_rast_1.set_title("{}".format(id_), fontsize=20)
def plot_firing_comparison_sweeps(df_1, df_2, id, trials, intervals, figure_name=None):
'''
get trial averaged spikes (PSTH)
'''
left, width = 0.15, 0.85
bottom, height = 0.1, 0.1
spacing = 0.005
height_hist = 0.10
rect_hist1 = [left, bottom*2, width, height_hist]
# rect_hist2 = [left, bottom*1, width, height_hist]
# rect_histy = [left + width + spacing, bottom, 0.2, height]
if figure_name is None:
# fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10))
fig = plt.subplot()
else:
fig = figure_name
tidy_axis(fig)
no_top_right_ticks(fig)
fig.set_yticks([])
fig.set_yticklabels([])
fig.axis('off')
ax_dict_true = dict()
ax_dict_pred = dict()
for n, trial in enumerate(trials):
ax_dict_true[trial] = fig.inset_axes([left, bottom * (3+n), width, height_hist])
ax_dict_pred[trial] = fig.inset_axes([left, bottom * (3+n+len(trials)), width, height_hist], sharex=ax_dict_true[trial])
ax_dict_true[trial].axis('off')
ax_dict_pred[trial].axis('off')
ax_hist_1 = fig.inset_axes(rect_hist1, sharex=ax_dict_true[trials[0]])
axes_list = [list(ax_dict_true.values()), list(ax_dict_pred.values()), [ax_hist_1]]
# colors = sns.color_palette("gist_ncar_r", 2)
colors = ['black', 'red']
def plot_raster_scatter(ax, data, color, label):
ax.scatter(data['Interval'], data['ID'], c=color, s=ms_firing, marker='|', linewidth=lw_scatter, label=label)
ax.set_xlabel(label)
# ax.scatter(true['Interval'], true['ID'].astype('str'), color='#069AF3', marker='|')
for n, trial in enumerate(trials):
id_ = id
true = df_1[(df_1['Trial'] == trial) & (df_1['ID'] == id_)].reset_index(drop=True)
pred = df_2[(df_2['Trial'] == trial) & (df_2['ID'] == id_)].reset_index(drop=True)
if id_ == 345:
print(true, pred)
plot_raster_scatter(ax_dict_pred[trial], pred, colors[0], 'Simulated')
plot_raster_scatter(ax_dict_true[trial], true, colors[1], 'True')
sns.kdeplot(pred['Interval'], ax=ax_hist_1, bw_adjust=.25, color=colors[0], lw=line_width, alpha=0.7) #plot(np.array(intervals), rates_1_id, color=colors[0], lw=3)
sns.kdeplot(true['Interval'], ax=ax_hist_1, bw_adjust=.25, color=colors[1], lw=line_width, alpha=0.7) #plot(np.array(intervals), rates_2_id, color=colors[1], lw=3)
max_intervals = df_1['Interval'].max()
yticks, ylabels = np.arange(len(trials)), list(map(str, trials))
xticks, xlabels = [0,max_intervals // 2, max_intervals], [0,max_intervals // 2, max_intervals]
for ax in axes_list:
ax = ax[0]
tidy_axis(ax, bottom=True)
no_top_right_ticks(ax)
ax.set_xlim(0, max(intervals))
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels)
ax.set_yticks([])
ax.set_yticklabels([])
# ax_hist_1.set_xlim(0, max(intervals))
# sns.distplot(true['Interval'], hist=False)
# sns.distplot(pred['Interval'], hist=False)
ax_hist_1.set_ylabel('')
ax_hist_1.set_yticks([])
sns.despine(top=True, left=True)
# tidy_axis(ax_hist_1, bottom=True)
# tidy_axis(ax_hist_2, bottom=True)
ax_hist_1.set_xlabel('')
# ax_hist_1.set_xlabel('Time (s)', fontsize=20)
legend = fig.legend(bbox_to_anchor=(0.25, 0.01), ncol=3, frameon=True, fontsize=17.5) # bbox_to_anchor=(0.75, 0.55)
list(ax_dict_pred.values())[-1].set_title("{}".format(id_), fontsize=20)
def get_psth(df, n_id, trials):
df = df[df['ID'] == n_id]
df = df[df['Trial'].isin(trials)]  # filter on the ``trials`` argument (``trial`` was undefined here)
df = df.groupby('Interval_dt').size().reset_index()
df.columns = ['Interval_dt', 'Count']
return df
def set_categorical_ticks(ax, yticks=None, ylabels=None, xticks=None, xlabels=None, fs=None):
fs = fs if fs is not None else 10
if yticks is not None:
ax.set_ylim(0, len(ylabels))
ax.set_yticks(yticks)
ax.set_yticklabels(ylabels)
if xticks is not None:
ax.set_xlim(0, max(xlabels))
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='x', labelsize=10)
ax.tick_params(axis='y', labelsize=fs)
ax.get_xaxis().tick_bottom() # remove unneeded ticks
ax.get_yaxis().tick_left()
def no_top_right_ticks(ax):
ax.set_yticklabels([])
ax.set_yticks([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.yaxis.set_tick_params(top='off', direction='out', width=1)
ax.yaxis.set_tick_params(top='off', right='off', left='on', direction='out', width=1)
ax.tick_params(labelright='off', labeltop='off')
ax.tick_params(axis='both', direction='out')
ax.get_xaxis().tick_bottom() # remove unneeded ticks
ax.get_yaxis().tick_left()
def plot_neurons_trials_psth(df_1, df_2, neurons, trials, intervals, figuresize=None):
fs = 15
plt.rcParams['xtick.labelsize']= fs
plt.rcParams['ytick.labelsize']= fs
plt.rcParams['axes.labelsize']= fs
plt.rcParams['axes.titlesize']= fs
plt.rcParams['legend.fontsize']= fs
plt.rcParams['lines.linewidth']= 2
# plt.rcParams['fig.supylabel']= fs
df_1 = df_1.reset_index(drop=True)
df_2 = df_2.reset_index(drop=True)
dt = 4
intervals_dt = [dt * n for n in range(int((intervals[-1]) // dt) + 1)]
df_1['Interval_dt'] = pd.cut(df_1['Interval'], intervals_dt, include_lowest=True)
df_2['Interval_dt'] = pd.cut(df_2['Interval'], intervals_dt, include_lowest=True)
# neuron_list = list(map(str, sorted(top_corr[:6].index.tolist())))
neurons = list(map(str, [i for i in neurons]))
trials = df_1['Trial'].unique()
# neuron_list = sorted(top_corr[:10].index.tolist())
scale = 1
nrows, ncols = 4, len(neurons)
fig_size = figuresize if figuresize is not None else (2 * scale * len(neurons),10 * scale)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=fig_size)
variable = 'Trial'
color_labels = trials
rgb_values = sns.color_palette("gist_ncar_r", len(trials))
color_map = dict(zip(color_labels, rgb_values))
max_freq = 0
for n, neuron in enumerate(neurons):
df_1['ID'] = df_1['ID'].astype('str')
df_2['ID'] = df_2['ID'].astype('str')
df_1_id = df_1[df_1['ID'] == neuron]
df_2_id = df_2[df_2['ID'] == neuron]
max_intervals = 32
# max_intervals = max(intervals)
yticks, ylabels = np.arange(len(trials)), list(map(str, trials))
xticks, xlabels = [0,max_intervals // 2, max_intervals], [0,max_intervals // 2, max_intervals]
m_s = 45
l_w = 0.5
plot_var(ax[0][n], df_1_id, variable, trials, color_map, m_s, l_w=l_w)
plot_var(ax[1][n], df_2_id, variable, trials, color_map, m_s, l_w=l_w)
set_categorical_ticks(ax[0][n], yticks, ylabels, xticks, xlabels)
set_categorical_ticks(ax[1][n], yticks, ylabels, xticks, xlabels)
ax[0][n].set_yticks([])
ax[1][n].set_yticks([])
if n > 0:
no_top_right_ticks(ax[0][n])
no_top_right_ticks(ax[1][n])
df_1['ID'] = df_1['ID'].astype('int')
df_2['ID'] = df_2['ID'].astype('int')
neuron_int = int(neuron)
df_1_id = df_1[df_1['ID'] == neuron_int]
df_2_id = df_2[df_2['ID'] == neuron_int]
# rates_1 = get_rates(df_1, [neuron_int], intervals_dt)[neuron_int]
# rates_2 = get_rates(df_2, [neuron_int], intervals_dt)[neuron_int]
freq_id_1 = df_1_id['Interval'].value_counts().reindex(intervals, fill_value=0)
freq_id_2 = df_2_id['Interval'].value_counts().reindex(intervals, fill_value=0)
bins = np.arange(len(intervals) // 2)
# bins = len(intervals)
# ax[2][n].bar(intervals_dt, freq_id_1)
# ax[2][n].hist([freq_id_1, freq_id_2], bins=bins, histtype='step', edgecolor=['blue', 'red'],
# lw=2, alpha=0.3, facecolor=['blue', 'red'], label=['True', 'Sim'])
c_2, c_1 = rgb_values[2], rgb_values[-1]
ax[2][n].hist(df_1_id['Interval'], bins=bins, edgecolor=None, lw=2, alpha=1, facecolor=c_1, label='True')
ax[3][n].hist(df_2_id['Interval'], bins=bins, edgecolor=None, lw=2, alpha=1, facecolor=c_2, label='Predicted') # histtype='step'
# xticks, xlabels = [0, max(intervals) // 2, max(intervals)], [0, max(intervals) // 2, max(intervals)]
y_fs_hist = 15
set_categorical_ticks(ax[2][n], None, None, xticks, xlabels, y_fs_hist)
ax[2][n].spines['right'].set_visible(False)
ax[2][n].spines['top'].set_visible(False)
set_categorical_ticks(ax[3][n], None, None, xticks, xlabels, y_fs_hist)
ax[3][n].spines['right'].set_visible(False)
ax[3][n].spines['top'].set_visible(False)
if n > 0:
no_top_right_ticks(ax[2][n])
ax[3][n].get_shared_y_axes().join(ax[2][n], ax[2][n-1])
no_top_right_ticks(ax[3][n])
max_lim = (max(ax[2][n].get_ylim()[1], ax[3][n].get_ylim()[1]))
ax[0][n].set_xticklabels([])
ax[1][n].set_xticklabels([])
ax[2][n].set_xticklabels([])
ax[2][n].set_ylim(0, max_lim)
ax[3][n].set_ylim(0, max_lim)
ax[2][n].get_shared_y_axes().join(ax[3][n], ax[3][n-1])
# max_freq = max(freq_id_1.max(), freq_id_2.max(), max_freq)
# yticks, ylabels = np.linspace(0, max(freq_id_1.max(), freq_id_2.max()), 3), [i for i in range(max(freq_id_1.max(), freq_id_2.max()))]
# set_categorical_ticks(ax[2][n], yticks, ylabels, xticks, xlabels)
plt.setp(ax[0])
# ax[0][0].set_ylim(0, 32)
ax[0][0].set_ylabel('Ground Truth')
ax[1][0].set_ylabel('Simulated')
# ax[2][0].set_ylabel('PSTH, True')
# ax[3][0].set_ylabel('PSTH, Simulation')
# ax[2][-1].legend()
ax[0][0].legend(bbox_to_anchor=(0,0,1,1))
# fig.supxlabel('Time (S)', fontsize=15, y=0.07)
# fig.supylabel('Trials')
fig.suptitle('Gabor 3D Sim', fontsize=20, y=0.925)
# fig.gca().set_aspect('equal', adjustable='box')
# plt.autoscale()
# plt.tight_layout()
def get_boxplot_data(df_1, df_2, intervals, trials):
data_boxplot_true = []
data_boxplot_pred = []
for n, trial in enumerate(trials):
trial_prev = trials[n - 1] if n > 0 else trials[n + 1]
true_prev = df_1[df_1['Trial'] == trial_prev].reset_index(drop=True)
true = df_1[df_1['Trial'] == trial].reset_index(drop=True)
pred = df_2[df_2['Trial'] == trial].reset_index(drop=True)
rates_true_prev, rates_true, rates_pred = get_rates_trial(true_prev, intervals), get_rates_trial(true, intervals), get_rates_trial(pred, intervals)
corr_trials_true = calc_corr_psth(rates_true, rates_true_prev)
corr_trials_pred = calc_corr_psth(rates_true, rates_pred)
data_boxplot_true.append(np.array(corr_trials_true).flatten())
data_boxplot_pred.append(np.array(corr_trials_pred).flatten())
return data_boxplot_true, data_boxplot_pred, corr_trials_true, corr_trials_pred
def plot_error_bar(x, n, color):
"""
databoxplot_true, databoxplot_pred, corr_trials_true, corr_trials_pred = get_boxplot_data(df_1, df_2, intervals, n_trial)
plot_error_bar(corr_trials_true, n, true_color)
plot_error_bar(corr_trials_pred, n, pred_color)
"""
mins = x.min()
maxes = x.max()
means = x.mean()
std = x.std()
# plt.errorbar(n, means, std, fmt='ok', lw=3)
# plt.errorbar(n, means, [means - mins, maxes - means],
# fmt='.k', ecolor='gray', lw=1)
# plt.xlim(-1, 8)
green_diamond = dict(markerfacecolor=color, marker='o')
# fig3, ax3 = plt.subplots()
# ax3.set_title('Changed Outlier Symbols')
ax = plt.gca()  # ``ax`` was not defined in this scope; draw on the current axes
ax.boxplot(x, flierprops=green_diamond)
def fancy_boxplot(fig, ax1, data, color):
bp = ax1.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# ax1.set(
# axisbelow=True, # Hide the grid behind plot objects
# title='Comparison of IID Bootstrap Resampling Across Five Distributions',
# xlabel='Distribution',
# ylabel='Value',
# )
# Now fill the boxes with desired colors
# box_colors = ['darkkhaki', 'royalblue']
# box_colors = sns.dark_palette("#69d", len(data), reverse=True)
box_colors = [color]
num_boxes = len(data)
medians = np.empty(num_boxes)
for i in range(num_boxes):
box = bp['boxes'][i]
box_x = []
box_y = []
for j in range(5):
box_x.append(box.get_xdata()[j])
box_y.append(box.get_ydata()[j])
box_coords = np.column_stack([box_x, box_y])
# Alternate between Dark Khaki and Royal Blue
ax1.add_patch(Polygon(box_coords, facecolor=box_colors[0]))
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
median_x = []
median_y = []
for j in range(2):
median_x.append(med.get_xdata()[j])
median_y.append(med.get_ydata()[j])
ax1.plot(median_x, median_y, 'k')
medians[i] = median_y[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
ax1.plot(np.average(med.get_xdata()), np.average(data[i]),
color='w', marker='*', markeredgecolor='k')
# Set the axes ranges and axes labels
# ax1.set_xlim(0.5, num_boxes + 0.5)
# top = 40
# bottom = -5
# ax1.set_ylim(bottom, top)
# ax1.set_xticklabels(np.repeat(random_dists, 2),
# rotation=45, fontsize=8)
# Due to the Y-axis scale being different across samples, it can be
# hard to compare differences in medians across the samples. Add upper
# X-axis tick labels with the sample medians to aid in comparison
# (just use two decimal places of precision)
pos = np.arange(num_boxes) + 1
upper_labels = [str(round(s, 2)) for s in medians]
weights = ['bold', 'semibold']
for tick, label in zip(range(num_boxes), ax1.get_xticklabels()):
k = tick % 2
ax1.text(pos[tick], .95, upper_labels[tick],
transform=ax1.get_xaxis_transform(),
horizontalalignment='center', size='x-small',
weight=weights[k], color=box_colors[0])
fig.supxlabel('Trials')
fig.supylabel('Pearson Correlation (P)')
fig.suptitle('Inter-Neuron Correlation Across Trials')
plt.tight_layout()
def plot_intertrial_corr(corr_true, corr_pred, trial):
def scatter_hist(x, y, ax, ax_histy):
# no labels
# ax_histx.tick_params(axis="x", labelbottom=False)
ax_histy.tick_params(axis="y", labelleft=False)
# the scatter plot:
# ax.scatter(x, y)
# bins = 250
# now determine nice limits by hand:
# binwidth = 0.25
# xymax = max(np.max(np.abs(x)), np.max(np.abs(y)))
# lim = (int(xymax/binwidth) + 1) * binwidth
# bins = np.arange(-lim, lim + binwidth, binwidth)
# ax_histx.hist(x, bins=bins)
ax_hist = sns.distplot(y, hist=False, ax=ax_histy, vertical=True) # (x, y, bins=10, orientation='horizontal')
ax_hist.set(xlabel=None)
# sns.distplot(top_corr, hist=False, ax=ax_histy, vertical=True)
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
rect_scatter = [left, bottom, width, height]
# rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]
# start with a square Figure
fig = plt.figure(figsize=(15, 15))
ax = fig.add_axes(rect_scatter)
# ax_histx = fig.add_axes(rect_histx, sharex=ax)
ax_histy = fig.add_axes(rect_histy, sharey=ax)
# use the previously defined function
scatter_hist(np.array(corr_true.index), corr_true, ax, ax_histy)
scatter_hist(np.array(corr_pred.index), corr_pred, ax, ax_histy)
ax.grid(lw=0.8, alpha=0.7, color='gray')
ax.scatter(corr_true.index, corr_true, label=f'Trial {trial} vs. 1', alpha=0.4)
ax.scatter(corr_pred.index, corr_pred, label=f'Trial {trial} vs. Pred', alpha=0.5)
ax.set_title('Pair-wise Correlation Between Trials', fontsize=25)
ax.set_xlabel('Neuron ID', fontsize=20)
ax.set_ylim(-0.1, 0.6)
plt.ylabel('Pearson Correlation (p)')
ax.legend(fontsize=20, title_fontsize=20)
plt.show()
#! /usr/bin/env python3
import sys
import pickle
import argparse
import numpy as np
import pandas as pd
import scipy.stats as stats
def like_calc(X, y_test, unc):
"""
Given a simulated entry with uncertainty and a test entry, calculates the
likelihood that they are the same.
Parameters
----------
X : numpy array (train DB) of nuclide measurements for simulated entry
y_test : numpy array (single row) of nuclide measurements for test
("measured") entry
unc : float representing flat uncertainty percentage, or 0.0 indicating
counting (square-root-of-counts) error
Returns
-------
like: likelihood that the test entry is the simulated entry
"""
# TODO UNTESTED CODE (but not recently in use)
idx = np.nonzero(y_test)[0]
y_test = y_test[idx]
X = X[:, idx]
# unc arg of 0 indicates for the script to use sqrt(counts) uncertainty
if unc == 0.0:
std = np.sqrt(X)
else:
std = unc * X
like = np.prod(stats.norm.pdf(X, loc=y_test, scale=std), axis=1)
return like
def ll_calc(X, y_test, unc):
"""
Given a simulated entry with uncertainty and a test entry, calculates the
log-likelihood that they are the same.
Parameters
----------
X : numpy array (train DB) of nuclide measurements for simulated entry
y_test : numpy array (single row) of nuclide measurements for test
("measured") entry
unc : float representing flat uncertainty percentage, or 0.0 indicating
counting (square-root-of-counts) error
Returns
-------
ll: numpy array of log-likelihoods that the test entry is the simulated
entry for each entry in the DB
"""
idx = np.nonzero(y_test)[0]
y_test = y_test[idx]
X = X[:, idx]
# unc arg of 0 indicates for the script to use sqrt(counts) uncertainty
if unc == 0.0:
std = np.sqrt(X)
else:
std = unc * X
ll = np.sum(stats.norm.logpdf(X, loc=y_test, scale=std), axis=1)
return ll
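# Hedged numeric illustration (not used by the script): for a single DB row
# X = [[100., 50.]], y_test = [100., 50.] and unc = 0.0 (sqrt-of-counts errors),
# each term is stats.norm.logpdf(x, loc=x, scale=sqrt(x)) = -0.5*ln(2*pi*x),
# so ll_calc returns roughly [-0.5*(ln(200*pi) + ln(100*pi))] ~= [-6.10].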
def unc_calc(X, y_test, unc):
"""
Given a simulated entry and a test entry with uniform uncertainty,
calculates the uncertainty in the log-likelihood calculation.
Parameters
----------
X : numpy array (train DB) of nuclide measurements for simulated entry
y_test : numpy array (single row) of nuclide measurements for test
("measured") entry
unc : float representing flat uncertainty percentage, or 0.0 indicating
counting (square-root-of-counts) error
Returns
-------
ll_unc: numpy array of log-likelihood uncertainties for each DB entry
"""
idx = np.nonzero(y_test)[0]
y_test = y_test[idx]
X = X[:, idx]
# unc arg of 0 indicates for the script to use sqrt(counts) uncertainty
if unc == 0.0:
sim_unc_sq = X
tst_unc_sq = y_test
else:
sim_unc_sq = (unc * X)**2
tst_unc_sq = (unc * y_test)**2
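# First-order error propagation of the Gaussian log-likelihood term
# -0.5*((X - y_test)/sigma)**2 with sigma**2 = sim_unc_sq: the partial derivatives
# with respect to X and y_test are +/-(X - y_test)/sim_unc_sq, so each term's
# variance is ((X - y_test)/sim_unc_sq)**2 * (sim_unc_sq + tst_unc_sq).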
unc_array = ((X - y_test) / sim_unc_sq)**2 * (sim_unc_sq + tst_unc_sq)
np.nan_to_num(unc_array, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
unc_array = np.array(unc_array, dtype=np.float64)
ll_unc = np.sqrt(np.sum(unc_array, axis=1))
return ll_unc
def ratios(XY, ratio_list, labels):
"""
Given a dataframe with entries (rows) that contain nuclide measurements and
some labels, calculate the predetermined ratios of the measurements.
Parameters
----------
XY : dataframe of spent fuel entries containing nuclide measurements and
their labels
ratio_list : list of nuclide ratios
labels : list of label titles in the dataframe
Returns
-------
XY_ratios : dataframe of spent fuel entries containing nuclide measurement
ratios and their labels
"""
XY_ratios = XY.loc[:, labels].copy()
for ratio in ratio_list:
nucs = ratio.split('/')
XY_ratios[ratio] = XY[nucs[0]] / XY[nucs[1]]
XY_ratios.replace([np.inf, -np.inf], 0, inplace=True)
XY_ratios.fillna(0, inplace = True)
# reorganize columns
cols = ratio_list + labels
XY_ratios = XY_ratios[cols]
return XY_ratios
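# Hedged illustration (not part of the script): ratios() on a toy two-row frame
#   XY = pd.DataFrame({'cs137': [2.0, 4.0], 'cs133': [1.0, 0.0], 'Burnup': [10, 20]})
#   ratios(XY, ['cs137/cs133'], ['Burnup'])
# yields cs137/cs133 = [2.0, 0.0] (the divide-by-zero inf is replaced with 0)
# next to the untouched 'Burnup' label column.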
def format_pred(pred_row, lbls, nonlbls, cdf_cols):
"""
This separates off the formatting of the pred_ll dataframe from the
get_pred function for cleanliness.
Parameters
----------
pred_row : single-row dataframe including nuclide measurements, the
prediction (i.e., the predicted labels), and all saved log-
likelihoods
lbls : list of labels that are predicted
nonlbls : list of reactor parameters that aren't being predicted
cdf_cols : list of new LogLL columns added to prediction for CDF plot
Returns
-------
pred_row : single-row dataframe including the prediction (i.e., predicted
labels), LLmax, LLUnc, and a list of LLs and their Uncs to
populate a CDF
"""
lbls = lbls + nonlbls
pred_lbls = ["pred_" + s for s in lbls]
pred_row.rename(columns=dict(zip(lbls, pred_lbls)), inplace=True)
pred_lbls.extend(cdf_cols)
pred_row = pred_row.loc[:, pred_lbls]
return pred_row
def ll_cdf(pred_ll, ll_df):
"""
Returns a single-row dataframe with the prediction/MaxLogLL with 8 new
columns of log-likelihoods that can populate a CDF, which includes the 2nd
largest LogLL, and 7 percentiles that should give a decent picture of the
CDF curve. (and all corresponding uncertainties)
Parameters
----------
pred_ll : single-row dataframe including nuclide measurements, the
prediction (i.e., the predicted labels), and maxLL/LLUnc
ll_df : two-column dataframe including log-likelihood calculations and
their uncertainties for a given test sample calculation against
entire training db
Returns
-------
pred_ll : single-row dataframe including nuclide measurements, the
prediction (i.e., the predicted labels), and all saved log-
likelihoods (Max and CDF-relevant)
cdf_cols : list of column names that are the new LogLL columns added for
CDF
"""
old_cols = pred_ll.columns.values.tolist()
# First, grab adjacent LL value to MaxLL
cols = ll_df.columns.values.tolist()
maxs = ll_df.nlargest(2, cols[0])
pred_ll['2ndMaxLogLL'] = maxs[cols[0]].iloc[1]
pred_ll['2ndMaxLLUnc'] = maxs[cols[1]].iloc[1]
# Second, add columns with percentiles in the col name
quants = [0.9998, 0.9988, 0.95, 0.9, 0.5, 0.1, 0.01]
for quant in quants:
quant_df = ll_df.quantile(quant)
pred_ll['CDF_LogLL_' + str(quant)] = quant_df.loc[cols[0]]
pred_ll['CDF_LLUnc_' + str(quant)] = quant_df.loc[cols[1]]
new_cols = pred_ll.columns.values.tolist()
cdf_cols = [col for col in new_cols if col not in old_cols]
return pred_ll, cdf_cols
def get_pred(XY, test_sample, unc, lbls, nonlbls):
"""
Given a database of spent fuel entries and a test sample (nuclide
measurements only), calculates the log-likelihood (and LL-uncertainty) of
that sample against every database entry. Determines the max LL, and
therefore the corresponding prediction in the database. Also determines a
list of LL measurements that populate a CDF. Returns that prediction and LL
information as a single row dataframe.
Parameters
----------
XY : dataframe with nuclide measurements and reactor parameters
test_sample : numpy array of a sample to be predicted (nuclide measurements
only)
unc : float that represents the simulation uncertainty in nuclide
measurements
lbls : list of reactor parameters to be predicted
nonlbls : list of reactor parameters that aren't being predicted
Returns
-------
pred_ll : single-row dataframe including the prediction (i.e., predicted
labels), its max log-likelihood/uncertainty, and a list of
log-likelihoods and their uncertainties to populate a CDF
"""
ll_name = 'MaxLogLL'
unc_name = 'MaxLLUnc'
X = XY.drop(lbls+nonlbls, axis=1).copy().to_numpy()
XY[ll_name] = ll_calc(X, test_sample, unc)
XY[unc_name] = unc_calc(X, test_sample, unc)
pred_row = XY.loc[XY.index == XY[ll_name].idxmax()].copy()
pred_ll, cdf_cols = ll_cdf(pred_row, XY[[ll_name, unc_name]])
cdf_cols = [ll_name, unc_name] + cdf_cols
pred_ll = format_pred(pred_ll, lbls, nonlbls, cdf_cols)
# need to delete calculated columns so next test sample can be calculated
XY.drop(columns=[ll_name, unc_name], inplace=True)
return pred_ll
def mll_testset(XY, test, ext_test, unc, lbls, nonlbls):
"""
Given a database of spent fuel entries containing a nuclide vector and the
reactor operation parameters, and an equally formatted database of test
cases to predict, this function loops through the test database to perform
a series of predictions. It first formats the test sample for prediction,
then gathers all the predictions from the test database entries
Parameters
----------
XY : dataframe with nuclide measurements and reactor parameters
test : dataframe with test cases to predict in same format as train
ext_test : boolean indicating which of external test set or LOOV is being
performed
unc : float that represents the simulation uncertainty in nuclide
measurements
lbls : list of reactor parameters to be predicted
nonlbls : list of reactor parameters that aren't being predicted
Returns
-------
pred_df : dataframe with ground truth and predictions
"""
pred_df = pd.DataFrame()
for sim_idx, row in test.iterrows():
if ext_test:
test_sample = row.drop(lbls)
test_answer = row[lbls]
pred_ll = get_pred(XY, test_sample.to_numpy(), unc, lbls, nonlbls)
all_lbls = lbls
else:
test_sample = row.drop(lbls+nonlbls)
test_answer = row[lbls+nonlbls]
pred_ll = get_pred(XY.drop(sim_idx), test_sample.to_numpy(), unc, lbls, nonlbls)
all_lbls = lbls + nonlbls
if pred_df.empty:
pred_df = pd.DataFrame(columns = pred_ll.columns.to_list())
pred_df = pred_df.append(pred_ll)
pred_df = pd.concat([test.loc[:, all_lbls].rename_axis('sim_idx').reset_index(),
pred_df.rename_axis('pred_idx').reset_index()
], axis=1)
return pred_df
def check_traindb_equal(final, db_path, arg_ratios, ratio_list, lbls):
"""
Checks at end of script that the database was not altered
Parameters
----------
final : training database dataframe at end of script
db_path : path to pkl file containing training database
arg_ratios : Boolean arg indicating whether or not nuclide ratios are being used
ratio_list : list of ratios being created
lbls : all non-features (prediction labels and non-prediction labels)
"""
initial = pd.read_pickle(db_path)
if arg_ratios == True:
initial = ratios(initial, ratio_list, lbls)
if not initial.equals(final):
sys.exit('Final training database does not equal initial database')
return
def convert_g_to_mgUi(XY, Y_list):
"""
Converts nuclides from ORIGEN simulations measured in grams to
concentrations measured in mg / gUi
Parameters
----------
XY : dataframe of origen sims with nuclides measured in grams
Y_list : list of columns in DB that are not features (nuclides)
Returns
-------
XY : dataframe of origen sims with nuclides measured in mg / gUi
"""
nucs = XY.columns[~XY.columns.isin(Y_list)].tolist()
# [x (g) / 1e6 (gUi)] * [1000 (mg) / 1 (g)] = x / 1000
XY[nucs] = XY[nucs].div(1000, axis=0)
return XY
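# Illustration of the conversion above: 5 g of a nuclide in a simulation normalized
# to 1e6 g of initial uranium is 5e-6 g/gUi = 5e-3 mg/gUi, i.e. the value divided by 1000.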
def parse_args(args):
"""
Command-line argument parsing
Parameters
----------
args : list of command-line argument strings (typically sys.argv[1:])
Returns
-------
args : argparse.Namespace containing the parsed command-line arguments
"""
parser = argparse.ArgumentParser(description='Performs maximum likelihood calculations for reactor parameter prediction.')
parser.add_argument('outdir', metavar='output-directory',
help='directory in which to organize output csv')
parser.add_argument('sim_unc', metavar='sim-uncertainty', type=float,
help='value of simulation uncertainty (in fraction) to apply to likelihood calculations')
parser.add_argument('train_db', metavar='reactor-db',
help='file path to a training set, e.g. /mnt/researchdrive/BOX_INTERNAL/opotowsky/*.pkl')
parser.add_argument('test_db', metavar='testing-set',
help='file path to an external testing set, e.g. ~/sfcompo/format_clean/sfcompo_nucXX.pkl')
parser.add_argument('outfile', metavar='csv-output',
help='name for csv output file')
parser.add_argument('db_rows', metavar='db-interval', nargs=2, type=int,
help='indices of the database interval for the job')
parser.add_argument('--ext-test', dest='ext_test', action='store_true',
help='execute script with external testing set by providing file path to a testing set')
parser.add_argument('--no-ext-test', dest='ext_test', action='store_false',
help='do not execute script with external testing set')
parser.add_argument('--ratios', dest='ratios', action='store_true',
help='compute isotopic ratios instead of using concentrations')
parser.add_argument('--no-ratios', dest='ratios', action='store_false',
help='compute using concentrations instead of isotopic ratios')
return parser.parse_args(args)
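# Example invocation (the script name and file paths below are placeholders,
# not taken from the original project):
#   python mll_calc.py ./results 0.05 trainset.pkl sfcompo_test.pkl preds 0 100 \
#       --ext-test --no-ratios
# which writes the predictions and errors to preds.csv in the working directory.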
def main():
"""
Given a database of spent fuel entries (containing nuclide measurements and
labels of reactor operation parameters of interest for prediction) and a
testing database containing spent fuel entries formatted in the same way,
this script calculates the maximum log-likelihood of each test sample
against the database for a prediction. The errors of those predictions are
then calculated and saved as a CSV file.
"""
args = parse_args(sys.argv[1:])
# training set
XY = pd.read_pickle(args.train_db)
if 'total' in XY.columns:
XY.drop('total', axis=1, inplace=True)
lbls = ['ReactorType', 'CoolingTime', 'Enrichment', 'Burnup',
'OrigenReactor']
nonlbls = ['AvgPowerDensity', 'ModDensity', 'UiWeight']
# testing set
    if args.ext_test:
test = pd.read_pickle(args.test_db)
# In-script test: order of columns must match:
xy_cols = XY.columns.tolist()
for col in nonlbls: xy_cols.remove(col)
if xy_cols != test.columns.tolist():
if sorted(xy_cols) == sorted(test.columns.tolist()):
test = test[xy_cols]
else:
sys.exit('Feature sets are different')
# slice test set
test = test.iloc[args.db_rows[0]:args.db_rows[1]]
# converting train DB to match units in sfcompo DB
XY = convert_g_to_mgUi(XY, lbls+nonlbls)
else:
test = XY.iloc[args.db_rows[0]:args.db_rows[1]]
# this is a fix for the now too-large db to test every entry
# 3 lines per job, with max_jobs currently set to 9900
# (~6% of db is tested)
#test = test.sample(3)
# TODO: need some better way to handle varying ratio lists
tamu_list = ['cs137/cs133', 'cs134/cs137', 'cs135/cs137', 'ba136/ba138',
'sm150/sm149', 'sm152/sm149', 'eu154/eu153', 'pu240/pu239',
'pu241/pu239', 'pu242/pu239'
]
ratio_list = tamu_list
    if args.ratios:
XY = ratios(XY, ratio_list, lbls+nonlbls)
test = ratios(test, ratio_list, lbls)
unc = float(args.sim_unc)
pred_df = mll_testset(XY, test, args.ext_test, unc, lbls, nonlbls)
fname = args.outfile + '.csv'
pred_df.to_csv(fname)
return
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
""" cmddocs Class """
import os
import cmd
import sys
import signal
import configparser
import git
import pkg_resources
from cmddocs.articles import *
from cmddocs.completions import *
from cmddocs.version import __version__
class Cmddocs(cmd.Cmd):
""" Basic commandline interface class """
def __init__(self, conf="~/.cmddocsrc"):
"""
Initialize the class
Inherit from Cmd
Read config, initialize Datadir, create Prompt
"""
cmd.Cmd.__init__(self)
self.reset = '\033[0m'
self.read_config(self, conf)
self.initialize_docs(self)
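        # build the prompt with ANSI escapes: bold, then the configured color
        # code (e.g. 37 = white), with a reset appended after the prompt text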
self.prompt = '\033[1m\033[' + self.promptcol + 'm' + self.prompt + " " + self.reset
self.do_cd(self.datadir)
def read_config(self, sconf, conf):
"""
All Config Options being read and defaulting
"""
self.colors = {}
config = configparser.ConfigParser()
if not config.read(os.path.expanduser(conf)):
print("Error: your config %s could not be read" % conf)
exit(1)
try:
self.datadir = os.path.expanduser(config.get("General", "Datadir"))
except configparser.NoOptionError:
print("Error: Please set a Datadir in %s" % conf)
exit(1)
try:
self.exclude = os.path.expanduser(config.get("General", "Excludedir"))
except configparser.NoOptionError:
self.exclude = os.path.expanduser('.git/')
try:
self.default_commit_msg = config.get("General", "Default_Commit_Message")
except configparser.NoOptionError:
self.default_commit_msg = "small changes"
try:
self.editor = config.get("General", "Editor")
except configparser.NoOptionError:
if os.environ.get('EDITOR') is not None:
self.editor = os.environ.get('EDITOR')
else:
print("Error: Could not find usable editor.")
                print("Please specify one in config or set EDITOR in your "
                      "OS Environment")
exit(1)
try:
self.pager = config.get("General", "Pager")
except configparser.NoOptionError:
            if os.environ.get('PAGER') is not None:
                self.pager = os.environ.get('PAGER')
            else:
                print("Error: Could not find usable Pager.")
                print("Please specify one in config or set PAGER in your "
                      "OS Environment")
                exit(1)
try:
self.pagerflags = config.get("General", "PagerFlags")
except configparser.NoOptionError:
self.pagerflags = False
try:
self.editorflags = config.get("General", "EditorFlags")
except configparser.NoOptionError:
self.editorflags = False
try:
self.prompt = config.get("General", "Prompt")
except configparser.NoOptionError:
self.prompt = "cmddocs>"
try:
self.promptcol = config.get("General", "Promptcolor")
except configparser.NoOptionError:
self.promptcol = "37"
try:
self.intro = config.get("General", "Intro_Message")
except configparser.NoOptionError:
self.intro = "cmddocs - press ? for help"
try:
self.mailfrom = config.get("General", "Mail")
except configparser.NoOptionError:
self.mailfrom = "nobody"
try:
self.extension = config.get("General", "Default_Extension")
except configparser.NoOptionError:
self.extension = "md"
try:
self.colors['h1'] = config.get("Colors", "Header12")
except (configparser.NoOptionError, configparser.NoSectionError):
self.colors['h1'] = "37"
try:
self.colors['h2'] = config.get("Colors", "Header345")
except (configparser.NoOptionError, configparser.NoSectionError):
self.colors['h2'] = "92"
try:
self.colors['code'] = config.get("Colors", "Codeblock")
except (configparser.NoOptionError, configparser.NoSectionError):
self.colors['code'] = "92"
return
def initialize_docs(self, docs):
""" Read or initialize git repository """
try:
self.repo = git.Repo(self.datadir)
except git.exc.NoSuchPathError:
print("Error: Specified datadir %s does not exist" % self.datadir)
exit(1)
except git.exc.InvalidGitRepositoryError:
self.repo = git.Repo.init(self.datadir)
try:
self.repo.git.add(".")
self.repo.git.commit(m=" init")
except git.exc.GitCommandError:
pass
print("Successfully created and initialized empty repo at %s" % self.datadir)
# Change to datadir
try:
os.chdir(self.datadir)
self.cwd = os.getcwd()
except OSError:
print("Error: Switching to Datadir %s not possible" % self.datadir)
exit(1)
def do_list(self, dir):
"""
Show files in current working dir
Usage:
list
l
list Databases/
"""
if not dir:
dir = "."
return list_articles(dir, self.extension)
do_l = do_list
do_ls = do_list
def do_dirs(self, dir):
"""
Show directories in current working dir
Usage:
dirs
d
dirs Databases/
"""
if not dir:
dir = "."
return list_directories(dir)
do_d = do_dirs
def do_cd(self, dir):
"""
Change directory
Usage:
cd Programming/
cd
"""
change_directory(dir, self.datadir)
def do_pwd(self, line):
"""
Show current directory
Usage:
pwd
"""
print(os.path.relpath(os.getcwd(), self.datadir))
def do_edit(self, article, test=False):
"""
Edit or create new article.
Usage:
edit databases/mongodb
edit intro
"""
return edit_article(article, os.getcwd(), self.editor, self.repo,
self.default_commit_msg, self.extension, test, self.editorflags)
do_e = do_edit
def do_view(self, article):
"""
View an article. Creates temporary file with converted markdown to
ansi colored output. Opens your PAGER. (Only less supported atm)
Usage:
view databases/mongodb
view intro
"""
return view_article(article, os.getcwd(), self.pager, self.extension,
self.pagerflags, self.colors)
def do_mail(self, article):
"""
Mail an article to a friend
Usage:
mail databases/mongodb
Recipient: <EMAIL>
mail programming/r/loops
mail intro
"""
return mail_article(article, os.getcwd(), self.mailfrom, self.extension)
def do_delete(self, article):
"""
Delete an article
Usage:
delete databases/mongodb
rm databases/mssql
"""
delete_article(article, os.getcwd(), self.repo, self.extension)
do_rm = do_delete
def do_move(self, args):
"""
Move an article to a new location
Usage:
move databases/mongodb databases/MongoDB
move life/foo notes/foo
mv life/foo notes/foo
"""
move_article(os.getcwd(), args, self.repo, self.extension)
do_mv = do_move
def do_search(self, keyword):
"""
Search for keyword in current directory
Usage:
search mongodb
search foo
"""
print(search_article(keyword, os.getcwd(), self.datadir,
self.exclude))
def do_status(self, line):
"""
Show git repo status of your docs
Usage:
status
"""
print(self.repo.git.status())
def do_log(self, args):
"""
Show git logs of your docs.
Usage:
        log                     # default loglines: 10
log 20 # show 20 loglines
log 20 article # show log for specific article
log databases/mongodb 3 # same
"""
show_log(args, self.repo, self.extension)
def do_info(self, article):
"""
Show infos for an article
Usage:
info article
info Databases/mongodb
Created: 2014-01-18 11:18:03 +0100
Updated: 2015-10-23 14:14:44 +0200
Commits: 26
Lines: 116
Words: 356
Characters: 2438
"""
info_article(article, os.getcwd(), self.repo, self.extension)
def do_diff(self, args):
"""
Show git diffs between files and commits
Usage:
diff 7 # show diff for last 7 changes
diff 1 article # show diff for last change to article
diff # show last 5 diffs
"""
show_diff(args, self.repo, self.extension)
def do_undo(self, args):
"""
You can revert your changes (use revert from git)
Usage:
undo HEAD
undo 355f375
Will ask for confirmation.
"""
undo_change(args, self.repo)
def do_stats(self, args):
"""
Calculate some statistics on your docs
Usage:
stats
"""
show_stats(args, self.repo, self.datadir)
def do_version(self, args):
"""
Show version of cmddocs
Usage:
version
"""
print("cmddocs %s" % __version__)
do_revert = do_undo
### exit
def do_exit(self, args):
"""
Exit cmddocs
Usage:
exit
"""
return True
do_EOF = do_exit
### completions
complete_l = path_complete
complete_ls = path_complete
complete_list = path_complete
complete_d = path_complete
complete_dirs = path_complete
complete_view = path_complete
complete_cd = path_complete
complete_e = path_complete
complete_edit = path_complete
complete_rm = path_complete
complete_delete = path_complete
complete_mail = path_complete
complete_mv = path_complete
complete_move = path_complete
complete_log = path_complete
complete_info = path_complete
def ctrlc(sig, frame):
""" Handle Interrupts """
print("\n")
sys.exit(0)
signal.signal(signal.SIGINT, ctrlc)
def main():
""" Call loop method """
Cmddocs().cmdloop()
if __name__ == '__main__':
main()
|
<filename>sdk/loadtestservice/azure-mgmt-loadtestservice/azure/mgmt/loadtestservice/models/_models_py3.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._load_test_client_enums import *
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~load_test_client.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~load_test_client.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~load_test_client.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
*,
error: Optional["ErrorDetail"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~load_test_client.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~load_test_client.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = tags
self.location = location
class LoadTestResource(TrackedResource):
"""LoadTest details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~load_test_client.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param identity: The type of identity used for the resource.
:type identity: ~load_test_client.models.SystemAssignedServiceIdentity
:param description: Description of the resource.
:type description: str
:ivar provisioning_state: Resource provisioning state. Possible values include: "Succeeded",
"Failed", "Canceled", "Deleted".
:vartype provisioning_state: str or ~load_test_client.models.ResourceState
:ivar data_plane_uri: Resource data plane URI.
:vartype data_plane_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'description': {'max_length': 512, 'min_length': 0},
'provisioning_state': {'readonly': True},
'data_plane_uri': {'readonly': True, 'max_length': 2083, 'min_length': 0},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'SystemAssignedServiceIdentity'},
'description': {'key': 'properties.description', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'data_plane_uri': {'key': 'properties.dataPlaneURI', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
identity: Optional["SystemAssignedServiceIdentity"] = None,
description: Optional[str] = None,
**kwargs
):
super(LoadTestResource, self).__init__(tags=tags, location=location, **kwargs)
self.identity = identity
self.description = description
self.provisioning_state = None
self.data_plane_uri = None
class LoadTestResourcePageList(msrest.serialization.Model):
"""List of resources page result.
:param value: List of resources in current page.
:type value: list[~load_test_client.models.LoadTestResource]
:param next_link: Link to next page of resources.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[LoadTestResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["LoadTestResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(LoadTestResourcePageList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class LoadTestResourcePatchRequestBody(msrest.serialization.Model):
"""LoadTest resource patch request body.
:param tags: A set of tags. Resource tags.
:type tags: any
:param identity: The type of identity used for the resource.
:type identity: ~load_test_client.models.SystemAssignedServiceIdentity
:param properties: Load Test resource properties.
:type properties: ~load_test_client.models.LoadTestResourcePatchRequestBodyProperties
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': 'object'},
'identity': {'key': 'identity', 'type': 'SystemAssignedServiceIdentity'},
'properties': {'key': 'properties', 'type': 'LoadTestResourcePatchRequestBodyProperties'},
}
def __init__(
self,
*,
tags: Optional[Any] = None,
identity: Optional["SystemAssignedServiceIdentity"] = None,
properties: Optional["LoadTestResourcePatchRequestBodyProperties"] = None,
**kwargs
):
super(LoadTestResourcePatchRequestBody, self).__init__(**kwargs)
self.tags = tags
self.identity = identity
self.properties = properties
class LoadTestResourcePatchRequestBodyProperties(msrest.serialization.Model):
"""Load Test resource properties.
:param description: Description of the resource.
:type description: str
"""
_validation = {
'description': {'max_length': 512, 'min_length': 0},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
**kwargs
):
super(LoadTestResourcePatchRequestBodyProperties, self).__init__(**kwargs)
self.description = description
class Operation(msrest.serialization.Model):
"""Details of a REST API operation, returned from the Resource Provider Operations API.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the operation, as per Resource-Based Access Control (RBAC). Examples:
"Microsoft.Compute/virtualMachines/write", "Microsoft.Compute/virtualMachines/capture/action".
:vartype name: str
:ivar is_data_action: Whether the operation applies to data-plane. This is "true" for
data-plane operations and "false" for ARM/control-plane operations.
:vartype is_data_action: bool
:param display: Localized display information for this particular operation.
:type display: ~load_test_client.models.OperationDisplay
:ivar origin: The intended executor of the operation; as in Resource Based Access Control
(RBAC) and audit logs UX. Default value is "user,system". Possible values include: "user",
"system", "user,system".
:vartype origin: str or ~load_test_client.models.Origin
:ivar action_type: Enum. Indicates the action type. "Internal" refers to actions that are for
internal only APIs. Possible values include: "Internal".
:vartype action_type: str or ~load_test_client.models.ActionType
"""
_validation = {
'name': {'readonly': True},
'is_data_action': {'readonly': True},
'origin': {'readonly': True},
'action_type': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'action_type': {'key': 'actionType', 'type': 'str'},
}
def __init__(
self,
*,
display: Optional["OperationDisplay"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.is_data_action = None
self.display = display
self.origin = None
self.action_type = None
class OperationDisplay(msrest.serialization.Model):
"""Localized display information for this particular operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: The localized friendly form of the resource provider name, e.g. "Microsoft
Monitoring Insights" or "Microsoft Compute".
:vartype provider: str
:ivar resource: The localized friendly name of the resource type related to this operation.
E.g. "Virtual Machines" or "Job Schedule Collections".
:vartype resource: str
:ivar operation: The concise, localized friendly name for the operation; suitable for
dropdowns. E.g. "Create or Update Virtual Machine", "Restart Virtual Machine".
:vartype operation: str
:ivar description: The short, localized friendly description of the operation; suitable for
tool tips and detailed views.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationListResult(msrest.serialization.Model):
"""A list of REST API operations supported by an Azure Resource Provider. It contains an URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of operations supported by the resource provider.
:vartype value: list[~load_test_client.models.Operation]
:ivar next_link: URL to get the next set of operation list results (if there are any).
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SystemAssignedServiceIdentity(msrest.serialization.Model):
"""Managed service identity (either system assigned, or none).
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar principal_id: The service principal ID of the system assigned identity. This property
will only be provided for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
provided for a system assigned identity.
:vartype tenant_id: str
:param type: Required. Type of managed service identity (either system assigned, or none).
Possible values include: "None", "SystemAssigned".
:type type: str or ~load_test_client.models.SystemAssignedServiceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
'type': {'required': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
type: Union[str, "SystemAssignedServiceIdentityType"],
**kwargs
):
super(SystemAssignedServiceIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~load_test_client.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~load_test_client.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware vCloud platform.
"""
import os
import subprocess
import shutil
import time
import urllib2
from oslo.config import cfg
import sshclient
from nova.compute import power_state
from nova.compute import task_states
from nova import image
from nova.openstack.common import log as logging
from nova.openstack.common import fileutils as fileutils
from nova.i18n import _
from nova.virt.hybrid.common import fake_driver
from nova.virt.hybrid.common import common_tools
from nova.virt.hybrid.vcloud import hyper_agent_api
from nova.virt.hybrid.vcloud import util
from nova.virt.hybrid.vcloud.vcloud import VCLOUD_STATUS
from nova.virt.hybrid.vcloud.vcloud_client import VCloudClient
from nova.volume.cinder import API as cinder_api
from nova.network import neutronv2
vcloudapi_opts = [
cfg.StrOpt('vcloud_node_name',
default='vcloud_node_01',
               help='node name; each node corresponds to a vCloud VCD '
                    'host.'),
cfg.StrOpt('vcloud_host_ip',
help='Hostname or IP address for connection to VMware VCD '
'host.'),
cfg.IntOpt('vcloud_host_port',
default=443,
               help='Host port for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_host_username',
help='Host username for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_host_password',
help='Host password for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_org',
help='User org for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_vdc',
help='Vdc for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_version',
default='5.5',
help='Version for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_service',
default='85-719',
help='Service for connection to VMware VCD '
'host.'),
cfg.BoolOpt('vcloud_verify',
default=False,
help='Verify for connection to VMware VCD '
'host.'),
cfg.BoolOpt('use_link_clone',
default=True,
help='Use link clone or not '),
cfg.StrOpt('vcloud_service_type',
default='vcd',
help='Service type for connection to VMware VCD '
'host.'),
cfg.IntOpt('vcloud_api_retry_count',
default=2,
help='Api retry count for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_conversion_dir',
default='/vcloud/convert_tmp',
help='the directory where images are converted in '),
cfg.StrOpt('vcloud_volumes_dir',
default='/vcloud/volumes',
help='the directory of volume files'),
cfg.StrOpt('vcloud_vm_naming_rule',
default='openstack_vm_id',
help='the rule to name vcloud VMs, valid options:'
'openstack_vm_id, openstack_vm_name, cascaded_openstack_rule'),
cfg.DictOpt('vcloud_flavor_map',
default={
'm1.tiny': '1',
'm1.small': '2',
'm1.medium': '3',
'm1.large': '4',
'm1.xlarge': '5'},
help='map nova flavor name to vcloud vm specification id'),
cfg.StrOpt('metadata_iso_catalog',
default='metadata-isos',
               help='The metadata iso catalog.'),
cfg.StrOpt('provider_base_network_name',
help='The provider network name which base provider network use.'),
cfg.StrOpt('provider_tunnel_network_name',
help='The provider network name which tunnel provider network use.'),
cfg.StrOpt('image_user',
default='',
help=''),
cfg.StrOpt('image_password',
default='',
help=''),
cfg.StrOpt('tunnel_cidr',
help='The tunnel cidr of provider network.'),
cfg.StrOpt('route_gw',
help='The route gw of the provider network.')
]
status_dict_vapp_to_instance = {
VCLOUD_STATUS.FAILED_CREATION: power_state.CRASHED,
VCLOUD_STATUS.UNRESOLVED: power_state.NOSTATE,
VCLOUD_STATUS.RESOLVED: power_state.NOSTATE,
VCLOUD_STATUS.DEPLOYED: power_state.NOSTATE,
VCLOUD_STATUS.SUSPENDED: power_state.SUSPENDED,
VCLOUD_STATUS.POWERED_ON: power_state.RUNNING,
VCLOUD_STATUS.WAITING_FOR_INPUT: power_state.NOSTATE,
VCLOUD_STATUS.UNKNOWN: power_state.NOSTATE,
VCLOUD_STATUS.UNRECOGNIZED: power_state.NOSTATE,
VCLOUD_STATUS.POWERED_OFF: power_state.SHUTDOWN,
VCLOUD_STATUS.INCONSISTENT_STATE: power_state.NOSTATE,
VCLOUD_STATUS.MIXED: power_state.NOSTATE,
VCLOUD_STATUS.DESCRIPTOR_PENDING: power_state.NOSTATE,
VCLOUD_STATUS.COPYING_CONTENTS: power_state.NOSTATE,
VCLOUD_STATUS.DISK_CONTENTS_PENDING: power_state.NOSTATE,
VCLOUD_STATUS.QUARANTINED: power_state.NOSTATE,
VCLOUD_STATUS.QUARANTINE_EXPIRED: power_state.NOSTATE,
VCLOUD_STATUS.REJECTED: power_state.NOSTATE,
VCLOUD_STATUS.TRANSFER_TIMEOUT: power_state.NOSTATE,
VCLOUD_STATUS.VAPP_UNDEPLOYED: power_state.NOSTATE,
VCLOUD_STATUS.VAPP_PARTIALLY_DEPLOYED: power_state.NOSTATE,
}
CONF = cfg.CONF
CONF.register_opts(vcloudapi_opts, 'vcloud')
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
class VCloudDriver(fake_driver.FakeNovaDriver):
"""The VCloud host connection object."""
def __init__(self, virtapi, scheme="https"):
self._node_name = CONF.vcloud.vcloud_node_name
self._vcloud_client = VCloudClient(scheme=scheme)
self.cinder_api = cinder_api()
if not os.path.exists(CONF.vcloud.vcloud_conversion_dir):
os.makedirs(CONF.vcloud.vcloud_conversion_dir)
if not os.path.exists(CONF.vcloud.vcloud_volumes_dir):
os.makedirs(CONF.vcloud.vcloud_volumes_dir)
self.hyper_agent_api = hyper_agent_api.HyperAgentAPI()
super(VCloudDriver, self).__init__(virtapi)
def _update_vm_task_state(self, instance, task_state):
instance.task_state = task_state
instance.save()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
LOG.info('begin time of vcloud create vm is %s' %
(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
image_cache_dir = CONF.vcloud.vcloud_conversion_dir
volume_cache_dir = CONF.vcloud.vcloud_volumes_dir
# update port bind host
self._binding_host(context, network_info, instance.uuid)
this_conversion_dir = '%s/%s' % (CONF.vcloud.vcloud_conversion_dir,
instance.uuid)
fileutils.ensure_tree(this_conversion_dir)
os.chdir(this_conversion_dir)
#0: create metadata iso and upload to vcloud
rabbit_host = CONF.rabbit_host
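        # pick a rabbit host the guest can actually reach: if the configured
        # host is a loopback address, fall back to the first entry in
        # rabbit_hosts, and strip any ':port' suffix before writing it to the
        # metadata ISO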
if 'localhost' in rabbit_host or '127.0.0.1' in rabbit_host:
rabbit_host = CONF.rabbit_hosts[0]
if ':' in rabbit_host:
rabbit_host = rabbit_host[0:rabbit_host.find(':')]
        iso_file = common_tools.create_user_data_iso(
            "userdata.iso",
            {"rabbit_userid": CONF.rabbit_userid,
             "rabbit_password": CONF.rabbit_password,
             "rabbit_host": rabbit_host,
             "host": instance.uuid,
             "tunnel_cidr": CONF.vcloud.tunnel_cidr,
             "route_gw": CONF.vcloud.route_gw},
            this_conversion_dir)
vapp_name = self._get_vcloud_vapp_name(instance)
metadata_iso = self._vcloud_client.upload_metadata_iso(iso_file,
vapp_name)
# 0.get vorg, user name,password vdc from configuration file (only one
# org)
# 1.1 get image id, vm info ,flavor info
# image_uuid = instance.image_ref
if 'id' in image_meta:
# create from image
image_uuid = image_meta['id']
else:
# create from volume
image_uuid = image_meta['properties']['image_id']
#NOTE(nkapotoxin): create vapp with vapptemplate
network_names = [CONF.vcloud.provider_tunnel_network_name, CONF.vcloud.provider_base_network_name]
network_configs = self._vcloud_client.get_network_configs(network_names)
# create vapp
        if CONF.vcloud.use_link_clone:
            vapp = self._vcloud_client.create_vapp(vapp_name, image_uuid, network_configs)
        else:
            vapp = self._vcloud_client.create_vapp(vapp_name, image_uuid, network_configs,
                                                   root_gb=instance.get_flavor().root_gb)
# generate the network_connection
network_connections = self._vcloud_client.get_network_connections(vapp, network_names)
# update network
self._vcloud_client.update_vms_connections(vapp, network_connections)
# update vm specification
self._vcloud_client.modify_vm_cpu(vapp, instance.get_flavor().vcpus)
self._vcloud_client.modify_vm_memory(vapp, instance.get_flavor().memory_mb)
# mount it
self._vcloud_client.insert_media(vapp_name, metadata_iso)
# power on it
self._vcloud_client.power_on_vapp(vapp_name)
# 7. clean up
shutil.rmtree(this_conversion_dir, ignore_errors=True)
LOG.info('end time of vcloud create vm is %s' %
(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
# update port bind host
self._binding_host(context, network_info, instance.uuid)
@staticmethod
def _binding_host(context, network_info, host_id):
neutron = neutronv2.get_client(context, admin=True)
port_req_body = {'port': {'binding:host_id': host_id}}
for vif in network_info:
neutron.update_port(vif.get('id'), port_req_body)
def _get_vcloud_vapp_name(self, instance):
if CONF.vcloud.vcloud_vm_naming_rule == 'openstack_vm_id':
return instance.uuid
elif CONF.vcloud.vcloud_vm_naming_rule == 'openstack_vm_name':
return instance.display_name
elif CONF.vcloud.vcloud_vm_naming_rule == 'cascaded_openstack_rule':
return instance.display_name
else:
return instance.uuid
def _get_vcloud_volume_name(self, volume_id, volume_name):
prefix = 'volume@'
if volume_name.startswith(prefix):
vcloud_volume_name = volume_name[len(prefix):]
else:
vcloud_volume_name = volume_id
return vcloud_volume_name
def _download_vmdk_from_vcloud(self, context, src_url, dst_file_name):
# local_file_handle = open(dst_file_name, "wb")
local_file_handle = fileutils.file_open(dst_file_name, "wb")
remote_file_handle = urllib2.urlopen(src_url)
file_size = remote_file_handle.headers['content-length']
util.start_transfer(context, remote_file_handle, file_size,
write_file_handle=local_file_handle)
def _upload_image_to_glance(
self, context, src_file_name, image_id, instance):
vm_task_state = instance.task_state
file_size = os.path.getsize(src_file_name)
read_file_handle = fileutils.file_open(src_file_name, "rb")
metadata = IMAGE_API.get(context, image_id)
# The properties and other fields that we need to set for the image.
image_metadata = {"disk_format": "qcow2",
"is_public": "false",
"name": metadata['name'],
"status": "active",
"container_format": "bare",
"size": file_size,
"properties": {"owner_id": instance['project_id']}}
util.start_transfer(context,
read_file_handle,
file_size,
image_id=metadata['id'],
image_meta=image_metadata,
task_state=task_states.IMAGE_UPLOADING,
instance=instance)
self._update_vm_task_state(instance, task_state=vm_task_state)
#TODO: test it
def snapshot(self, context, instance, image_id, update_task_state):
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
# 1. get vmdk url
vapp_name = self._get_vcloud_vapp_name(instance)
remote_vmdk_url = self._vcloud_client.query_vmdk_url(vapp_name)
# 2. download vmdk
temp_dir = '%s/%s' % (CONF.vcloud.vcloud_conversion_dir, instance.uuid)
fileutils.ensure_tree(temp_dir)
vmdk_name = remote_vmdk_url.split('/')[-1]
local_file_name = '%s/%s' % (temp_dir, vmdk_name)
self._download_vmdk_from_vcloud(
context,
remote_vmdk_url,
local_file_name)
# 3. convert vmdk to qcow2
converted_file_name = temp_dir + '/converted-file.qcow2'
        convert_command = "qemu-img convert -f %s -O %s %s %s" % \
                          ('vmdk',
                           'qcow2',
                           local_file_name,
                           converted_file_name)
        convert_result = subprocess.call([convert_command], shell=True)
if convert_result != 0:
# do something, change metadata
LOG.error('converting file failed')
        # 4. upload qcow2 to image repository
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self._upload_image_to_glance(
context,
converted_file_name,
image_id,
instance)
# 5. delete temporary files
shutil.rmtree(temp_dir, ignore_errors=True)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
LOG.debug('[vcloud nova driver] begin reboot instance: %s' %
instance.uuid)
vapp_name = self._get_vcloud_vapp_name(instance)
try:
self._vcloud_client.reboot_vapp(vapp_name)
except Exception as e:
LOG.error('reboot instance %s failed, %s' % (vapp_name, e))
def power_off(self, instance, shutdown_timeout=0, shutdown_attempts=0):
        LOG.debug('[vcloud nova driver] begin power off instance: %s' %
instance.uuid)
vapp_name = self._get_vcloud_vapp_name(instance)
try:
self._vcloud_client.power_off_vapp(vapp_name)
except Exception as e:
LOG.error('power off failed, %s' % e)
def power_on(self, context, instance, network_info, block_device_info):
vapp_name = self._get_vcloud_vapp_name(instance)
self._vcloud_client.power_on_vapp(vapp_name)
def _do_destroy_vm(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
vapp_name = self._get_vcloud_vapp_name(instance)
try:
self._vcloud_client.power_off_vapp(vapp_name)
except Exception as e:
LOG.error('power off failed, %s' % e)
vm_task_state = instance.task_state
self._update_vm_task_state(instance, vm_task_state)
try:
self._vcloud_client.delete_vapp(vapp_name)
except Exception as e:
LOG.error('delete vapp failed %s' % e)
try:
self._vcloud_client.delete_metadata_iso(vapp_name)
except Exception as e:
LOG.error('delete metadata iso failed %s' % e)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
LOG.debug('[vcloud nova driver] destroy: %s' % instance.uuid)
self._do_destroy_vm(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
self.cleanup(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
# delete agent
instance_id = instance.uuid
neutron_client = neutronv2.get_client(context=None, admin=True)
agent = neutron_client.list_agents(host=instance_id)
if len(agent['agents']) == 1:
neutron_client.delete_agent(agent['agents'][0]['id'])
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
if destroy_vifs:
self.unplug_vifs(instance, network_info)
LOG.debug("Cleanup network finished", instance=instance)
def attach_interface(self, instance, image_meta, vif):
LOG.debug("attach_interface: %s, %s" % (instance, vif))
def detach_interface(self, instance, vif):
LOG.debug("detach_interface: %s, %s" % (instance, vif))
def _get_vapp_ip(self, instance):
instance_id = instance.uuid
neutron_client = neutronv2.get_client(context=None, admin=True)
agent = neutron_client.list_agents(host=instance_id)
        # poll the neutron agent list until the agent for this instance shows
        # up (up to 10 retries, 10 seconds apart)
        times = 10
        while len(agent['agents']) == 0 and times > 0:
            time.sleep(10)
            agent = neutron_client.list_agents(host=instance_id)
            times -= 1
        if len(agent['agents']) == 0:
            return None
        return agent['agents'][0]['configurations']['tunneling_ip']
def _attach_volume_iscsi(self, instance, connection_info):
user = CONF.vcloud.image_user
        pwd = CONF.vcloud.image_password
vapp_ip = self._get_vapp_ip(instance)
if vapp_ip:
host = vapp_ip
else:
            LOG.error("vapp_ip is None, attach volume failed")
            raise Exception(_("vapp_ip is None, attach volume failed"))
        ssh_client = sshclient.SSH(user, host, password=pwd)
target_iqn = connection_info['data']['target_iqn']
target_portal = connection_info['data']['target_portal']
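        # probe whether the initiator already has a node record for this
        # target; the non-zero statuses checked below (21/255) are treated as
        # "no record found", in which case the record is created with --op new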
cmd1 = "sudo iscsiadm -m node -T %s -p %s" % (target_iqn, target_portal)
while True:
try:
cmd1_status, cmd1_out, cmd1_err = ssh_client.execute(cmd1)
LOG.debug("sudo cmd1 info status=%s ,out=%s, err=%s " % (cmd1_status, cmd1_out, cmd1_err))
if cmd1_status in [21, 255]:
cmd2 = "sudo iscsiadm -m node -T %s -p %s --op new" % (target_iqn, target_portal)
cmd2_status, cmd2_out, cmd2_err = ssh_client.execute(cmd2)
LOG.debug("sudo cmd2 info status=%s ,out=%s, err=%s " % (cmd2_status, cmd2_out, cmd2_err))
break
except sshclient.SSHError:
LOG.debug("wait for vm to initialize network")
time.sleep(5)
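        # list the active iSCSI sessions; a login (and automatic node startup)
        # is only issued below if no existing session already matches this
        # target's portal and IQN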
cmd3 = "sudo iscsiadm -m session"
cmd3_status, cmd3_out, cmd3_err = ssh_client.execute(cmd3)
portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
for p in cmd3_out.splitlines() if p.startswith("tcp:")]
stripped_portal = connection_info['data']['target_portal'].split(",")[0]
if len(portals) == 0 or len([s for s in portals
if stripped_portal ==
s['portal'].split(",")[0]
and
s['iqn'] ==
connection_info['data']['target_iqn']]
) == 0:
cmd4 = "sudo iscsiadm -m node -T %s -p %s --login" % (target_iqn, target_portal)
cmd4_status, cmd4_out, cmd4_err = ssh_client.execute(cmd4)
LOG.debug("sudo cmd4 info status=%s ,out=%s, err=%s " % (cmd4_status, cmd4_out, cmd4_err))
cmd5 = "sudo iscsiadm -m node -T %s -p %s --op update -n node.startup -v automatic" % \
(target_iqn, target_portal)
cmd5_status, cmd5_out, cmd5_err = ssh_client.execute(cmd5)
LOG.debug("sudo cmd5 info status=%s ,out=%s, err=%s " % (cmd5_status, cmd5_out, cmd5_err))
ssh_client.close()
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach volume storage to VM instance."""
instance_name = instance['display_name']
LOG.debug("Attach_volume: %(connection_info)s to %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
volume_id = connection_info['data']['volume_id']
driver_type = connection_info['driver_volume_type']
volume = self.cinder_api.get(context, volume_id)
volume_name = volume['display_name']
# use volume_name as vcloud disk name, remove prefix str `volume@`
# if volume_name does not start with volume@, then use volume id instead
vcloud_volume_name = self._get_vcloud_volume_name(volume_id,
volume_name)
# find volume reference by it's name
vapp_name = self._get_vcloud_vapp_name(instance)
if driver_type == 'iscsi':
self._attach_volume_iscsi(instance, connection_info)
return
result, resp = self._vcloud_client.get_disk_ref(vcloud_volume_name)
if result:
LOG.debug("Find volume successful, disk name is: %(disk_name)s"
"disk ref's href is: %(disk_href)s.",
{'disk_name': vcloud_volume_name,
'disk_href': resp.href})
else:
            LOG.error(_('Unable to find volume %s to attach to instance'),
vcloud_volume_name)
if self._vcloud_client.attach_disk_to_vm(vapp_name, resp):
LOG.info("Volume %(volume_name)s attached to: %(instance_name)s",
{'volume_name': vcloud_volume_name,
'instance_name': instance_name})
def _detach_volume_iscsi(self, instance, connection_info):
user = CONF.vcloud.image_user
pwd = CONF.vcloud.image_password
vapp_ip = self._get_vapp_ip(instance)
if vapp_ip:
host = vapp_ip
        else:
            LOG.error("vapp_ip is None, detach volume failed")
            raise Exception(_("vapp_ip is None, detach volume failed"))
        ssh_client = sshclient.SSH(user, host, password=pwd)
target_iqn = connection_info['data']['target_iqn']
target_portal = connection_info['data']['target_portal']
cmd1 = "ls -l /dev/disk/by-path/ | grep %s | awk -F '/' '{print $NF}'" % target_iqn
cmd1_status, cmd1_out, cmd1_err = ssh_client.execute(cmd1)
LOG.debug(" cmd1 info status=%s ,out=%s, err=%s " % (cmd1_status, cmd1_out, cmd1_err))
device = "/dev/" + cmd1_out.split('\n')[0]
path = "/sys/block/" + cmd1_out.split('\n')[0] + "/device/delete"
cmd2 = "sudo blockdev --flushbufs %s" % device
cmd2_status, cmd2_out, cmd2_err = ssh_client.execute(cmd2)
LOG.debug(" cmd2 info status=%s ,out=%s, err=%s " % (cmd2_status, cmd2_out, cmd2_err))
cmd3 = "echo 1 | sudo tee -a %s" % path
cmd3_status, cmd3_out, cmd3_err = ssh_client.execute(cmd3)
LOG.debug("sudo cmd3 info status=%s ,out=%s, err=%s " % (cmd3_status, cmd3_out, cmd3_err))
cmd4 = "sudo iscsiadm -m node -T %s -p %s --op update -n node.startup -v manual" % (target_iqn, target_portal)
cmd4_status, cmd4_out, cmd4_err = ssh_client.execute(cmd4)
LOG.debug("sudo cmd4 info status=%s ,out=%s, err=%s " % (cmd4_status, cmd4_out, cmd4_err))
cmd5 = "sudo iscsiadm -m node -T %s -p %s --logout" % (target_iqn, target_portal)
cmd5_status, cmd5_out, cmd5_err = ssh_client.execute(cmd5)
LOG.debug("sudo cmd5 info status=%s ,out=%s, err=%s " % (cmd5_status, cmd5_out, cmd5_err))
cmd6 = "sudo iscsiadm -m node -T %s -p %s --op delete" % (target_iqn, target_portal)
cmd6_status, cmd6_out, cmd6_err = ssh_client.execute(cmd6)
LOG.debug("sudo cmd6 info status=%s ,out=%s, err=%s " % (cmd6_status, cmd6_out, cmd6_err))
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach the disk attached to the instance."""
instance_name = instance['display_name']
LOG.debug("Detach_volume: %(connection_info)s to %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
volume_id = connection_info['data']['volume_id']
driver_type = connection_info['driver_volume_type']
if driver_type == 'iscsi':
self._detach_volume_iscsi(instance, connection_info)
return
volume_name = connection_info['data']['display_name']
# use volume_name as vcloud disk name, remove prefix str `volume@`
# if volume_name does not start with volume@, then use volume id instead
vcloud_volume_name = self._get_vcloud_volume_name(volume_id,
volume_name)
# find volume reference by it's name
vapp_name = self._get_vcloud_vapp_name(instance)
#if driver_type == 'iscsi':
# self._detach_volume_iscsi(instance, connection_info)
# return
result, resp = self._vcloud_client.get_disk_ref(vcloud_volume_name)
if result:
LOG.debug("Find volume successful, disk name is: %(disk_name)s"
"disk ref's href is: %(disk_href)s.",
{'disk_name': vcloud_volume_name,
'disk_href': resp.href})
else:
            LOG.error(_('Unable to find volume %s to detach from instance'),
vcloud_volume_name)
if self._vcloud_client.detach_disk_from_vm(vapp_name, resp):
LOG.info("Volume %(volume_name)s detached from: %(instance_name)s",
{'volume_name': vcloud_volume_name,
'instance_name': instance_name})
def get_info(self, instance):
vapp_name = self._get_vcloud_vapp_name(instance)
state = self._vcloud_client.get_vcloud_vapp_status(vapp_name)
return {'state': state,
'max_mem': 0,
'mem': 0,
'num_cpu': 1,
'cpu_time': 0}
def get_available_nodes(self, refresh=False):
return [self._node_name]
def plug_vifs(self, instance, network_info):
LOG.debug("plug_vifs")
# TODO: retrieve provider info ips/macs for vcloud
for vif in network_info:
self.hyper_agent_api.plug(instance.uuid, vif, None)
def unplug_vifs(self, instance, network_info):
LOG.debug("unplug_vifs")
for vif in network_info:
self.hyper_agent_api.unplug(instance.uuid, vif)
|
#!/usr/bin/python
'''
Example of inverse kinematics using the simple gradient descent method
'''
from riglib.bmi import robot_arms
import imp
imp.reload(robot_arms)
import numpy as np
import matplotlib.pyplot as plt
import time
from riglib.stereo_opengl import ik
import cProfile
pi = np.pi
q = np.array([0, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) * pi/180
q_sub = q[1::3]
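# q[1::3] picks one angle per 3-DOF link (presumably the in-plane rotation),
# giving the 4 joint angles used by the planar XZ chain below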
chain = robot_arms.KinematicChain([15, 15, 5, 5])
[t, allt] = chain.forward_kinematics(q);
planar_chain = robot_arms.PlanarXZKinematicChain([15, 15, 5, 5])
[t, allt] = planar_chain.forward_kinematics(q_sub);
# TODO check the sign for the finger joint limits
inf = np.inf
planar_chain.joint_limits = [(-pi, pi), (-pi, 0), (-pi/2, pi/2), (-pi/2, 10*pi/180)]
# target_pos = np.array([10, 0, 10])
shoulder_anchor = np.array([2, 0, -15])
x_target_pos = (np.random.randn() - 0.5)*25
z_target_pos = (np.random.randn() - 0.5)*14
target_pos = np.array([x_target_pos, 0, z_target_pos]) - shoulder_anchor
target_pos = np.array([-14.37130744, 0. , 22.97472612])
q = q_sub[:]
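# IK cost: stay close to the starting posture in joint space while penalizing
# (with the given weight) the endpoint's distance from the target position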
def cost(q, q_start, weight=10):
return np.linalg.norm(q - q_start) + weight*np.linalg.norm(planar_chain.endpoint_pos(q) - target_pos)
def stuff():
# Initialize the particles;
n_particles = 10
n_joints = planar_chain.n_joints
q_start = np.array([np.random.uniform(-pi, pi), np.random.uniform(0, pi), np.random.uniform(-pi/2, pi/2), np.random.uniform(-pi/2, 10*pi/180)])
noise = 5*np.random.randn(3)
noise[1] = 0
angles = ik.inv_kin_2D(target_pos + noise, 15., 25.)
q_start_constr = np.array([-angles[0][1], -angles[0][3], 0, 0])
n_iter = 10
particles_q = np.tile(q_start_constr, [n_particles, 1])
particles_v = np.random.randn(n_particles, n_joints)
cost_fn = lambda x: cost(x, q_start)
gbest = particles_q.copy()
gbestcost = np.array(list(map(cost_fn, gbest)))
pbest = gbest[np.argmin(gbestcost)]
pbestcost = cost_fn(pbest)
min_limits = np.array([x[0] for x in planar_chain.joint_limits])
max_limits = np.array([x[1] for x in planar_chain.joint_limits])
min_limits = np.tile(min_limits, [n_particles, 1])
max_limits = np.tile(max_limits, [n_particles, 1])
start_time = time.time()
for k in range(n_iter):
# update positions of particles
particles_q += particles_v
# apply joint limits
# particles_q = np.array(map(lambda x: planar_chain.apply_joint_limits(x)[0], particles_q))
min_viol = particles_q < min_limits
max_viol = particles_q > max_limits
particles_q[min_viol] = min_limits[min_viol]
particles_q[max_viol] = max_limits[max_viol]
# update the costs
costs = np.array(list(map(cost_fn, particles_q)))
# update the 'bests'
gbest[gbestcost > costs] = particles_q[gbestcost > costs]
gbestcost = list(map(cost_fn, gbest))
pbest = gbest[np.argmin(gbestcost)]
pbestcost = cost_fn(pbest)
# update the velocity
        # inertia term plus attraction toward the single best solution found
        # so far (pbest here) and toward each particle's own best (gbest here);
        # note the naming is swapped relative to the usual PSO convention
        phi1 = 1  # np.random.rand()
        phi2 = 1  # np.random.rand()
        w = 0.25   # inertia weight
        c1 = 0.5   # attraction to the overall best
        c2 = 0.25  # attraction to the per-particle bests
        particles_v = (w*particles_v
                       + c1*phi1*(np.tile(pbest, [n_particles, 1]) - particles_q)
                       + c2*phi2*(gbest - particles_q))
if np.linalg.norm(planar_chain.endpoint_pos(pbest) - target_pos) < 0.5:
break
end_time = time.time()
print("Runtime = %g" % (end_time-start_time))
return pbest
starting_pos = np.array([-5., 0, 5])
target_pos = starting_pos - shoulder_anchor
q_start = planar_chain.random_sample()
noise = 5*np.random.randn(3)
noise[1] = 0
angles = ik.inv_kin_2D(target_pos + noise, 15., 25.)
q_start_constr = np.array([-angles[0][1], -angles[0][3], 0, 0])
pbest = planar_chain.inverse_kinematics_pso(q_start_constr, target_pos, verbose=True, time_limit=1.)
# cProfile.run('planar_chain.inverse_kinematics_pso(q_start_constr, target_pos)', timeunit=0.001)
import cProfile, pstats, io
pr = cProfile.Profile(timeunit=0.001)
pr.enable()
planar_chain.inverse_kinematics_pso(q_start_constr, target_pos)
pr.disable()
s = io.StringIO()
sortby = 'time'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
# print planar_chain.endpoint_pos(pbest)
print("target position")
print(target_pos)
print("error = %g" % np.linalg.norm(planar_chain.endpoint_pos(pbest) - target_pos))
# print "q_start_constr"
# print q_start_constr * 180/np.pi
# print "q_start"
# print q_start * 180/np.pi
|
<filename>mailchimp_marketing_asyncio/models/rss_options1.py
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RSSOptions1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'feed_url': 'str',
'frequency': 'str',
'schedule': 'SendingSchedule',
'constrain_rss_img': 'bool'
}
attribute_map = {
'feed_url': 'feed_url',
'frequency': 'frequency',
'schedule': 'schedule',
'constrain_rss_img': 'constrain_rss_img'
}
def __init__(self, feed_url=None, frequency=None, schedule=None, constrain_rss_img=None): # noqa: E501
"""RSSOptions1 - a model defined in Swagger""" # noqa: E501
self._feed_url = None
self._frequency = None
self._schedule = None
self._constrain_rss_img = None
self.discriminator = None
self.feed_url = feed_url
self.frequency = frequency
if schedule is not None:
self.schedule = schedule
if constrain_rss_img is not None:
self.constrain_rss_img = constrain_rss_img
@property
def feed_url(self):
"""Gets the feed_url of this RSSOptions1. # noqa: E501
The URL for the RSS feed. # noqa: E501
:return: The feed_url of this RSSOptions1. # noqa: E501
:rtype: str
"""
return self._feed_url
@feed_url.setter
def feed_url(self, feed_url):
"""Sets the feed_url of this RSSOptions1.
The URL for the RSS feed. # noqa: E501
:param feed_url: The feed_url of this RSSOptions1. # noqa: E501
:type: str
"""
if feed_url is None:
raise ValueError("Invalid value for `feed_url`, must not be `None`") # noqa: E501
self._feed_url = feed_url
@property
def frequency(self):
"""Gets the frequency of this RSSOptions1. # noqa: E501
The frequency of the RSS Campaign. # noqa: E501
:return: The frequency of this RSSOptions1. # noqa: E501
:rtype: str
"""
return self._frequency
@frequency.setter
def frequency(self, frequency):
"""Sets the frequency of this RSSOptions1.
The frequency of the RSS Campaign. # noqa: E501
:param frequency: The frequency of this RSSOptions1. # noqa: E501
:type: str
"""
if frequency is None:
raise ValueError("Invalid value for `frequency`, must not be `None`") # noqa: E501
allowed_values = ["daily", "weekly", "monthly"] # noqa: E501
if frequency not in allowed_values:
raise ValueError(
"Invalid value for `frequency` ({0}), must be one of {1}" # noqa: E501
.format(frequency, allowed_values)
)
self._frequency = frequency
@property
def schedule(self):
"""Gets the schedule of this RSSOptions1. # noqa: E501
:return: The schedule of this RSSOptions1. # noqa: E501
:rtype: SendingSchedule
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""Sets the schedule of this RSSOptions1.
:param schedule: The schedule of this RSSOptions1. # noqa: E501
:type: SendingSchedule
"""
self._schedule = schedule
@property
def constrain_rss_img(self):
"""Gets the constrain_rss_img of this RSSOptions1. # noqa: E501
Whether to add CSS to images in the RSS feed to constrain their width in campaigns. # noqa: E501
:return: The constrain_rss_img of this RSSOptions1. # noqa: E501
:rtype: bool
"""
return self._constrain_rss_img
@constrain_rss_img.setter
def constrain_rss_img(self, constrain_rss_img):
"""Sets the constrain_rss_img of this RSSOptions1.
Whether to add CSS to images in the RSS feed to constrain their width in campaigns. # noqa: E501
:param constrain_rss_img: The constrain_rss_img of this RSSOptions1. # noqa: E501
:type: bool
"""
self._constrain_rss_img = constrain_rss_img
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RSSOptions1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RSSOptions1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# Copyright (c) 2009-2010 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import cgi
import httplib
import logging
import threading
import urlparse
import batchhttp.client
import httplib2
from oauth import oauth
import typepad
__all__ = ('OAuthAuthentication', 'OAuthClient', 'OAuthHttp', 'log')
log = logging.getLogger(__name__)
class OAuthAuthentication(httplib2.Authentication):
"""An `httplib2.Authentication` module that provides OAuth authentication.
The OAuth authentication will be tried automatically, but to use OAuth
authentication with a particular user agent (`Http` instance), it must
have the OAuth consumer and access token set as one of its sets of
credentials. For instance:
>>> csr = oauth.OAuthConsumer(key='blah', secret='moo')
>>> token = get_access_token_for(user)
>>> http.add_credentials(csr, token)
"""
def request(self, method, request_uri, headers, content):
"""Add the HTTP Authorization header to the headers for this request.
In this implementation, the Authorization header contains the OAuth
signing information and signature.
"""
# httplib2 only gives us the URI in parts, so rebuild it from the
# partial uri and host.
partial_uri = urlparse.urlsplit(request_uri)
# Check the query to see if the URI is already signed.
query = partial_uri[3]
querydict = cgi.parse_qs(query)
if 'oauth_signature' in querydict:
# The URI is already signed. Don't do anything.
return
uri = urlparse.urlunsplit((self.http.default_scheme, self.host) + partial_uri[2:])
req = self.signed_request(uri, method)
headers.update(req.to_header())
def signed_request(self, uri, method):
"""Returns an `OAuthRequest` for the given URL and HTTP method, signed
with this `OAuthAuthentication` instance's credentials."""
csr, token = self.credentials
assert token.secret is not None
req = oauth.OAuthRequest.from_consumer_and_token(csr, token,
http_method=method, http_url=uri)
sign_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
req.set_parameter('oauth_signature_method', sign_method.get_name())
log.debug('Signing base string %r for web request %s'
% (sign_method.build_signature_base_string(req, csr, token),
uri))
req.sign_request(sign_method, csr, token)
return req
httplib2.AUTH_SCHEME_CLASSES['oauth'] = OAuthAuthentication
httplib2.AUTH_SCHEME_ORDER[0:0] = ('oauth',) # unshift onto front
class OAuthHttp(httplib2.Http):
"""An HTTP user agent for an OAuth web service."""
default_scheme = 'https'
def add_credentials(self, name, password, domain=""):
"""Adds a name (or `OAuthConsumer` instance) and password (or
`OAuthToken` instance) to this user agent's available credentials.
If ``name`` is an `OAuthConsumer` instance and the ``domain`` parameter
is provided, the `OAuthHttp` instance will be configured to provide the
given OAuth credentials, even upon the first request to that domain.
(Normally the user agent will make the request unauthenticated first,
receive a challenge from the server, then make the request again with
the credentials.)
"""
super(OAuthHttp, self).add_credentials(name, password, domain)
log.debug("Setting credentials for name %s password %s"
% (name, password))
if isinstance(name, oauth.OAuthConsumer) and domain:
if self.default_scheme is None:
self.default_scheme = urlparse.urlsplit(typepad.client.endpoint)[0]
# Preauthorize these credentials for any request at that domain.
cred = (name, password)
domain = domain.lower()
auth = OAuthAuthentication(cred, domain, "%s://%s/" % ( self.default_scheme, domain ), {}, None, None, self)
self.authorizations.append(auth)
def url_for_signed_request(self, uri, method=None, headers=None, body=None):
"""Prepares to perform a request on the given URL with the given
parameters by signing the URL with any OAuth credentials available for
that URL.
If no such credentials are available, a `ValueError` is raised.
"""
if method is None:
method = 'GET'
uriparts = list(urlparse.urlparse(uri))
host = uriparts[1]
request_uri = urlparse.urlunparse([None, None] + uriparts[2:])
# find OAuthAuthentication for this uri
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
if not auths:
raise ValueError('No authorizations with which to sign a request to %r are available' % uri)
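# Rank the matching authorizations by their depth() for this URI and sign with the first one.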
auth = sorted(auths)[0][1]
# use it to make a signed uri instead
req = auth.signed_request(uri, method)
return req.to_url()
def signed_request(self, uri, method=None, headers=None, body=None):
"""Performs a request on the given URL with the given parameters, after
signing the URL with any OAuth credentials available for that URL.
If no such credentials are available, a `ValueError` is raised.
"""
uri = self.url_for_signed_request(uri, method=method, headers=headers, body=body)
return self.request(uri=uri, method=method, headers=headers, body=body)
def interactive_authorize(self, consumer, app, **kwargs):
from textwrap import fill
# Suppress batchhttp.client's no-log-handler warning.
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger().addHandler(NullHandler())
if not isinstance(consumer, oauth.OAuthConsumer):
consumer = oauth.OAuthConsumer(*consumer)
if not isinstance(app, typepad.Application):
app = typepad.Application.get_by_id(app)
# Set up an oauth client for our signed requestses.
oauth_client = OAuthClient(consumer, None)
oauth_client.request_token_url = app.oauth_request_token_url
oauth_client.access_token_url = app.oauth_access_token_url
oauth_client.authorization_url = app.oauth_authorization_url
# Get a request token for the viewer to interactively authorize.
request_token = oauth_client.fetch_request_token(None)
log.debug("Got request token %r", request_token)
# Ask the viewer to authorize it.
approve_url = oauth_client.authorize_token(params=kwargs)
log.debug("Asking viewer to authorize token with URL %r", approve_url)
print fill("""To join your application %r, follow this link and click "Allow":"""
% app.name, width=78)
print
print "<%s>" % approve_url
print
try:
verifier = raw_input('Enter the verifier code TypePad gave you: ')
except KeyboardInterrupt:
print
return
# Exchange the authorized request token for an access token.
access_token = oauth_client.fetch_access_token(verifier=verifier)
# Re-authorize ourselves using that access token, so we can make authenticated requests with it.
domain = urlparse.urlsplit(self.endpoint)[1]
self.add_credentials(consumer, access_token, domain=domain)
# Make sure the key works.
typepad.client.batch_request()
user = typepad.User.get_self()
typepad.client.complete_batch()
# Yay! Give the access token to the viewer for their reference.
print
print fill("""Yay! This new access token authorizes this typepad.client to act as %s (%s). Here's the token:"""
% (user.display_name, user.url_id), width=78)
print """
Key: %s
Secret: %s
""" % (access_token.key, access_token.secret)
print fill("""Pass this access token to typepad.client.add_credentials() to re-authorize as %s later."""
% user.display_name, width=78)
print
return access_token
class OAuthClient(oauth.OAuthClient):
"""An `OAuthClient` for interacting with the TypePad API."""
consumer = None
request_token_url = None
access_token_url = None
authorization_url = None
callback_url = None
def set_consumer(self, key, secret):
self.consumer = oauth.OAuthConsumer(
key = key,
secret = secret,
)
def set_token_from_string(self, token_str):
self.token = oauth.OAuthToken.from_string(token_str)
def fetch_request_token(self, callback):
if not callback:
callback = 'oob'
h = typepad.client
h.clear_credentials()
req = oauth.OAuthRequest.from_consumer_and_token(
self.consumer,
http_method='GET',
http_url=self.request_token_url,
callback=callback,
)
sign_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
req.set_parameter('oauth_signature_method', sign_method.get_name())
log.debug('Signing base string %r in fetch_request_token()'
% (sign_method.build_signature_base_string(req, self.consumer,
self.token),))
req.sign_request(sign_method, self.consumer, self.token)
log.debug('Asking for request token from %r', req.to_url())
resp, content = h.request(req.to_url(), method=req.get_normalized_http_method())
if resp.status != 200:
log.debug(content)
raise httplib.HTTPException('WHAT %d %s?!' % (resp.status, resp.reason))
self.token = oauth.OAuthToken.from_string(content)
return self.token
def fetch_access_token(self, request_token_str=None, verifier=None):
# -> OAuthToken
h = typepad.client
req = oauth.OAuthRequest.from_consumer_and_token(
self.consumer,
token = self.token,
http_url = self.access_token_url,
verifier = verifier,
)
sign_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
req.set_parameter('oauth_signature_method', sign_method.get_name())
log.debug('Signing base string %r in fetch_access_token()'
% (sign_method.build_signature_base_string(req, self.consumer,
self.token),))
req.sign_request(sign_method, self.consumer, self.token)
resp, content = h.request(req.to_url(), method=req.get_normalized_http_method())
self.token = oauth.OAuthToken.from_string(content)
return self.token
def authorize_token(self, params=None):
"""Returns the URL at which an interactive user can authorize this
instance's request token."""
if params is None:
params = {}
req = oauth.OAuthRequest.from_token_and_callback(
self.token,
http_url=self.authorization_url,
parameters=params,
)
return req.to_url()
def get_file_upload_url(self, upload_url):
"""Returns the given upload URL, signed for performing an HTTP ``POST``
against it, with this instance's OAuth credentials.
Such a signed URL can be used for uploading asset files to TypePad.
"""
# oauth GET params for file upload url
# since the form is multipart/form-data
req = oauth.OAuthRequest.from_consumer_and_token(
self.consumer,
token = self.token,
http_method = 'POST',
http_url = upload_url,
)
sign_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
req.set_parameter('oauth_signature_method', sign_method.get_name())
log.debug('Signing base string %r in get_file_upload_url()'
% (sign_method.build_signature_base_string(req, self.consumer,
self.token),))
req.sign_request(sign_method, self.consumer, self.token)
return req.to_url()
class TypePadClient(batchhttp.client.BatchClient, OAuthHttp):
"""An HTTP user agent for performing TypePad API requests.
A `TypePadClient` instance supports the same interface as `httplib2.Http`
instances, plus some special methods for performing OAuth authenticated
requests, and using TypePad's batch HTTP endpoint.
Each `TypePadClient` instance also has a `cookies` member, a dictionary
containing any additional HTTP cookies to send when making API requests.
"""
endpoint = 'http://api.typepad.com'
"""The URL against which to perform TypePad API requests."""
subrequest_limit = 20
"""The number of subrequests permitted for a given batch."""
def __init__(self, *args, **kwargs):
self.cookies = dict()
self._consumer = None
self._token = None
kwargs['endpoint'] = self.endpoint
super(TypePadClient, self).__init__(*args, **kwargs)
self.follow_redirects = False
def request(self, uri, method="GET", body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
"""Makes the given HTTP request, as specified.
If the instance's ``cookies`` dictionary contains any cookies, they
will be sent along with the request.
See `httplib2.Http.request()` for more information.
"""
if self.cookies:
if headers is None:
headers = {}
else:
headers = dict(headers)
cookies = ['='.join((key, value)) for key, value in self.cookies.items()]
headers['cookie'] = '; '.join(cookies)
return super(TypePadClient, self).request(uri, method, body, headers, redirections, connection_type)
def add_credentials(self, name, password, domain=""):
endparts = urlparse.urlsplit(self.endpoint)
if domain == '':
domain = endparts[1]
if isinstance(name, oauth.OAuthConsumer) and domain == endparts[1]:
# We're adding TypePad credentials, so upgrade to HTTPS.
self.endpoint = urlparse.urlunsplit(('https',) + endparts[1:])
super(TypePadClient, self).add_credentials(name, password, domain)
def clear_credentials(self):
super(TypePadClient, self).clear_credentials()
# We cleared our TypePad credentials too, so downgrade to HTTP.
endparts = urlparse.urlsplit(self.endpoint)
self.endpoint = urlparse.urlunsplit(('http',) + endparts[1:])
def signed_request(self, uri, method=None, body=None, headers=None):
"""Performs the given request, after signing the URL with the user
agent's configured OAuth credentials.
If the given URL is not an absolute URL, it is taken as relative to
this instance's endpoint first.
"""
host = urlparse.urlparse(uri)[1]
if not host:
uri = urlparse.urljoin(self.endpoint, uri)
return super(TypePadClient, self).signed_request(uri=uri,
method=method, body=body, headers=headers)
def _get_consumer(self):
return self._consumer
def _set_consumer(self, consumer):
if isinstance(consumer, tuple):
consumer = oauth.OAuthConsumer(consumer[0], consumer[1])
assert(consumer is None or isinstance(consumer, oauth.OAuthConsumer))
if self._consumer != consumer:
self._consumer = consumer
if consumer is None:
self.clear_credentials()
else:
self._reauthorize()
consumer = property(_get_consumer, _set_consumer)
def _get_token(self):
return self._token
def _set_token(self, token):
if isinstance(token, tuple):
token = oauth.OAuthToken(token[0], token[1])
assert(token is None or isinstance(token, oauth.OAuthToken))
if self._token != token:
self._token = token
# if token is None, forcibly clear credentials
if token is None:
self.clear_credentials()
else:
self._reauthorize()
token = property(_get_token, _set_token)
def _reauthorize(self):
if self._consumer is not None and self._token is not None:
self.clear_credentials()
self.add_credentials(self._consumer, self._token)
class ThreadAwareTypePadClientProxy(object):
def __init__(self):
self._local = threading.local()
def _get_client(self):
if not hasattr(self._local, 'client'):
self.client = typepad.client_factory()
return self._local.client
def _set_client(self, new_client):
self._local.client = new_client
client = property(_get_client, _set_client)
"""Property for accessing the real client instance.
Constructs a TypePadClient if the active thread doesn't have one."""
def __getattr__(self, name):
if name in ('_local', 'client'):
return super(ThreadAwareTypePadClientProxy,
self).__getattr__(name)
else:
return getattr(self.client, name)
def __setattr__(self, name, value):
if name in ('_local', 'client'):
super(ThreadAwareTypePadClientProxy, self).__setattr__(name,
value)
else:
setattr(self.client, name, value)
|
import tensorflow as tf
from utils import FLAT_COLOR_DIMS, COLOR_DIMS
IMAGE_SIZE = 416
# TODO(indutny): there is no reason to not calculate grid_size automatically
GRID_SIZE = 13
GRID_CHANNELS = 7
PRIOR_SIZES = [
[ 0.14377480392797287, 0.059023397839700086 ],
[ 0.20904473801128326, 0.08287369797830041 ],
[ 0.2795802996888472, 0.11140121237843759 ],
[ 0.3760081365223815, 0.1493933380505552 ],
[ 0.5984967942142249, 0.2427157057261726 ],
]
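# Prior (anchor) box sizes as [width, height] fractions of the input image; predicted box
# sizes are multiplied by these in Model.output().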
class Model:
def __init__(self, config, prior_sizes=PRIOR_SIZES):
self.config = config
self.prior_sizes = tf.constant(prior_sizes, dtype=tf.float32,
name='prior_sizes')
self.iou_threshold = config.iou_threshold
self.weight_decay = config.weight_decay
self.grid_depth = config.grid_depth
self.lambda_angle = config.lambda_angle
self.lambda_obj = config.lambda_obj
self.lambda_no_obj = config.lambda_no_obj
self.lambda_coord = config.lambda_coord
self.trainable_variables = None
def forward(self, image, training=False, coreml=False):
with tf.variable_scope('resistenz', reuse=tf.AUTO_REUSE, \
values=[ image ]) as scope:
x = image
x = self.conv_bn(x, filters=16, size=3, name='1', training=training)
x = self.max_pool(x, size=2, stride=2, name='1')
x = self.conv_bn(x, filters=32, size=3, name='2', training=training)
x = self.max_pool(x, size=2, stride=2, name='2')
x = self.conv_bn(x, filters=64, size=3, name='3', training=training)
x = self.max_pool(x, size=2, stride=2, name='3')
x = self.conv_bn(x, filters=128, size=3, name='4', training=training)
x = self.max_pool(x, size=2, stride=2, name='4')
x = self.conv_bn(x, filters=256, size=3, name='5', training=training)
x = self.max_pool(x, size=2, stride=2, name='5')
x = self.conv_bn(x, filters=512, size=3, name='6', training=training)
x = self.max_pool(x, size=2, stride=1, name='6')
# TODO(indutny): residual routes
if not self.config.minimal:
x = self.conv_bn(x, filters=1024, size=3, name='pre_final',
training=training)
####
if not self.config.minimal:
x = self.conv_bn(x, filters=256, size=1, name='final_1',
training=training)
x = self.conv_bn(x, filters=512, size=3, name='final_2',
training=training)
else:
x = self.conv_bn(x, filters=128, size=3, name='final_2',
training=training)
x = self.conv_bn(x, filters=self.grid_depth * GRID_CHANNELS + \
FLAT_COLOR_DIMS, size=1,
name='last', activation=None, training=training)
x, colors, raw_colors = self.output(x, coreml=coreml)
self.trainable_variables = scope.trainable_variables()
return x, colors, raw_colors
def loss_and_metrics(self, prediction, prediction_raw_colors, labels, \
tag='train'):
# Just a helpers
def sum_over_cells(x, name=None, max=False):
if max:
return tf.reduce_max(x, axis=3, name=name)
else:
return tf.reduce_sum(x, axis=3, name=name)
def sum_over_grid(x, name=None, max=False):
if max:
return tf.reduce_max(tf.reduce_max(x, axis=2), axis=1, name=name)
else:
return tf.reduce_sum(tf.reduce_sum(x, axis=2), axis=1, name=name)
with tf.variable_scope('resistenz_loss_{}'.format(tag), reuse=False, \
values=[ prediction, prediction_raw_colors, labels ]):
labels, label_colors = tf.split(labels, \
[ GRID_CHANNELS, FLAT_COLOR_DIMS ], axis=-1)
prediction = self.parse_box(prediction, 'prediction')
labels = self.parse_box(labels, 'labels')
iou = self.iou(prediction, labels)
# (cos x - cos y)^2 + (sin x - sin y)^2 = 2 ( 1 - cos [ x - y ] )
angle_diff = tf.reduce_mean(
(prediction['angle'] - labels['angle']) ** 2, axis=-1,
name='angle_diff')
abs_cos_diff = tf.abs(1.0 - angle_diff, name='abs_cos_diff')
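# With unit (cos, sin) angle vectors, 1 - angle_diff equals cos(pred_angle - label_angle),
# so abs_cos_diff is |cos(delta)|; scaling the IoU by it gives less credit to boxes whose
# orientation disagrees with the label.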
iou *= abs_cos_diff
# Compute masks
active_anchors = tf.one_hot(tf.argmax(iou, axis=-1), depth=self.grid_depth,
axis=-1, on_value=1.0, off_value=0.0, dtype=tf.float32,
name='active_anchors')
active_anchors *= labels['confidence']
# Disable training for anchors with high IoU
passive_anchors = labels['confidence']
passive_anchors *= tf.cast(iou >= self.iou_threshold, dtype=tf.float32)
inactive_anchors = 1.0 - tf.maximum(active_anchors, passive_anchors)
inactive_anchors = tf.identity(inactive_anchors, name='inactive_anchors')
expected_confidence = active_anchors
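# Only the best-matching anchor per labeled box is trained toward confidence 1; anchors whose
# IoU already exceeds the threshold are merely excluded from the no-object penalty.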
# Confidence loss
confidence_loss = \
(prediction['confidence'] - expected_confidence) ** 2 / 2.0
obj_loss = sum_over_cells( \
self.lambda_obj * active_anchors * confidence_loss, name='obj_loss')
no_obj_loss = sum_over_cells( \
self.lambda_no_obj * inactive_anchors * confidence_loss,
name='no_obj_loss')
# Coordinate loss
center_loss = tf.reduce_mean(
(GRID_SIZE * (prediction['center'] - labels['center'])) ** 2,
axis=-1, name='center_loss')
size_loss = tf.reduce_mean(
(tf.sqrt(prediction['size']) - tf.sqrt(labels['size'])) ** 2,
axis=-1, name='size_loss')
angle_loss = self.lambda_angle * (1.0 - abs_cos_diff)
coord_loss = self.lambda_coord * active_anchors * \
(center_loss + size_loss + angle_loss)
coord_loss = sum_over_cells(coord_loss, name='coord_loss')
# Color loss
label_colors = tf.split(label_colors, COLOR_DIMS, axis=-1,
name='split_label_colors')
color_loss = 0.0
for l_colors, p_colors in zip(label_colors, prediction_raw_colors):
color_loss += tf.nn.softmax_cross_entropy_with_logits_v2( \
labels=l_colors,
logits=p_colors)
# Mean for each group
color_loss /= len(label_colors) + 1e-23
color_loss *= tf.squeeze(labels['confidence'], axis=-1)
color_loss = tf.identity(color_loss, name='color_loss')
# To batch losses
obj_loss = sum_over_grid(obj_loss)
no_obj_loss = sum_over_grid(no_obj_loss)
coord_loss = sum_over_grid(coord_loss)
color_loss = sum_over_grid(color_loss)
# To scalars
obj_loss = tf.reduce_mean(obj_loss)
no_obj_loss = tf.reduce_mean(no_obj_loss)
coord_loss = tf.reduce_mean(coord_loss)
color_loss = tf.reduce_mean(color_loss)
# Weight decay
weight_loss = 0.0
for var in self.trainable_variables:
if 'bn_' not in var.name:
weight_loss += tf.nn.l2_loss(var)
weight_loss *= self.weight_decay
# Total
total_loss = obj_loss + no_obj_loss + coord_loss + color_loss
regularization_loss = weight_loss
# Count objects for metrics below
active_count = sum_over_grid(sum_over_cells(active_anchors),
name='active_count')
active_count = tf.expand_dims(active_count, axis=-1)
active_count = tf.expand_dims(active_count, axis=-1)
active_count = tf.expand_dims(active_count, axis=-1)
# Some metrics
mean_anchors = active_anchors / (active_count + 1e-23)
mean_iou = sum_over_grid(sum_over_cells(iou * mean_anchors))
mean_iou = tf.reduce_mean(mean_iou)
center_loss = self.lambda_coord * center_loss * active_anchors
size_loss = self.lambda_coord * size_loss * active_anchors
angle_loss = self.lambda_coord * angle_loss * active_anchors
center_loss = sum_over_grid(sum_over_cells(center_loss))
size_loss = sum_over_grid(sum_over_cells(size_loss))
angle_loss = sum_over_grid(sum_over_cells(angle_loss))
center_loss = tf.reduce_mean(center_loss)
size_loss = tf.reduce_mean(size_loss)
angle_loss = tf.reduce_mean(angle_loss)
# NOTE: create metrics outside of variable scope for clearer name
metrics = [
tf.summary.scalar('{}/iou'.format(tag), mean_iou),
tf.summary.scalar('{}/obj_loss'.format(tag), obj_loss),
tf.summary.scalar('{}/no_obj_loss'.format(tag), no_obj_loss),
tf.summary.scalar('{}/coord_loss'.format(tag), coord_loss),
tf.summary.scalar('{}/center_loss'.format(tag), center_loss),
tf.summary.scalar('{}/size_loss'.format(tag), size_loss),
tf.summary.scalar('{}/angle_loss'.format(tag), angle_loss),
tf.summary.scalar('{}/loss'.format(tag), total_loss),
tf.summary.scalar('{}/weight_loss'.format(tag), weight_loss),
tf.summary.scalar('{}/color_loss'.format(tag), color_loss),
]
return total_loss + regularization_loss, tf.summary.merge(metrics)
# Helpers
def conv_bn(self, input, filters, size, name, training, \
activation=lambda x: tf.nn.leaky_relu(x, alpha=0.1)) :
x = tf.layers.conv2d(input, filters=filters, kernel_size=size, \
padding='SAME',
name='conv_{}'.format(name))
if activation is not None:
x = tf.layers.batch_normalization(x, momentum=0.9, epsilon=1e-5,
training=training,
name='bn_{}'.format(name))
x = activation(x)
return x
def max_pool(self, input, size, stride, name):
return tf.layers.max_pooling2d(input, pool_size=size, strides=stride,
padding='SAME', name='max_pool_{}'.format(name))
def output(self, x, coreml=False):
with tf.name_scope('output', values=[ x ]):
batch_size = tf.shape(x)[0]
if coreml:
# CoreML does not support rank-5 tensors, strided slices, and so on
x = tf.reshape(x, [
batch_size, GRID_SIZE, GRID_SIZE,
FLAT_COLOR_DIMS + self.grid_depth * GRID_CHANNELS,
], name='output')
return x
x, colors = tf.split(x, \
[ self.grid_depth * GRID_CHANNELS, FLAT_COLOR_DIMS ], axis=-1)
x = tf.reshape(x, [
batch_size, GRID_SIZE, GRID_SIZE, self.grid_depth, GRID_CHANNELS,
])
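# Per-anchor channels: 2 center offsets, 2 size scales, 2 angle components (cos, sin) and
# 1 confidence. Sigmoid keeps centers inside their cell, exp keeps sizes positive, and
# l2-normalization makes the angle a unit vector.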
center, size, angle, confidence = \
tf.split(x, [ 2, 2, 2, 1 ], axis=-1)
center = tf.sigmoid(center)
size = tf.exp(size)
angle = tf.nn.l2_normalize(angle, axis=-1)
confidence = tf.sigmoid(confidence)
# Apply softmax over each color group
raw_colors = tf.split(colors, COLOR_DIMS, axis=-1)
split_colors = [ tf.nn.softmax(l, axis=-1) for l in raw_colors ]
colors = tf.concat(split_colors, axis=-1)
# Apply priors
with tf.name_scope('apply_prior_sizes',
values=[ size, self.prior_sizes ]):
size *= self.prior_sizes
x = tf.concat([ center, size, angle, confidence ], axis=-1,
name='output')
# Return raw_colors for use in the loss
return x, colors, raw_colors
def parse_box(self, input, name):
center, size, angle, confidence = tf.split(input, \
[ 2, 2, 2, 1 ], \
axis=-1, name='{}_box_split'.format(name))
confidence = tf.squeeze(confidence, axis=-1,
name='{}_confidence'.format(name))
center /= GRID_SIZE
half_size = size / 2.0
return {
'center': center,
'size': size,
'angle': angle,
'confidence': confidence,
'top_left': center - half_size,
'bottom_right': center + half_size,
'area': self.area(size, name),
}
def area(self, size, name):
width, height = tf.split(size, [ 1, 1 ], axis=-1)
return tf.squeeze(width * height, axis=-1, name='{}_area'.format(name))
def iou(self, a, b):
top_left = tf.maximum(a['top_left'], b['top_left'], name='iou_top_left')
bottom_right = tf.minimum(a['bottom_right'], b['bottom_right'],
name='iou_bottom_right')
size = tf.nn.relu(bottom_right - top_left, name='iou_size')
intersection = self.area(size, 'iou_area')
union = a['area'] + b['area'] - intersection
return intersection / (union + 1e-23)
|
<reponame>brianhie/trajectorama<filename>bin/dataset_zeisel_adolescent_brain.py
from anndata import AnnData
import loompy
import numpy as np
import os
from scanorama import *
import scanpy as sc
from scipy.sparse import vstack
from sklearn.preprocessing import normalize
from process import process, load_names, merge_datasets
from utils import *
NAMESPACE = 'zeisel_adolescent_brain'
DIMRED = 100
DR_METHOD = 'svd'
data_names = [
'data/mouse_brain/zeisel/amygdala',
'data/mouse_brain/zeisel/cerebellum',
'data/mouse_brain/zeisel/cortex1',
'data/mouse_brain/zeisel/cortex2',
'data/mouse_brain/zeisel/cortex3',
'data/mouse_brain/zeisel/hippocampus',
'data/mouse_brain/zeisel/hypothalamus',
'data/mouse_brain/zeisel/medulla',
'data/mouse_brain/zeisel/midbraindorsal',
'data/mouse_brain/zeisel/midbrainventral',
'data/mouse_brain/zeisel/olfactory',
'data/mouse_brain/zeisel/pons',
'data/mouse_brain/zeisel/striatumdorsal',
'data/mouse_brain/zeisel/striatumventral',
'data/mouse_brain/zeisel/thalamus',
]
def keep_valid(datasets):
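# Map each barcode to its Zeisel level-6 cluster name, then keep only neuron profiles with a
# usable age annotation. Note the `datasets` argument itself is never referenced here.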
barcode_sub_type = {}
with loompy.connect('data/mouse_brain/zeisel/l6_r1.loom') as ds:
for barcode, sub_type in zip(ds.ca['CellID'], ds.ca['ClusterName']):
#for barcode, sub_type in zip(ds.ca['CellID'], ds.ca['Taxonomy_group']):
barcode_sub_type[barcode] = sub_type
valid_idx = []
cell_types = []
sub_types = []
ages = []
for data_name in data_names:
with open('{}/meta.tsv'.format(data_name)) as f:
excluded = set([
'Blood', 'Excluded', 'Immune', 'Vascular',
])
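# NOTE: this `excluded` set is built but never referenced in the filtering below.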
for j, line in enumerate(f):
fields = line.rstrip().split('\t')
if fields[1] == 'Neurons' and fields[2] != '?':
valid_idx.append(j)
cell_types.append(fields[1])
if fields[0] in barcode_sub_type:
sub_types.append(barcode_sub_type[fields[0]])
else:
sub_types.append('NA')
try:
age = float(fields[2][1:])
except ValueError:
age = fields[2]
if age == 'p12, p35':
age = (12 + 35) / 2.
elif age == 'p16, p24':
age = (16 + 24) / 2.
elif age == 'p19, p21':
age = (19 + 21) / 2.
elif age == 'p21-23' or age == 'p21, p23':
age = (21 + 23) / 2.
elif age == 'p22-24':
age = (22 + 24) / 2.
elif age == 'p25-27':
age = (25 + 27) / 2.
elif age == '6w':
age = 7 * 6.
else:
continue
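# Linearly map the parsed age from [min_age, max_age] days onto the interval [19, 22].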
min_age = 19.
max_age = 60.
offset = (age - min_age) / (max_age - min_age) * 3
ages.append(19 + offset)
return valid_idx, np.array(cell_types), np.array(ages), np.array(sub_types)
datasets, genes_list, n_cells = load_names(data_names, norm=False)
qc_idx, cell_types, ages, sub_types = keep_valid(datasets)
datasets, genes = merge_datasets(datasets, genes_list)
X = vstack(datasets)
X = X[qc_idx]
qc_idx = [ i for i, s in enumerate(np.sum(X != 0, axis=1))
if s >= 500 ]
tprint('Found {} valid cells among all datasets'.format(len(qc_idx)))
X = X[qc_idx]
cell_types = cell_types[qc_idx]
sub_types = sub_types[qc_idx]
ages = ages[qc_idx]
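# Cache the low-dimensional embedding on disk so repeated runs can skip the SVD step.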
if not os.path.isfile('data/dimred/{}_{}.txt'
.format(DR_METHOD, NAMESPACE)):
mkdir_p('data/dimred')
tprint('Dimension reduction with {}...'.format(DR_METHOD))
X_dimred = reduce_dimensionality(normalize(X), dim_red_k=DIMRED)
tprint('Dimensionality = {}'.format(X_dimred.shape[1]))
np.savetxt('data/dimred/{}_{}.txt'
.format(DR_METHOD, NAMESPACE), X_dimred)
else:
X_dimred = np.loadtxt('data/dimred/{}_{}.txt'
.format(DR_METHOD, NAMESPACE))
dataset = AnnData(X)
dataset.var['gene_symbols'] = genes
dataset.obs['cell_types'] = [ NAMESPACE + '_' + l for l in cell_types ]
dataset.obs['sub_types'] = [ NAMESPACE + '_' + l for l in sub_types ]
dataset.obs['ages'] = ages
datasets = [ dataset ]
namespaces = [ NAMESPACE ]
|
#!/usr/bin/env python
from __future__ import print_function
import matplotlib as mpl
#mpl.use("Agg")
import numpy as np
import matplotlib.pyplot as plt
from costar_models import *
from costar_models.planner import GetOrderedList, PrintTopQ
from costar_models.sampler2 import PredictionSampler2
from costar_models.datasets.npz import NpzDataset
from costar_models.datasets.npy_generator import NpzGeneratorDataset
from costar_models.datasets.h5f_generator import H5fGeneratorDataset
from costar_models.planner import *
from costar_models.multi import *
def main(args):
'''
Tool for running model training without the rest of the simulation/planning/ROS
code. This should be more or less independent and only rely on a couple
external features.
'''
ConfigureGPU(args)
np.random.seed(0)
data_file_info = args['data_file'].split('.')
data_type = data_file_info[-1]
root = ""
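# Rebuild the dataset root by re-joining every token except the trailing file extension.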
for i, tok in enumerate(data_file_info[:-1]):
if i > 0:
root += '.'
root += tok
if data_type == "npz":
dataset = NpzGeneratorDataset(root)
data = dataset.load(success_only = args['success_only'])
elif data_type == "h5f":
dataset = H5fGeneratorDataset(root)
data = dataset.load(success_only = args['success_only'])
else:
raise NotImplementedError('data type not implemented: %s'%data_type)
if 'model' in args and args['model'] is not None:
model = MakeModel(taskdef=None, **args)
model.validate = True
model.load(world=None,**data)
train_generator = model.trainGenerator(dataset)
test_generator = model.testGenerator(dataset)
print(">>> GOAL_CLASSIFIER")
image_discriminator = LoadGoalClassifierWeights(model,
make_classifier_fn=MakeImageClassifier,
img_shape=(64, 64, 3))
image_discriminator.compile(loss="categorical_crossentropy",
metrics=["accuracy"],
optimizer=model.getOptimizer())
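# The goal classifier scores (initial frame, predicted goal frame) pairs; below, its argmax
# is compared against the labeled options o1/o2 to count correct goal predictions.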
show = False
correct_g1 = 0
correct_g2 = 0
total = 0
err1_sum = 0.
err2_sum = 0.
v_sum = 0.
osum = 0.
ii = 0
for filename in dataset.test:
print(filename)
data = dataset.loadFile(filename)
length = data['example'].shape[0]
features, targets = model._getData(**data)
[I0, I, o1, o2, oin] = features
[I_target, I_target2, o1_1h, value, qa, ga, o2_1h] = targets
for i in range(length):
ii += 1
xi = np.expand_dims(I[i],axis=0)
x0 = np.expand_dims(I0[i],axis=0)
prev_option = np.array([oin[i]])
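# Encode the initial and current frames, roll the latent state forward through the two
# labeled options, then decode the predicted goal images for comparison against the targets.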
h = model.encode(xi)
h0 = model.encode(x0)
h_goal = model.transform(h0, h, np.array([o1[i]]))
h_goal2 = model.transform(h0, h_goal, np.array([o2[i]]))
p = model.pnext(h0, h_goal, np.array([o1[i]]))[0]
xg = model.decode(h_goal)
xg2 = model.decode(h_goal2)
if show:
plt.subplot(1,4,1); plt.imshow(x0[0])
plt.subplot(1,4,2); plt.imshow(xi[0])
plt.subplot(1,4,3); plt.imshow(xg[0])
plt.subplot(1,4,4); plt.imshow(xg2[0])
plt.show()
res1 = np.argmax(image_discriminator.predict([x0, xg]), axis=1)
res2 = np.argmax(image_discriminator.predict([x0, xg2]), axis=1)
if res1[0] == o1[i]:
correct_g1 += 1
if res2[0] == o2[i]:
correct_g2 += 1
err1 = np.mean(np.abs((xg[0] - I_target[i])))
err2 = np.mean(np.abs((xg2[0] - I_target2[i])))
v = model.value(h_goal2)
if v[0] > 0.5 and value[i] > 0.5:
vacc = 1.
elif v[0] < 0.5 and value[i] < 0.5:
vacc = 1.
else:
vacc = 0.
if p[0,o2[i]] > 0.1:
osum += 1.
else:
#print(GetOrderedList(p[0]))
#print(p[0,o2[i]], o2[i])
pass
err1_sum += err1
err2_sum += err2
total += 1
v_sum += vacc
mean1 = err1_sum / total
mean2 = err2_sum / total
print(correct_g1, "/", total, correct_g2, "/", total, "...",
o1[i], o2[i],
res1[0], res2[0],
#"errs =", err1, err2,
"means =", mean1, mean2,
"next =", osum, (osum/total),
"value =", v, value[i], "avg =", (v_sum/total))
else:
raise RuntimeError('Must provide a model to load')
if __name__ == '__main__':
args = ParseModelArgs()
if args['profile']:
import cProfile
cProfile.run('main(args)')
else:
main(args)
|