"""Embedded DSL for assembling logic circuits.
Embedded domain-specific combinator library for
assembling abstract definitions of logic circuits
and synthesizing circuits from those definitions.
"""
from __future__ import annotations
from typing import Sequence
import doctest
from parts import parts
from circuit import op, gate, circuit, signature
class bit():
"""
Class for representing an abstract bit. Such a bit
can be interpreted concretely as a value, but it is
also used to keep track of relationships between
operators and to represent the wires within a
circuit built up out of those operators.
>>> bit.hook_operation(lambda o, v, *args: None)
>>> bit.circuit(circuit())
>>> b = output(input(1).and_(input(1)))
>>> b.value == bit.circuit().evaluate([1,1])[0]
True
>>> def make_hook(bit_):
... def hook(o, v, *args):
... return bit_.constructor(*args)(v, bit_.gate(o, [a.gate for a in args]))
... return hook
>>> bit.hook_operation(make_hook(bit))
>>> bit.circuit(circuit())
>>> b = output(input(0).and_(input(0)))
>>> b.value == bit.circuit().evaluate([0,0])[0]
True
"""
_circuit = None
_hook_operation = None
@staticmethod
def circuit(circuit_=None):
if circuit_ is not None:
bit._circuit = circuit_
return None
else:
bit._circuit.prune_and_topological_sort_stable()
return bit._circuit
@staticmethod
def hook_operation(hook=None):
bit._hook_operation = hook
@staticmethod
def operation(o, *args):
# Ensure second argument is a `bit`.
args = list(args)
if len(args) == 2:
args[1] = constant(args[1]) if isinstance(args[1], int) else args[1]
# Compute the value of the result of the operation on the arguments.
v = o(*[a.value for a in args])
# If a hook is installed and it returns a result,
# return that result instead.
if bit._hook_operation is not None:
r = bit._hook_operation(o, v, *args)
if r is not None:
return r
return bit.constructor(*args)(v, bit.gate(o, [a.gate for a in args]))
@staticmethod
def constructor(b1, b2=None):
# The inference code below is not currently in use.
"""
if isinstance(b1, input_one) and isinstance(b2, input_one):
return input_one
elif isinstance(b1, input_two) and isinstance(b2, input_two):
return input_two
elif isinstance(b1, (input_one, input_two)) and b2 is None:
return type(b1)
else:
return bit
"""
return bit
@staticmethod
def gate(operation, igs):
return bit._circuit.gate(operation, igs)
def __init__(self, value, gate_=None):
self.value = value
self.gate = bit._circuit.gate() if gate_ is None else gate_
def __int__(self):
return self.value
def not_(self):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(input(x).not_())
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
"""
return bit.operation(op.not_, self)
def __invert__(self):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(~input(x))
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
"""
return bit.operation(op.not_, self)
def __rsub__(self, other):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(1 - input(x))
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
>>> bit.circuit(circuit())
>>> 2 - input(0)
Traceback (most recent call last):
...
ValueError: can only subtract a bit from the integer 1
"""
if other == 1:
return bit.operation(op.not_, self)
raise ValueError('can only subtract a bit from the integer 1')
def and_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).and_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.and_, self, other)
def __and__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) & input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.and_, self, other)
def __rand__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 0 & constant(1)
>>> b.value
0
"""
return self & (constant(other) if isinstance(other, int) else other)
def nimp(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nimp(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nimp_, self, other)
def nimp_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nimp_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nimp_, self, other)
def __gt__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) > input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return self.nimp(other)
def nif(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nif(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nif_, self, other)
def nif_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nif_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nif_, self, other)
def __lt__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) < input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return self.nif(other)
def xor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def xor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def __xor__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) ^ input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def __rxor__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 1 ^ constant(0)
>>> b.value
1
"""
return self ^ (constant(other) if isinstance(other, int) else other)
def or_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).or_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.or_, self, other)
def __or__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) | input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.or_, self, other)
def __ror__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 1 | constant(0)
>>> b.value
1
"""
return self | (constant(other) if isinstance(other, int) else other)
def nor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def nor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def __mod__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) % input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def xnor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xnor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def xnor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xnor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def __eq__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) == input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def if_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).if_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.if_, self, other)
def __ge__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) >= input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.if_, self, other)
def imp(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).imp(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def imp_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).imp_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def __le__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) <= input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def nand(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nand(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
def nand_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nand_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
def __matmul__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) @ input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
class constant(bit):
"""Bit that is designated as a constant input."""
class input(bit):
"""Bit that is designated as a variable input."""
def __init__(self: bit, value: int):
self.value = value
self.gate = bit._circuit.gate(op.id_, is_input=True)
class input_one(input):
"""Bit that is designated as a variable input from one source."""
class input_two(input):
"""Bit that is designated as a variable input from a second source."""
class output(bit):
"""
Bit that is designated as an output.
>>> bit.circuit(circuit())
>>> b0 = output(input(1).not_())
>>> b1 = output(b0.not_())
>>> b2 = output(b0)
>>> [b0.value, b1.value, b2.value]
[0, 1, 0]
"""
def __init__(self: bit, b: bit):
# Check whether the bit can be used directly as a final output, or whether other gates depend on it.
if len(b.gate.outputs) > 0:
b = ~(~b) # Preserve the bit by copying it to a new wire.
self.value = b.value
self.gate = bit._circuit.gate(op.id_, [b.gate], is_output=True)
class bits_type(int): # pylint: disable=R0903
"""
Class for representing an input or output type of a
function decorated for automated synthesis.
"""
class bits(list):
"""
Class for representing a vector of abstract bits.
"""
@staticmethod
def from_byte(byte_: int, constructor=bit) -> bits:
return bits([
constructor(bit_)
for bit_ in reversed([(byte_>>i)%2 for i in range(8)])
])
@staticmethod
def from_bytes(bytes_, constructor=bit) -> bits:
"""
>>> bit.circuit(circuit())
>>> [b.value for b in bits.from_bytes(bytes([255]))]
[1, 1, 1, 1, 1, 1, 1, 1]
>>> bit.circuit(circuit())
>>> [b.value for b in bits.from_bytes(bytes([11, 0]))]
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
"""
return bits([
bit_
for byte_ in bytes_
for bit_ in bits.from_byte(byte_, constructor)
])
@staticmethod
def zeros(n: int) -> bits:
"""
>>> bit.circuit(circuit())
>>> xs = bits.zeros(3)
>>> ys = outputs(xs.not_())
>>> [y.value for y in ys]
[1, 1, 1]
"""
return bits([constant(0)]*n)
def __new__(cls, argument = None) -> bits:
"""
Return a bits object for the supplied argument (an integer yields a bits_type length designator instead).
"""
return bits_type(argument)\
if isinstance(argument, int) else\
list.__new__(cls, argument)
def __int__(self: bits) -> int:
"""
>>> bit.circuit(circuit())
>>> xs = constants([0, 0, 0])
>>> ys = outputs(xs.not_())
>>> int(ys)
7
"""
return sum(int(b)*(2**i) for (i, b) in zip(range(len(self)), reversed(self)))
def not_(self: bits) -> bits:
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... xs = inputs([x, x, x])
... ys = outputs(xs.not_())
... ns = [int(y) for y in ys]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x]))
>>> all(results)
True
"""
return bits([x.not_() for x in self])
def __invert__(self: bits) -> bits:
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... xs = inputs([x, x, x])
... ys = outputs(~xs)
... ns = [int(y) for y in ys]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x]))
>>> all(results)
True
"""
return bits([x.not_() for x in self])
def and_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.and_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.and_(y) for (x, y) in zip(self, other)])
def __and__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs & ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.and_(y) for (x, y) in zip(self, other)])
def nimp(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nimp(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def nimp_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nimp_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def __gt__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs > ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def nif(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nif(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def nif_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nif_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def __lt__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs < ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def xor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def xor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def __xor__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs ^ ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def or_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.or_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.or_(y) for (x, y) in zip(self, other)])
def __or__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs | ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.or_(y) for (x, y) in zip(self, other)])
def nor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def nor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def __mod__(self, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs % ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def xnor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xnor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def xnor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xnor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def __eq__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs == ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def if_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.if_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.if_(y) for (x, y) in zip(self, other)])
def __ge__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs >= ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.if_(y) for (x, y) in zip(self, other)])
def imp(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.imp(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def imp_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.imp_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def __le__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs <= ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def nand(self: bits, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nand(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nand_(y) for (x, y) in zip(self, other)])
def nand_(self: bits, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nand_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nand_(y) for (x, y) in zip(self, other)])
def __rshift__(self: bits, other) -> bits:
"""
Overloaded operator: rotation and shift operations.
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bs = bs >> 3
>>> [b.value for b in bs]
[0, 0, 0, 1, 1, 1, 1, 0]
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [0,0,0,0,1,1,1,1]))
>>> bs = bs >> {3}
>>> [b.value for b in bs]
[1, 1, 1, 0, 0, 0, 0, 1]
"""
if isinstance(other, set) and isinstance(list(other)[0], int): # Rotation.
quantity = list(other)[0]
return bits(self[len(self)-quantity:]) ** bits(self[0:len(self)-quantity])
else: # Shift
return bits([constant(0)]*other) ** bits(self[0:len(self)-other])
def __lshift__(self: bits, other) -> bits:
"""
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bs = bs << 3
>>> [b.value for b in bs]
[1, 0, 0, 0, 0, 0, 0, 0]
"""
return bits(self[other:]) ** bits([constant(0) for _ in range(other)])
def __truediv__(self: bits, other) -> Sequence[bits]:
"""
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / 2)
>>> ([b.value for b in bss[0]], [b.value for b in bss[1]])
([1, 1, 1, 1], [0, 0, 0, 0])
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / {2})
>>> [[b.value for b in bs] for bs in bss]
[[1, 1], [1, 1], [0, 0], [0, 0]]
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / [1, 3, 4])
>>> [[b.value for b in bs] for bs in bss]
[[1], [1, 1, 1], [0, 0, 0, 0]]
"""
if isinstance(other, list) and len(other) > 0 and isinstance(other[0], int):
return map(bits, parts(self, length=other)) # Sequence of lengths.
elif isinstance(other, set) and len(other) == 1 and isinstance(list(other)[0], int):
return self / (len(self)//list(other)[0]) # Parts of length `other`.
else:
return map(bits, parts(self, other)) # Number of parts is `other`.
def __add__(self: bits, other) -> bits:
"""Concatenation of bit vectors."""
result = list(self)
result.extend(list(other))
return bits(result)
def __pow__(self: bits, other) -> bits:
"""Concatenation of bit vectors."""
return self + other
def constants(l):
return bits(map(constant, l))
def inputs(l):
return bits(map(input, l))
def outputs(l):
return bits(map(output, l))
def synthesize(f):
"""
Decorator for automatically synthesizing a circuit from a
function that takes only `bit` and/or `bits` objects as its
arguments and returns an output of type `bit` or `bits`.
>>> @synthesize
... def equal(x: bit, y: bit) -> bit:
... return (x & y) | ((1 - x) & (1 - y))
>>> xys = [bits([x, y]) for x in (0, 1) for y in (0, 1)]
>>> [equal.circuit.evaluate(xy) for xy in xys]
[[1], [0], [0], [1]]
>>> @synthesize
... def conjunction(xy: bits(2)) -> bits(2):
... return (xy[0], xy[0] & xy[1])
>>> xys = [bits([x, y]) for x in (0, 1) for y in (0, 1)]
>>> [conjunction.circuit.evaluate(xy) for xy in xys]
[[0, 0], [0, 0], [1, 0], [1, 1]]
>>> @synthesize
... def equal(x, y):
... return x & y
Traceback (most recent call last):
...
RuntimeError: automated circuit synthesis failed
"""
# Functions for constructing the circuit inputs and outputs
# from the type annotations of the decorated function.
type_in = lambda a: input(0) if a is bit else inputs([0] * a)
type_out = lambda a: output if a is bit else outputs
# For forward-compatibility with PEP 563.
eval_ = lambda a: eval(a) if isinstance(a, str) else a # pylint: disable=W0123
try:
# Construct the circuit and add it to the function as an attribute.
bit.circuit(circuit())
args_in = {
k: type_in(eval_(a))
for (k, a) in f.__annotations__.items() if k != 'return'
}
type_out(eval_(f.__annotations__['return']))(f(**args_in))
f.circuit = bit.circuit()
except:
raise RuntimeError('automated circuit synthesis failed') from None
# Return the original function.
return f
if __name__ == "__main__":
doctest.testmod() # pragma: no cover
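# A minimal usage sketch (not part of the library; the function below is
# illustrative) showing `synthesize` applied to a bit-vector function that
# XORs two 2-bit inputs packed into a single 4-bit argument.
@synthesize
def xor2(xy: bits(4)) -> bits(2):
    return bits(xy[0:2]).xor_(bits(xy[2:4]))

# Evaluating the synthesized circuit on the flattened input [0, 1, 1, 1]
# would be expected to yield [1, 0] (i.e., [0 ^ 1, 1 ^ 1]):
# xor2.circuit.evaluate([0, 1, 1, 1])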
import discord
import random
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import csv
async def plot_user_activity(client, ctx):
plt.style.use('fivethirtyeight')
df = pd.read_csv('innovators.csv', encoding= 'unicode_escape')
author = df['author'].to_list()
message_counter = {}
for i in author:
if i in message_counter:
message_counter[i] += 1
else:
message_counter[i] = 1
# Exclude the bot's own messages so it does not appear in the line graph.
message_counter.pop('ninza_bot_test')
authors_in_discord = list(message_counter.keys())
no_of_messages = list(message_counter.values())
plt.plot(authors_in_discord, no_of_messages, marker = 'o', markersize=10)
plt.title('msg sent by author in the server.')
plt.xlabel('Author')
plt.ylabel('Message_count')
plt.savefig('output2.png')
plt.tight_layout()
plt.close()
await ctx.send(file = discord.File('output2.png'))
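# A minimal sketch (assumed, not part of the original snippet) of how
# plot_user_activity could be registered as a bot command using discord.py's
# commands extension; the prefix, command name, and token handling are
# illustrative, and the `intents` argument is required by newer discord.py versions.
from discord.ext import commands

bot = commands.Bot(command_prefix='!', intents=discord.Intents.default())

@bot.command(name='activity')
async def activity(ctx):
    await plot_user_activity(bot, ctx)

# bot.run(token)  # the bot token is intentionally left out here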
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.getenv('SECRET_KEY', '')
DEBUG = False
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_main.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
class TestingConfig(Config):
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_main.db')
PRESERVE_CONTEXT_ON_EXCEPTION = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
DEBUG = False
config_by_name = dict(
dev=DevelopmentConfig,
test=TestingConfig,
prod=ProductionConfig
)
key = Config.SECRET_KEY
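# A minimal sketch (assumed, not part of this module) of how config_by_name is
# typically consumed by a Flask application factory; `create_app` is an
# illustrative name.
from flask import Flask

def create_app(config_name='dev'):
    app = Flask(__name__)
    app.config.from_object(config_by_name[config_name])
    return app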
"""Methods for working with ontologies and the OLS."""
from urllib.parse import quote_plus
import requests
OLS_API_ROOT = "http://www.ebi.ac.uk/ols/api"
# Curie means something like CL:0000001
def _ontology_name(curie):
"""Get the name of the ontology from the curie, CL or UBERON for example."""
return curie.split(":")[0]
def _ontology_value(curie):
"""Get the id component of the curie, 0000001 from CL:0000001 for example."""
return curie.split(":")[1]
def _double_encode(url):
"""Double url encode a url. This is required by the OLS API."""
return quote_plus(quote_plus(url))
def _iri(curie):
"""Get the iri from a curie. This is a bit hopeful that they all map to purl.obolibrary.org"""
if _ontology_name(curie) == "EFO":
return f"http://www.ebi.ac.uk/efo/EFO_{_ontology_value(curie)}"
return f"http://purl.obolibrary.org/obo/{_ontology_name(curie)}_{_ontology_value(curie)}"
class OntologyLookupError(Exception):
"""Exception for some problem with looking up ontology information."""
def _ontology_info_url(curie):
"""Get the to make a GET to to get information about an ontology term."""
# If the curie is empty, just return an empty string. This happens when there is no
# valid ontology value.
if not curie:
return ""
else:
return f"{OLS_API_ROOT}/ontologies/{_ontology_name(curie)}/terms/{_double_encode(_iri(curie))}"
def get_ontology_label(curie):
"""For a given curie like 'CL:1000413', get the label like 'endothelial cell of artery'"""
url = _ontology_info_url(curie)
if not url:
return ""
response = requests.get(url)
if not response.ok:
raise OntologyLookupError(
f"Curie {curie} lookup failed, got status code {response.status_code}: {response.text}"
)
return response.json()["label"]
def lookup_candidate_term(label, ontology="cl", method="select"):
"""Lookup candidate terms for a label. This is useful when there is an existing label in a
submitted dataset, and you want to find an appropriate ontology term.
Args:
label: the label to find ontology terms for
ontology: the ontology to search in, cl or uberon or efo for example
method: select or search. search provides much broader results
Returns:
list of (curie, label) tuples returned by OLS
"""
# using OLS REST API [https://www.ebi.ac.uk/ols/docs/api]
url = f"{OLS_API_ROOT}/{method}?q={quote_plus(label)}&ontology={ontology.lower()}"
response = requests.get(url)
if not response.ok:
raise OntologyLookupError(
f"Label {label} lookup failed, got status code {response.status_code}: {response.text}"
)
return [(r["obo_id"], r["label"]) for r in response.json()["response"]["docs"]]
from __future__ import print_function
import httplib2
import os
import sys
import pickle
from apiclient import discovery
from apiclient import errors
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/gmail.labels'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Inbox Organize'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def GetLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute()
labels = response['labels']
"""
for label in labels:
print ('Label id: %s - Label name: %s' % (label['id'], label['name']))
"""
return labels
except errors.HttpError as error:
print ('An error occurred: %s' % error)
def DeleteLabel(service, user_id, label_id):
try:
service.users().labels().delete(userId=user_id, id=label_id).execute()
print ('Label with id: %s deleted successfully.' % label_id)
except errors.HttpError as error:
print ('An error occurred: %s' % error)
def main():
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
userId = 'me'
labels = GetLabels(service, userId)
for label in labels:
if (label['type'] == 'user'):
print('Deleting label:', label['name'])
DeleteLabel(service, userId, label['id'])
if __name__ == '__main__':
main()
import numpy as np
import sys
## ROCKSTAR ##
halostruct1 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32)
])
halostruct2 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('halfmass_radius',np.float32),
#('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32)
])
## ROCKSTAR-GALAXIES ##
halogalaxystruct1 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32),
('type',np.int32),
('sm',np.float32),
('gas',np.float32),
('bh',np.float32),
('peak_density',np.float32),
('av_density',np.float32),
])
def getRSformat(obj):
if obj.galaxies == 0:
if obj.format_revision == 0:
print('OUTDATED ROCKSTAR, PLEASE UPDATE!')
sys.exit()
elif obj.format_revision == 1:
if obj.debug: print('returning halostruct1')
return halostruct1
elif obj.format_revision == 2:
if obj.debug: print('returning halostruct2')
return halostruct2
else:
print('found HALO_FORMAT_REVISION=%d, if this is >2 email me!' %
obj.format_revision)
sys.exit()
elif obj.galaxies == 1:
if obj.format_revision == 0:
print('OUTDATED ROCKSTAR-GALAXIES, PLEASE UPDATE!')
sys.exit()
elif obj.format_revision == 1:
if obj.debug: print('returning halogalaxystruct1')
return halogalaxystruct1
else:
print('found HALO_FORMAT_REVISION=%d, if this is >1 email me!' %
obj.format_revision)
sys.exit()
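# A minimal sketch (file name, header offset, and halo count are illustrative
# assumptions) of how these record dtypes are meant to be used: read raw halo
# records from a Rockstar binary catalogue and access the fields by name.
def read_halos(filename, num_halos, header_bytes, halostruct=halostruct2):
    with open(filename, 'rb') as f:
        f.seek(header_bytes)  # skip the binary header preceding the halo records
        return np.fromfile(f, dtype=halostruct, count=num_halos)

# halos = read_halos('halos_0.0.bin', num_halos, header_bytes)
# masses, positions = halos['m'], halos['pos'][:, :3]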
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""ics2entropiawiki
Read an ics file with the Entropia events and insert them into the
Entropia homepage wiki.
Example:
$ ics2entropiawiki.py --config /etc/ics2entropiawiki/config.ini
Inserts events that are not in the past into the "Termine" wiki page and
appends past events to the "Vergangene_Termine" page
"""
import locale
import configparser
import re
import requests
from argparse import ArgumentParser
from datetime import timedelta, datetime
from ics import Calendar
from mwclient import Site
from dateutil.tz import tzlocal
BOTWARNING = """
<!--
This text is automatically generated by the ics2entropiawiki bot, everything you write and everything you edit
WILL BE OVERWRITTEN
Dieser Text ist vom ics2entropiawiki bot automatisch generiert. Alles was hier manuell editiert, hinzugefügt wird
WIRD ÜBERSCHRIEBEN
-->
"""
TABLE_HEADER = """
{| class="termine" border="1" cellspacing="0" cellpadding="5" width="100%" style="border-collapse:collapse;"
! style="width:250px;" | Datum !! style="width:50px;" | Zeit !! Ort !! Beschreibung\
"""
ARCHIVE_TABLE_HEADER = """
{| class="termine" border="1" cellspacing="0" cellpadding="5" style="border-collapse:collapse;" width="100%"
|width=15%|'''Datum'''
|width=6%|'''Zeit'''
|width=15%|'''Ort'''
|width=69%|'''Beschreibung'''
"""
TABLE_FOOTER = (
"|}",
"\n",
"Weitere Links: [[Vorlage:Termine|Termine]] ",
"([https://entropia.de/index.php?title=Vorlage:Termine&action=edit Bearbeiten]),",
" [[Vorlage:Vergangene_Termine|Vergangene Termine]], [[Anfahrt]]"
)
LINE_SEPARATOR = "|-\n"
try:
locale.setlocale(locale.LC_ALL, 'de_DE.utf8')
except locale.Error:
pass
class EntropiaEvent:
"""
Parses an ics Event and converts it to an entropia-wiki suitable form
"""
def __init__(self, event):
"""
:param event: The event to be evaluated
:type event: ics.event.Event
"""
self.event = event
self.begintime = event.begin.datetime.astimezone()
self.endtime = event._end_time.datetime.astimezone()
@property
def location(self):
"""
Retrieve the location of an event
:return: location
:rtype: str
"""
locations = {
"entropia": "[[Anfahrt|Entropia]]",
}
location = " "
if self.event.location:
location = self.event.location
if location.lower() in locations.keys():
location = locations[location.lower()]
return location
@property
def begin_date(self):
"""
:return: Entropia-Wiki formatted begin time
:rtype: str
"""
return self.begintime.strftime("%a., %d.%m.%Y")
@property
def end_date(self):
"""
:return: Entropia-Wiki formatted end time
:rtype: str
"""
end_date = ""
if self.endtime - self.begintime > timedelta(days=1):
end_date = " - " + self.endtime.strftime("%a., %d.%m.%Y")
return end_date
@property
def days_to_event(self):
"""
:return: Time remaining until the event ends (negative once the event is over)
:rtype: datetime.timedelta
"""
return self.endtime - datetime.now(tz=tzlocal())
@property
def is_past_event(self):
"""
:return: Check if the event lies in the past
:rtype: bool
"""
return self.days_to_event < timedelta(days=0)
@property
def start_time(self):
"""
:return: The starting time of the event
:rtype: str
"""
start_time = " "
if not self.event.all_day:
start_time = self.begintime.strftime("%H:%M")
return start_time
@property
def description(self):
"""
:return: The event's description
:rtype: str
"""
links = None
wiki = None
event = self.event
if event.description:
links = re.findall("^[Ll]ink:(.*)$", event.description)
wiki = re.findall("^[Ww]iki:(.*)$", event.description)
if links and event.name:
description = "["+links[0]+" "+event.name+"]"
elif wiki:
description = wiki[0]
elif not event.name:
description = "N.A."
else:
description = event.name
return description
def __str__(self):
"""
:return: A wiki line describing the event
:rtype: str
"""
return ("| " +
self.begin_date +
self.end_date +
" || " +
self.start_time +
" || " +
self.location +
" || " +
self.description
)
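    # Illustration (hypothetical values, not produced by the original code): for an
    # all-day event held at "Entropia" the row built above would render roughly as
    #     | Sa., 01.01.2022 ||   || [[Anfahrt|Entropia]] || Vollversammlung
    # which matches the columns declared in TABLE_HEADER (Datum, Zeit, Ort, Beschreibung).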
def append_past_events(past_events, wiki_user, wiki_pw, wiki_archive):
"""
Append the "new" past events to the wiki archive page
:param past_events: the past events that were not added to the events page
:type past_events: list
:param wiki_user: bot user for the wiki
:type wiki_user: str
:param wiki_pw: password for the wiki user
:type wiki_pw: str
:param wiki_archive: archive page
:type wiki_archive: str
:return: None
:rtype: None
"""
site = Site('entropia.de', path='/')
site.login(wiki_user, wiki_pw)
page = site.pages[wiki_archive]
text = page.text().split('\n')
last_table_position = 0
for event in past_events:
year_header = "== {} ==".format(event.endtime.strftime('%Y'))
for index, txtline in enumerate(text):
if txtline == '|}':
last_table_position = index
if str(event) in text:
continue
if year_header in text:
append_list = (
'\n' +
LINE_SEPARATOR +
str(event)
)
text = text[:last_table_position]+[append_list, ]+text[last_table_position:]
else:
append_list = (
3 * '\n' +
year_header +
ARCHIVE_TABLE_HEADER +
'\n' +
LINE_SEPARATOR +
'\n' +
str(event) +
'\n|}'
)
text = text[:last_table_position+1]+[append_list, ]+text[last_table_position+1:]
page.save("\n".join(text))
def get_args():
"""
    Retrieve arguments from the command line and from the configuration file
    :return: ics_url, local file, wiki settings and debug flag
    :rtype: tuple
"""
parser = ArgumentParser()
parser.add_argument(
"-c", "--config",
default="/etc/ics2entropiawiki/config.ini",
dest="configfile",
help="Configuration file path",
metavar="CONFIG"
)
parser.add_argument(
"-u", "--url",
dest="ics_url",
help="The URL under which the ICS-file can be retrieved",
metavar="URL",
)
parser.add_argument(
"-f", "--file",
dest="local_file",
help="Local ics file",
metavar="FILE"
)
parser.add_argument(
"--wiki-user",
dest="wiki_user",
help="Wiki user",
metavar="WIKIUSER"
)
parser.add_argument(
"--wiki-password",
dest="wiki_pw",
help="Wiki user's password",
metavar="WIKIPW"
)
parser.add_argument(
"--wiki-page",
dest="wiki_page",
help='Wiki page',
metavar='WIKIPAGE'
)
parser.add_argument(
"--wiki-archive",
dest="wiki_archive",
help='Wiki archive',
metavar='WIKIARCHIVE'
)
parser.add_argument(
"-d", "--debug",
dest="debug",
action="store_true",
default=False
)
args = parser.parse_args()
configfile = args.configfile
ics_url = args.ics_url
file = args.local_file
wiki = {
'user': args.wiki_user,
'pass': args.wiki_pw,
'page': args.wiki_page,
'archive': args.wiki_archive,
}
debug = args.debug
if configfile:
config = configparser.ConfigParser()
config.read(configfile)
try:
ics_url = config["default"]["url"]
wiki = config["wiki"]
except KeyError as error:
print("Please have a look at the sample config provided with the package")
raise error
return ics_url, file, wiki, debug
def deradicalise_ical(ics):
"""
:param ics: input file
:type ics: str
    :return: file contents with the Radicale-specific properties removed
"""
deradicalised = ""
for line in ics.splitlines():
if 'X-RADICALE-NAME:' not in line:
deradicalised += "\n"+line
return deradicalised
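# Illustration (hypothetical value, not part of the original file): a Radicale export
# may contain properties such as
#     X-RADICALE-NAME:0f3a9c.ics
# inside each VEVENT; deradicalise_ical() drops exactly those lines before the
# calendar text is handed to the ics parser.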
def main():
"""
:return: None
:rtype: None
"""
ics_url, file, wiki, debug = get_args()
event_strings = []
past_events = []
if file:
calendar = Calendar(deradicalise_ical(open(file).read()))
else:
ics_result = requests.get(ics_url)
ics_result.encoding = 'utf-8'
calendar = Calendar(deradicalise_ical(ics_result.text))
for event in sorted(calendar.events, key=lambda ev: ev.begin):
event = EntropiaEvent(event)
if not event.is_past_event:
event_strings.append(
"\n" +
LINE_SEPARATOR +
str(event)
)
else:
past_events.append(event)
append_past_events(past_events, wiki['user'], wiki['pass'], wiki['archive'])
termine = BOTWARNING + "\n" + TABLE_HEADER + "\n" + "".join(event_strings) + "\n" + "".join(TABLE_FOOTER)
if debug:
print(termine)
site = Site('entropia.de', path='/')
site.login(wiki['user'], wiki['pass'])
page = site.pages[wiki['page']]
if termine:
page.save(termine, "Terminbot was here")
page.purge()
if __name__ == '__main__':
main()
import os
import cflearn
import platform
import unittest
from cfdata.tabular import TabularDataset
num_jobs = 0 if platform.system() == "Linux" else 2
logging_folder = "__test_zoo__"
class TestZoo(unittest.TestCase):
@staticmethod
def _test_zoo_core(model: str) -> None:
x, y = TabularDataset.iris().xy
zoo_folder = os.path.join(logging_folder, f"__{model}__")
zoo = cflearn.Zoo(model)
for key, config in zoo.benchmarks.items():
local_logging_folder = os.path.join(zoo_folder, key)
config["logging_folder"] = local_logging_folder
m = cflearn.make(model, **config).fit(x, y)
cflearn.evaluate(x, y, pipelines=m)
cflearn._rmtree(logging_folder)
def test_fcnn_zoo(self) -> None:
self._test_zoo_core("fcnn")
def test_tree_dnn_zoo(self) -> None:
self._test_zoo_core("tree_dnn")
if __name__ == "__main__":
unittest.main()
# -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import scipy
import unittest
import time
from nearpy import Engine
from nearpy.distances import CosineDistance
from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper
def example2():
# Dimension of feature space
DIM = 100
# Number of data points (dont do too much because of exact search)
POINTS = 20000
##########################################################
print('Performing indexing with HashPermutations...')
t0 = time.time()
# Create permutations meta-hash
permutations = HashPermutations('permut')
# Create binary hash as child hash
rbp_perm = RandomBinaryProjections('rbp_perm', 14)
rbp_conf = {'num_permutation':50,'beam_size':10,'num_neighbour':100}
# Add rbp as child hash of permutations hash
permutations.add_child_hash(rbp_perm, rbp_conf)
# Create engine
engine_perm = Engine(DIM, lshashes=[permutations], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm.store_vector(v)
# Then update permuted index
permutations.build_permuted_index()
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 3
print('\nNeighbour distances with HashPermutations:')
print(' -> Candidate count is %d' % engine_perm.candidate_count(query))
results = engine_perm.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix, query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with HashPermutationMapper...')
t0 = time.time()
# Create permutations meta-hash
permutations2 = HashPermutationMapper('permut2')
# Create binary hash as child hash
rbp_perm2 = RandomBinaryProjections('rbp_perm2', 14)
# Add rbp as child hash of permutations hash
permutations2.add_child_hash(rbp_perm2)
# Create engine
engine_perm2 = Engine(DIM, lshashes=[permutations2], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm2.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with HashPermutationMapper:')
print(' -> Candidate count is %d' % engine_perm2.candidate_count(query))
results = engine_perm2.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with multiple binary hashes...')
t0 = time.time()
hashes = []
for k in range(20):
hashes.append(RandomBinaryProjections('rbp_%d' % k, 10))
# Create engine
engine_rbps = Engine(DIM, lshashes=hashes, distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_rbps.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with multiple binary hashes:')
print(' -> Candidate count is %d' % engine_rbps.candidate_count(query))
results = engine_rbps.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
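# Entry point added for illustration only -- the original example module defines
# example2() but never calls it, so invoking it here is an assumption about how
# the script is meant to be run.
if __name__ == '__main__':
    example2()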
from discord.ext import commands, tasks  # import the Bot Commands framework
import traceback  # imported so errors can be printed
import os
import discord
import r
TOKEN = os.environ['DISCORD_BOT_TOKEN']
prefix = os.environ['DISCORD_BOT_PREFIX']  # command prefix
# Names of the cogs to load.
INITIAL_EXTENSIONS = [
'cogs.eval',
'cogs.glchat',
'cogs.gladd',
'cogs.gldel'
]
# Class definition. Inherits from Bot, which is a subclass of Client.
class MyBot(commands.Bot):
    # MyBot constructor.
def __init__(self, command_prefix, help_command):
        # Pass the values on to the superclass constructor.
super().__init__(command_prefix,help_command)
        # Load each cog listed in INITIAL_EXTENSIONS.
        # If loading fails, print the traceback.
for cog in INITIAL_EXTENSIONS:
try:
self.load_extension(cog)
except Exception:
traceback.print_exc()
    # Event handler called when the bot is ready
async def on_ready(self):
        print(self.user.name)  # bot name
        print(self.user.id)  # bot ID
        print(discord.__version__)  # discord.py version
print('----------------')
print('Hello World !!')
await self.change_presence(status=discord.Status.idle,activity=discord.Game(name=f'Ping:{self.ws.latency * 1000:.0f}ms'))
conn=r.connect()
ky=conn.keys()
global_ch="gloch"
count=0
for i in ky:
i=str(i)
if i == global_ch:
count+=1
if count>0:
smsd=conn.smembers(global_ch)
count=0
for q in smsd:
q=str(q)
if q=="0":
count+=1
if count>0:
p=conn.srem(global_ch,"0")
if p==True:
print("正常起動")
else:
print("異常発生")
else:
print(ky)
else:
p=conn.sadd(global_ch,"0")
if p==True:
print("正常起動")
else:
print("異常発生")
class JapaneseHelpCommand(commands.DefaultHelpCommand):
def __init__(self):
super().__init__()
self.commands_heading = "コマンド:"
self.no_category = "その他"
self.command_attrs["help"] = "コマンド一覧と簡単な説明を表示"
def get_ending_note(self):
return (f"各コマンドの説明: {prefix}help <コマンド名>\n"
f"各カテゴリの説明: {prefix}help <カテゴリ名>\n")
# Instantiate and start MyBot.
if __name__ == '__main__':
    bot = MyBot(command_prefix=prefix,help_command=JapaneseHelpCommand())  # command_prefix is the leading string used for commands, e.g. !ping
    bot.run(TOKEN)  # bot token
import shutil
from pathlib import Path
from unittest import TestCase
from unittest.mock import Mock
from unittest.mock import patch
from foliant.config.downloadfile import download_file
from foliant.config.downloadfile import get_file_ext_from_url
from foliant.config.downloadfile import get_file_name_from_url
class TestDownloadFile(TestCase):
def setUp(self):
self.project_dir = (Path(__file__).parent / 'project_dir').resolve()
self.project_dir.mkdir(exist_ok=True)
def tearDown(self):
shutil.rmtree(self.project_dir, ignore_errors=True)
@patch('foliant.config.downloadfile.urlopen', autospec=True)
def test_only_url(self, urlopen):
mock_response = Mock()
mock_response.read.return_value = b'File content'
urlopen.return_value = mock_response
url = 'http://example.com/myfile.txt'
download_file(root_dir=self.project_dir, url=url)
request = urlopen.call_args.args[0]
context = urlopen.call_args.kwargs['context']
self.assertEqual(request.headers, {})
self.assertIsNone(context)
with open(self.project_dir / 'myfile.txt') as f:
self.assertEqual(f.read(), 'File content')
@patch('foliant.config.downloadfile.urlopen', autospec=True)
def test_save_to(self, urlopen):
mock_response = Mock()
mock_response.read.return_value = b'File content'
urlopen.return_value = mock_response
url = 'http://example.com/myfile.txt'
save_to = 'subdir1/subdir2/downloaded.txt'
download_file(root_dir=self.project_dir, url=url, save_to=save_to)
request = urlopen.call_args.args[0]
context = urlopen.call_args.kwargs['context']
self.assertEqual(request.headers, {})
self.assertIsNone(context)
with open(self.project_dir / save_to) as f:
self.assertEqual(f.read(), 'File content')
@patch('foliant.config.downloadfile.urlopen', autospec=True)
def test_with_auth(self, urlopen):
mock_response = Mock()
mock_response.read.return_value = b'File content'
urlopen.return_value = mock_response
url = 'http://example.com/myfile.txt'
download_file(
root_dir=self.project_dir,
url=url,
login='john',
password='<PASSWORD>'
)
request = urlopen.call_args.args[0]
context = urlopen.call_args.kwargs['context']
self.assertIn('Authorization', request.headers)
self.assertIsNone(context)
with open(self.project_dir / 'myfile.txt') as f:
self.assertEqual(f.read(), 'File content')
class TestGetFileNameFromURL(TestCase):
def test_with_ext(self):
url = 'http://example.com/sub/myfile.txt'
name = get_file_name_from_url(url)
self.assertEqual(name, 'myfile.txt')
def test_no_ext(self):
url = 'http://example.com/sub/myfile'
name = get_file_name_from_url(url)
self.assertEqual(name, 'myfile')
def test_with_clutter(self):
url = 'http://example.com/sub/myfile.txt?param=val&foo=bar'
name = get_file_name_from_url(url)
self.assertEqual(name, 'myfile.txt')
class TestGetFileExtFromURL(TestCase):
def test_with_ext(self):
url = 'http://example.com/sub/myfile.txt'
ext = get_file_ext_from_url(url)
self.assertEqual(ext, '.txt')
def test_no_ext(self):
url = 'http://example.com/sub/myfile'
ext = get_file_ext_from_url(url)
self.assertEqual(ext, '')
def test_with_clutter(self):
url = 'http://example.com/sub/myfile.txt?param=val&foo=bar'
ext = get_file_ext_from_url(url)
self.assertEqual(ext, '.txt')
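# Illustrative call shape exercised by the tests above (values are hypothetical):
#     download_file(root_dir=Path('project_dir'),
#                   url='http://example.com/myfile.txt',
#                   save_to='subdir/downloaded.txt',
#                   login='user', password='secret')
# get_file_name_from_url() and get_file_ext_from_url() only parse the URL string,
# ignoring anything after the '?' query separator.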
import matplotlib.pyplot as plt
import numpy as np
from fears.utils import results_manager, plotter, dir_manager
import os
suffix = '07212021_0001'
data_folder = 'results_' + suffix
exp_info_file = 'experiment_info_' + suffix + '.p'
exp_folders,exp_info = results_manager.get_experiment_results(data_folder,
exp_info_file)
max_cells = exp_info.populations[0].max_cells
n_sims = exp_info.n_sims
k_abs = exp_info.slopes
exp_folders.reverse()
k_abs = np.flip(k_abs)
fig,ax = plt.subplots(nrows=2,ncols=2,figsize=(4,4))
pop = exp_info.populations[0]
ax = ax.reshape((len(k_abs),))
axnum = 0
tc_axes=[]
drug_axes=[]
for exp in exp_folders:
k_abs_t = exp[exp.find('=')+1:]
k_abs_t = float(k_abs_t)
num = np.argwhere(k_abs == k_abs_t)
num = num[0,0]
# generate timecourse axes
tcax = ax[axnum]
# da = tcax.twinx()
sim_files = os.listdir(path=exp)
sim_files = sorted(sim_files)
survive_count = 0
counts_total = None
k=0
while k < len(sim_files):
# for sim in sim_files:
sim = sim_files[k]
sim = exp + os.sep + sim
data = results_manager.get_data(sim)
dc = data[:,-1]
data = data[:,0:-1]
# data = data/np.max(data)
data_t = data[-1,:]
        # count the simulation as surviving if any genotype still has at least one cell at the final timepoint
if any(data_t >= 1):
survive_count += 1
if counts_total is None:
counts_total = data
else:
counts_total += data
# data = data/np.max(data)
# exp_info.populations[num].counts_log_scale = True
data = data/max_cells
if k==0:
drug_kwargs = {'alpha':0.7,
'color':'black',
'linewidth':2,
'label':'Drug Concentration ($\u03BC$M)'
}
tcax,drug_ax = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
drug_curve=dc,
drug_ax_sci_notation=True,
drug_kwargs=drug_kwargs,
legend_labels=False,
grayscale=True,
color='gray',
linewidth=1,
labelsize=12,
alpha=0.7
)
drug_ax.set_ylabel('')
drug_axes.append( drug_ax )
else:
tcax,da = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
grayscale=True,
color='gray',
legend_labels=False,
linewidth=2,
labelsize=12,
alpha=0.2
)
# drug_ax.set_ylim(0,10**4)
k+=1
if survive_count > 0:
counts_avg = counts_total/survive_count
# counts_avg = counts_avg/np.max(counts_avg)
# counts_avg = counts_total
counts_avg = counts_avg/np.max(counts_avg)
tcax,temp = plotter.plot_timecourse_to_axes(exp_info.populations[num],
counts_avg,
tcax,
labelsize=12)
# t = np.arange(len(dc))
# t = t*exp_info.populations[0].timestep_scale/24
# da.plot(t,dc)
tc_axes.append( tcax )
    axnum+=1
# Copyright (c) 1999-2008 <NAME> and <NAME>
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.symbols.Type import Type
class TypeDeclAST(DeclAST):
def __init__(self, slicc, type_ast, pairs, field_asts):
super(TypeDeclAST, self).__init__(slicc, pairs)
self.type_ast = type_ast
self.field_asts = field_asts
def __repr__(self):
return "[TypeDecl: %r]" % (self.type_ast)
def files(self, parent=None):
if "external" in self:
return set()
if parent:
ident = "%s_%s" % (parent, self.type_ast.ident)
else:
ident = self.type_ast.ident
return set(("%s.hh" % ident, "%s.cc" % ident))
def generate(self):
ident = str(self.type_ast)
machine = self.symtab.state_machine
# Make the new type
new_type = Type(self.symtab, ident, self.location, self.pairs,
self.state_machine)
if machine:
machine.addType(new_type)
self.symtab.newSymbol(new_type)
self.symtab.pushFrame()
# Add all of the fields of the type to it
for field in self.field_asts:
field.generate(new_type)
self.symtab.popFrame()
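# For context (illustrative only; the exact syntax depends on the protocol files):
# a SLICC declaration along the lines of
#     structure(Entry, desc="cache entry") {
#         State CacheState, desc="cache state";
#         DataBlock DataBlk, desc="data for the block";
#     }
# is parsed into a TypeDeclAST; generate() registers the new Type with the symbol
# table (and the current state machine, if any) and then lets each field AST add
# its member to the type.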
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import ray
from ray.rllib.ddpg2.models import DDPGModel
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.optimizers import PolicyEvaluator
from ray.rllib.utils.filter import NoFilter
from ray.rllib.utils.process_rollout import process_rollout
from ray.rllib.utils.sampler import SyncSampler
class DDPGEvaluator(PolicyEvaluator):
def __init__(self, registry, env_creator, config):
self.env = ModelCatalog.get_preprocessor_as_wrapper(
registry, env_creator(config["env_config"]))
# contains model, target_model
self.model = DDPGModel(registry, self.env, config)
self.sampler = SyncSampler(
self.env, self.model.model, NoFilter(),
config["num_local_steps"], horizon=config["horizon"])
def sample(self):
"""Returns a batch of samples."""
rollout = self.sampler.get_data()
rollout.data["weights"] = np.ones_like(rollout.data["rewards"])
# since each sample is one step, no discounting needs to be applied;
# this does not involve config["gamma"]
samples = process_rollout(
rollout, NoFilter(),
gamma=1.0, use_gae=False)
return samples
def update_target(self):
"""Updates target critic and target actor."""
self.model.update_target()
def compute_gradients(self, samples):
"""Returns critic, actor gradients."""
return self.model.compute_gradients(samples)
def apply_gradients(self, grads):
"""Applies gradients to evaluator weights."""
self.model.apply_gradients(grads)
def compute_apply(self, samples):
grads, _ = self.compute_gradients(samples)
self.apply_gradients(grads)
def get_weights(self):
"""Returns model weights."""
return self.model.get_weights()
def set_weights(self, weights):
"""Sets model weights."""
self.model.set_weights(weights)
def get_completed_rollout_metrics(self):
"""Returns metrics on previously completed rollouts.
Calling this clears the queue of completed rollout metrics.
"""
return self.sampler.get_metrics()
RemoteDDPGEvaluator = ray.remote(DDPGEvaluator)
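# Illustrative use of the remote wrapper (assumes ray.init() has been called and that
# registry/env_creator/config are provided by the surrounding DDPG agent; the worker
# count shown is hypothetical):
#     workers = [RemoteDDPGEvaluator.remote(registry, env_creator, config)
#                for _ in range(2)]
#     batches = ray.get([w.sample.remote() for w in workers])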
from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
from skimage import io
import matplotlib.pyplot as plt
image = io.imread("star.jpg")
image_gray = rgb2gray(image)
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column: for blob_log/blob_dog the radius of a 2-D blob is approximately sqrt(2) * sigma.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
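# No radius conversion is applied for blob_doh: per the scikit-image example, the
# determinant-of-Hessian detector's returned sigma already approximates the blob radius.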
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)
fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True)
ax = axes.ravel()
for idx, (blobs, color, title) in enumerate(sequence):
ax[idx].set_title(title)
ax[idx].imshow(image)
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
ax[idx].add_patch(c)
ax[idx].set_axis_off()
plt.tight_layout()
plt.show()
# -*- coding: utf-8 -*-
r"""
Dirichlet characters
A :class:`DirichletCharacter` is the extension of a homomorphism
.. MATH::
(\ZZ/N\ZZ)^* \to R^*,
for some ring `R`, to the map `\ZZ/N\ZZ \to R` obtained by sending
those `x\in\ZZ/N\ZZ` with `\gcd(N,x)>1` to `0`.
EXAMPLES::
sage: G = DirichletGroup(35)
sage: x = G.gens()
sage: e = x[0]*x[1]^2; e
Dirichlet character modulo 35 of conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2 - 1
sage: e.order()
12
This illustrates a canonical coercion::
sage: e = DirichletGroup(5, QQ).0
sage: f = DirichletGroup(5,CyclotomicField(4)).0
sage: e*f
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4
AUTHORS:
- <NAME> (2005-09-02): Fixed bug in comparison of Dirichlet
characters. It was checking that their values were the same, but
not checking that they had the same level!
- <NAME> (2006-01-07): added more examples
- <NAME> (2006-05-21): added examples of everything; fix a
*lot* of tiny bugs and design problem that became clear when
creating examples.
- <NAME> (2008-02-16): speed up __call__ method for
Dirichlet characters, miscellaneous fixes
- <NAME> (2014-03-06): use UniqueFactory to cache DirichletGroups
"""
# ****************************************************************************
# Copyright (C) 2004-2006 <NAME> <<EMAIL>>
# Copyright (C) 2014 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function
import sage.categories.all as cat
from sage.misc.all import prod
import sage.misc.prandom as random
import sage.modules.free_module as free_module
import sage.modules.free_module_element as free_module_element
import sage.rings.all as rings
import sage.rings.number_field.number_field as number_field
from sage.libs.pari import pari
from sage.categories.map import Map
from sage.rings.rational_field import is_RationalField
from sage.rings.complex_mpfr import is_ComplexField
from sage.rings.qqbar import is_AlgebraicField
from sage.rings.ring import is_Ring
from sage.misc.functional import round
from sage.misc.cachefunc import cached_method
from sage.misc.fast_methods import WithEqualityById
from sage.structure.element import MultiplicativeGroupElement
from sage.structure.gens_py import multiplicative_iterator
from sage.structure.parent import Parent
from sage.structure.sequence import Sequence
from sage.structure.factory import UniqueFactory
from sage.structure.richcmp import richcmp
from sage.arith.all import (binomial, bernoulli, kronecker, factor, gcd,
lcm, fundamental_discriminant, euler_phi, factorial, valuation)
def trivial_character(N, base_ring=rings.RationalField()):
r"""
Return the trivial character of the given modulus, with values in the given
base ring.
EXAMPLES::
sage: t = trivial_character(7)
sage: [t(x) for x in [0..20]]
[0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1]
sage: t(1).parent()
Rational Field
sage: trivial_character(7, Integers(3))(1).parent()
Ring of integers modulo 3
"""
return DirichletGroup(N, base_ring)(1)
TrivialCharacter = trivial_character
def kronecker_character(d):
"""
Return the quadratic Dirichlet character (d/.) of minimal
conductor.
EXAMPLES::
sage: kronecker_character(97*389*997^2)
Dirichlet character modulo 37733 of conductor 37733 mapping 1557 |--> -1, 37346 |--> -1
::
sage: a = kronecker_character(1)
sage: b = DirichletGroup(2401,QQ)(a) # NOTE -- over QQ!
sage: b.modulus()
2401
AUTHORS:
- <NAME> (2006-08-06)
"""
d = rings.Integer(d)
if d == 0:
raise ValueError("d must be nonzero")
D = fundamental_discriminant(d)
G = DirichletGroup(abs(D), rings.RationalField())
return G([kronecker(D,u) for u in G.unit_gens()])
def kronecker_character_upside_down(d):
"""
Return the quadratic Dirichlet character (./d) of conductor d, for
    d > 0.
EXAMPLES::
sage: kronecker_character_upside_down(97*389*997^2)
Dirichlet character modulo 37506941597 of conductor 37733 mapping 13533432536 |--> -1, 22369178537 |--> -1, 14266017175 |--> 1
AUTHORS:
- <NAME> (2006-08-06)
"""
d = rings.Integer(d)
if d <= 0:
raise ValueError("d must be positive")
G = DirichletGroup(d, rings.RationalField())
return G([kronecker(u.lift(),d) for u in G.unit_gens()])
def is_DirichletCharacter(x):
r"""
Return True if x is of type DirichletCharacter.
EXAMPLES::
sage: from sage.modular.dirichlet import is_DirichletCharacter
sage: is_DirichletCharacter(trivial_character(3))
True
sage: is_DirichletCharacter([1])
False
"""
return isinstance(x, DirichletCharacter)
class DirichletCharacter(MultiplicativeGroupElement):
"""
A Dirichlet character.
"""
def __init__(self, parent, x, check=True):
r"""
Create a Dirichlet character with specified values on
generators of `(\ZZ/n\ZZ)^*`.
INPUT:
- ``parent`` -- :class:`DirichletGroup`, a group of Dirichlet
characters
- ``x`` -- one of the following:
- tuple or list of ring elements: the values of the
Dirichlet character on the standard generators of
`(\ZZ/N\ZZ)^*` as returned by
:meth:`sage.rings.finite_rings.integer_mod_ring.IntegerModRing_generic.unit_gens`.
- vector over `\ZZ/e\ZZ`, where `e` is the order of the
standard root of unity for ``parent``.
In both cases, the orders of the elements must divide the
orders of the respective generators of `(\ZZ/N\ZZ)^*`.
OUTPUT:
The Dirichlet character defined by `x` (type
:class:`DirichletCharacter`).
EXAMPLES::
sage: G.<e> = DirichletGroup(13)
sage: G
Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4
sage: e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: loads(e.dumps()) == e
True
::
sage: G, x = DirichletGroup(35).objgens()
sage: e = x[0]*x[1]; e
Dirichlet character modulo 35 of conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2
sage: e.order()
12
sage: loads(e.dumps()) == e
True
TESTS::
sage: G = DirichletGroup(10)
sage: TestSuite(G[1]).run()
It is checked that the orders of the elements in `x` are
admissible (see :trac:`17283`)::
sage: k.<i> = CyclotomicField(4)
sage: G = DirichletGroup(192)
sage: G([i, -1, -1])
Traceback (most recent call last):
...
ValueError: values (= (zeta16^4, -1, -1)) must have multiplicative orders dividing (2, 16, 2), respectively
sage: from sage.modular.dirichlet import DirichletCharacter
sage: M = FreeModule(Zmod(16), 3)
sage: DirichletCharacter(G, M([4, 8, 8]))
Traceback (most recent call last):
...
ValueError: values (= (4, 8, 8) modulo 16) must have additive orders dividing (2, 16, 2), respectively
"""
MultiplicativeGroupElement.__init__(self, parent)
if check:
orders = parent.integers_mod().unit_group().gens_orders()
if len(x) != len(orders):
raise ValueError("wrong number of values (= {}) on generators (want {})".format(x, len(orders)))
if free_module_element.is_FreeModuleElement(x):
x = parent._module(x)
if any(u * v for u, v in zip(x, orders)):
raise ValueError("values (= {} modulo {}) must have additive orders dividing {}, respectively"
.format(x, parent.zeta_order(), orders))
self.element.set_cache(x)
else:
R = parent.base_ring()
x = tuple(map(R, x))
if R.is_exact() and any(u**v != 1 for u, v in zip(x, orders)):
raise ValueError("values (= {}) must have multiplicative orders dividing {}, respectively"
.format(x, orders))
self.values_on_gens.set_cache(x)
else:
if free_module_element.is_FreeModuleElement(x):
self.element.set_cache(x)
else:
self.values_on_gens.set_cache(x)
@cached_method
def __eval_at_minus_one(self):
r"""
Efficiently evaluate the character at -1 using knowledge of its
order. This is potentially much more efficient than computing the
value of -1 directly using dlog and a large power of the image root
of unity.
We use the following. Proposition: Suppose eps is a character mod
`p^n`, where `p` is a prime. Then
`\varepsilon(-1) = -1` if and only if `p = 2` and
the factor of eps at 4 is nontrivial or `p > 2` and 2 does
not divide `\phi(p^n)/\mbox{\rm ord}(\varepsilon)`.
EXAMPLES::
sage: chi = DirichletGroup(20).0; chi._DirichletCharacter__eval_at_minus_one()
-1
"""
D = self.decomposition()
val = self.base_ring()(1)
for e in D:
if e.modulus() % 2 == 0:
if e.modulus() % 4 == 0:
val *= e.values_on_gens()[0] # first gen is -1 for 2-power modulus
elif (euler_phi(e.parent().modulus()) / e.order()) % 2:
val *= -1
return val
def __call__(self, m):
"""
Return the value of this character at the integer `m`.
.. warning::
A table of values of the character is made the first time
you call this (unless `m` equals -1)
EXAMPLES::
sage: G = DirichletGroup(60)
sage: e = prod(G.gens(), G(1))
sage: e
Dirichlet character modulo 60 of conductor 60 mapping 31 |--> -1, 41 |--> -1, 37 |--> zeta4
sage: e(-1)
-1
sage: e(2)
0
sage: e(7)
-zeta4
sage: Integers(60).unit_gens()
(31, 41, 37)
sage: e(31)
-1
sage: e(41)
-1
sage: e(37)
zeta4
sage: e(31*37)
-zeta4
sage: parent(e(31*37))
Cyclotomic Field of order 4 and degree 2
"""
N = self.modulus()
m = m % N
if self.values.is_in_cache() or m != N - 1:
return self.values()[m]
else:
return self.__eval_at_minus_one()
def change_ring(self, R):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a conversion map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
EXAMPLES::
sage: e = DirichletGroup(7, QQ).0
sage: f = e.change_ring(QuadraticField(3, 'a'))
sage: f.parent()
Group of Dirichlet characters modulo 7 with values in Number Field in a with defining polynomial x^2 - 3 with a = 1.732050807568878?
::
sage: e = DirichletGroup(13).0
sage: e.change_ring(QQ)
Traceback (most recent call last):
...
TypeError: Unable to coerce zeta12 to a rational
We test the case where `R` is a map (:trac:`18072`)::
sage: K.<i> = QuadraticField(-1)
sage: chi = DirichletGroup(5, K)[1]
sage: chi(2)
i
sage: f = K.complex_embeddings()[0]
sage: psi = chi.change_ring(f)
sage: psi(2)
-1.83697019872103e-16 - 1.00000000000000*I
"""
if self.base_ring() is R:
return self
G = self.parent().change_ring(R)
return G.element_class(G, [R(x) for x in self.values_on_gens()])
def _richcmp_(self, other, op):
"""
Compare ``self`` to ``other``.
.. NOTE::
Since there is no coercion between Dirichlet groups
of different moduli, characters of different moduli
compare as unequal, even if they define identical
functions on ``ZZ``.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: f = e.restrict(8)
sage: e == e
True
sage: f == f
True
sage: e == f
False
sage: k = DirichletGroup(7)([-1])
sage: k == e
False
"""
return richcmp(self.values_on_gens(), other.values_on_gens(), op)
def __hash__(self):
"""
Return the hash of ``self``.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: hash(e) == hash((-1,1))
True
"""
return hash(self.values_on_gens())
def __invert__(self):
"""
Return the multiplicative inverse of self.
EXAMPLES::
sage: e = DirichletGroup(13).0
sage: f = ~e
sage: f*e
Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = -self.element()
else:
x = tuple(~z for z in self.values_on_gens())
return G.element_class(G, x, check=False)
def _mul_(self, other):
"""
Return the product of self and other.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: b
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
sage: a*b # indirect doctest
Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> zeta4
Multiplying elements whose parents have different zeta orders works::
sage: a = DirichletGroup(3, QQ, zeta=1, zeta_order=1)(1)
sage: b = DirichletGroup(3, QQ, zeta=-1, zeta_order=2)([-1])
sage: a * b # indirect doctest
Dirichlet character modulo 3 of conductor 3 mapping 2 |--> -1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = self.element() + other.element()
else:
x = tuple(y * z for y, z in zip(self.values_on_gens(), other.values_on_gens()))
return G.element_class(G, x, check=False)
def __copy__(self):
"""
Return a (shallow) copy of this Dirichlet character.
EXAMPLES::
sage: G.<a> = DirichletGroup(11)
sage: b = copy(a)
sage: a is b
False
sage: a.element() is b.element()
False
sage: a.values_on_gens() is b.values_on_gens()
True
"""
# This method exists solely because of a bug in the cPickle module --
# see modsym/manin_symbols.py.
G = self.parent()
return G.element_class(G, self.values_on_gens(), check=False)
def __pow__(self, n):
"""
Return self raised to the power of n
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a^2
Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1
sage: b^2
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = n * self.element()
else:
x = tuple(z**n for z in self.values_on_gens())
return G.element_class(G, x, check=False)
def _repr_short_(self):
r"""
A short string representation of self, often used in string representations of modular forms
EXAMPLES::
sage: chi = DirichletGroup(24).0
sage: chi._repr_short_()
'[-1, 1, 1]'
"""
return str(list(self.values_on_gens()))
def _repr_(self):
"""
String representation of self.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: repr(a) # indirect doctest
'Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1'
TESTS:
Dirichlet characters modulo 1 and 2 are printed correctly (see
:trac:`17338`)::
sage: DirichletGroup(1)[0]
Dirichlet character modulo 1 of conductor 1
sage: DirichletGroup(2)[0]
Dirichlet character modulo 2 of conductor 1
"""
s = 'Dirichlet character modulo %s of conductor %s' % (self.modulus(), self.conductor())
r = len(self.values_on_gens())
if r != 0:
s += ' mapping '
for i in range(r):
if i != 0:
s += ', '
s += str(self.parent().unit_gens()[i]) + ' |--> ' + str(self.values_on_gens()[i])
return s
def _latex_(self):
r"""
LaTeX representation of self.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(16)
sage: latex(b) # indirect doctest
\hbox{Dirichlet character modulo } 16 \hbox{ of conductor } 16 \hbox{ mapping } 15 \mapsto 1,\ 5 \mapsto \zeta_{4}
TESTS:
Dirichlet characters modulo 1 and 2 are printed correctly (see
:trac:`17338`)::
sage: latex(DirichletGroup(1)[0])
\hbox{Dirichlet character modulo } 1 \hbox{ of conductor } 1
sage: latex(DirichletGroup(2)[0])
\hbox{Dirichlet character modulo } 2 \hbox{ of conductor } 1
"""
s = r'\hbox{Dirichlet character modulo } %s \hbox{ of conductor } %s' % (self.modulus(), self.conductor())
r = len(self.values_on_gens())
if r != 0:
s += r' \hbox{ mapping } '
for i in range(r):
if i != 0:
s += r',\ '
s += self.parent().unit_gens()[i]._latex_() + r' \mapsto ' + self.values_on_gens()[i]._latex_()
return s
def base_ring(self):
"""
Returns the base ring of this Dirichlet character.
EXAMPLES::
sage: G = DirichletGroup(11)
sage: G.gen(0).base_ring()
Cyclotomic Field of order 10 and degree 4
sage: G = DirichletGroup(11, RationalField())
sage: G.gen(0).base_ring()
Rational Field
"""
return self.parent().base_ring()
def bar(self):
"""
Return the complex conjugate of this Dirichlet character.
EXAMPLES::
sage: e = DirichletGroup(5).0
sage: e
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4
sage: e.bar()
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4
"""
return ~self
def bernoulli(self, k, algorithm='recurrence', cache=True, **opts):
r"""
        Returns the generalized Bernoulli number `B_{k,\varepsilon}`.
INPUT:
- ``k`` -- a non-negative integer
- ``algorithm`` -- either ``'recurrence'`` (default) or
``'definition'``
- ``cache`` -- if True, cache answers
- ``**opts`` -- optional arguments; not used directly, but
passed to the :func:`bernoulli` function if this is called
OUTPUT:
Let `\varepsilon` be a (not necessarily primitive) character
of modulus `N`. This function returns the generalized
Bernoulli number `B_{k,\varepsilon}`, as defined by the
following identity of power series (see for example
[DI1995]_, Section 2.2):
.. MATH::
            \sum_{a=1}^N \frac{\varepsilon(a) t e^{at}}{e^{Nt}-1}
            = \sum_{k=0}^{\infty} \frac{B_{k,\varepsilon}}{k!} t^k.
ALGORITHM:
The ``'recurrence'`` algorithm computes generalized Bernoulli
numbers via classical Bernoulli numbers using the formula in
[Coh2007]_, Proposition 9.4.5; this is usually optimal. The
        ``'definition'`` algorithm uses the definition directly.
.. WARNING::
In the case of the trivial Dirichlet character modulo 1,
this function returns `B_{1,\varepsilon} = 1/2`, in
accordance with the above definition, but in contrast to
the value `B_1 = -1/2` for the classical Bernoulli number.
Some authors use an alternative definition giving
`B_{1,\varepsilon} = -1/2`; see the discussion in
[Coh2007]_, Section 9.4.1.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.bernoulli(5)
7430/13*zeta12^3 - 34750/13*zeta12^2 - 11380/13*zeta12 + 9110/13
sage: eps = DirichletGroup(9).0
sage: eps.bernoulli(3)
10*zeta6 + 4
sage: eps.bernoulli(3, algorithm="definition")
10*zeta6 + 4
TESTS:
Check that :trac:`17586` is fixed::
sage: DirichletGroup(1)[0].bernoulli(1)
1/2
"""
if cache:
try:
self.__bernoulli
except AttributeError:
self.__bernoulli = {}
if k in self.__bernoulli:
return self.__bernoulli[k]
N = self.modulus()
K = self.base_ring()
if N == 1:
# By definition, the first Bernoulli number of the trivial
# character is 1/2, in contrast to the value B_1 = -1/2.
ber = K.one()/2 if k == 1 else K(bernoulli(k))
elif self(-1) != K((-1)**k):
ber = K.zero()
elif algorithm == "recurrence":
# The following code is pretty fast, at least compared to
# the other algorithm below. That said, I'm sure it could
# be sped up by a factor of 10 or more in many cases,
# especially since we end up computing all the Bernoulli
# numbers up to k, which should be done with power series
# instead of calls to the Bernoulli function. Likewise
# computing all binomial coefficients can be done much
# more efficiently.
v = self.values()
S = lambda n: sum(v[r] * r**n for r in range(1, N))
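            # [Coh2007]_, Prop. 9.4.5:
            #   B_{k,eps} = sum_{j=0}^{k} binomial(k, j) * B_j * N^(j-1) * S(k-j),
            # where S(n) = sum_{r=1}^{N-1} eps(r) * r^n.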
ber = K(sum(binomial(k,j) * bernoulli(j, **opts) *
N**(j-1) * S(k-j) for j in range(k+1)))
elif algorithm == "definition":
# This is better since it computes the same thing, but requires
# no arith in a poly ring over a number field.
prec = k+2
R = rings.PowerSeriesRing(rings.QQ, 't')
t = R.gen()
# g(t) = t/(e^{Nt}-1)
g = t/((N*t).exp(prec) - 1)
# h(n) = g(t)*e^{nt}
h = [0] + [g * ((n*t).exp(prec)) for n in range(1,N+1)]
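            # By the defining power series identity, B_{k,eps} is k! times the
            # coefficient of t^k in sum_{a=1}^{N} eps(a) * g(t) * e^{a*t}.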
ber = sum([self(a)*h[a][k] for a in range(1,N+1)]) * factorial(k)
else:
raise ValueError("algorithm = '%s' unknown"%algorithm)
if cache:
self.__bernoulli[k] = ber
return ber
def lfunction(self, prec=53, algorithm='pari'):
"""
Return the L-function of ``self``.
The result is a wrapper around a PARI L-function or around
the ``lcalc`` program.
INPUT:
- ``prec`` -- precision (default 53)
- ``algorithm`` -- 'pari' (default) or 'lcalc'
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: L = a.lfunction(); L
PARI L-function associated to Dirichlet character modulo 20
of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: L(4)
0.988944551741105
With the algorithm "lcalc"::
sage: a = a.primitive_character()
sage: L = a.lfunction(algorithm='lcalc'); L
L-function with complex Dirichlet coefficients
sage: L.value(4) # abs tol 1e-14
0.988944551741105 - 5.16608739123418e-18*I
"""
if algorithm is None:
algorithm = 'pari'
if algorithm == 'pari':
from sage.lfunctions.pari import lfun_character, LFunction
Z = LFunction(lfun_character(self), prec=prec)
Z.rename('PARI L-function associated to %s' % self)
return Z
elif algorithm == 'lcalc':
from sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_character
return Lfunction_from_character(self)
raise ValueError('algorithm must be "pari" or "lcalc"')
@cached_method
def conductor(self):
"""
Computes and returns the conductor of this character.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.conductor()
4
sage: b.conductor()
5
sage: (a*b).conductor()
20
TESTS::
sage: G.<a, b> = DirichletGroup(20)
sage: type(G(1).conductor())
<type 'sage.rings.integer.Integer'>
"""
if self.modulus() == 1 or self.is_trivial():
return rings.Integer(1)
F = factor(self.modulus())
if len(F) > 1:
return prod([d.conductor() for d in self.decomposition()])
p = F[0][0]
# When p is odd, and x =/= 1, the conductor is the smallest p**r such that
# Order(x) divides EulerPhi(p**r) = p**(r-1)*(p-1).
# For a given r, whether or not the above divisibility holds
# depends only on the factor of p**(r-1) on the right hand side.
# Since p-1 is coprime to p, this smallest r such that the
# divisibility holds equals Valuation(Order(x),p)+1.
cond = p**(valuation(self.order(),p) + 1)
if p == 2 and F[0][1] > 2 and self.values_on_gens()[1].multiplicative_order() != 1:
cond *= 2
return rings.Integer(cond)
@cached_method
def decomposition(self):
r"""
Return the decomposition of self as a product of Dirichlet
characters of prime power modulus, where the prime powers exactly
divide the modulus of this character.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: c = a*b
sage: d = c.decomposition(); d
[Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4]
sage: d[0].parent()
Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2
sage: d[1].parent()
Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2
We can't multiply directly, since coercion of one element into the
other parent fails in both cases::
sage: d[0]*d[1] == c
Traceback (most recent call last):
...
TypeError: unsupported operand parent(s) for *: 'Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2' and 'Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2'
We can multiply if we're explicit about where we want the
multiplication to take place.
::
sage: G(d[0])*G(d[1]) == c
True
Conductors that are divisible by various powers of 2 present
some problems as the multiplicative group modulo `2^k` is
trivial for `k = 1` and non-cyclic for `k \ge 3`::
sage: (DirichletGroup(18).0).decomposition()
[Dirichlet character modulo 2 of conductor 1, Dirichlet character modulo 9 of conductor 9 mapping 2 |--> zeta6]
sage: (DirichletGroup(36).0).decomposition()
[Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1]
sage: (DirichletGroup(72).0).decomposition()
[Dirichlet character modulo 8 of conductor 4 mapping 7 |--> -1, 5 |--> 1, Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1]
"""
D = self.parent().decomposition()
vals = [[z] for z in self.values_on_gens()]
if self.modulus() % 8 == 0: # 2 factors at 2.
vals[0].append(vals[1][0])
del vals[1]
elif self.modulus() % 4 == 2: # 0 factors at 2.
vals = [1] + vals
return [D[i](vals[i]) for i in range(len(D))]
def extend(self, M):
"""
Returns the extension of this character to a Dirichlet character
modulo the multiple M of the modulus.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: H.<c> = DirichletGroup(4)
sage: c.extend(20)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: a
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: c.extend(20) == a
True
"""
if M % self.modulus() != 0:
raise ArithmeticError("M(=%s) must be a multiple of the modulus(=%s)"%(M,self.modulus()))
H = DirichletGroup(M, self.base_ring())
return H(self)
def _pari_conversion(self):
r"""
Prepare data for the conversion of the character to Pari.
OUTPUT:
        a pair ``(G, v)``, where ``G`` is `(\ZZ / N \ZZ)^*` in PARI form (`N` being
        the modulus) and ``v`` is the list of exponents describing this character
        on the PARI generators
EXAMPLES::
sage: chi4 = DirichletGroup(4).gen()
sage: chi4._pari_conversion()
([[4, [0]], [2, [2], [3]], [[2]~, Vecsmall([2])],
[[4], [[1, matrix(0,2)]], Mat(1), [3], [2], [0]], Mat(1)], [1])
sage: chi = DirichletGroup(24)([1,-1,-1]); chi
Dirichlet character modulo 24 of conductor 24
mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
sage: chi._pari_conversion()
([[24, [0]], [8, [2, 2, 2], [7, 13, 17]],
[[2, 2, 3]~, Vecsmall([3, 3, 1])],
[[8, 8, 3], [[1, matrix(0,2)], [1, matrix(0,2)], [2, Mat([2, 1])]],
[1, 0, 0; 0, 1, 0; 0, 0, 1], [7, 13, 17], [2, 2, 2], [0, 0, 0]],
[1, 0, 0; 0, 1, 0; 0, 0, 1]], [0, 1, 1])
"""
G = pari.znstar(self.modulus(), 1)
pari_orders = G[1][1]
pari_gens = G[1][2]
# one should use the following, but this does not work
# pari_orders = G.cyc()
# pari_gens = G.gen()
values_on_gens = (self(x) for x in pari_gens)
# now compute the input for pari (list of exponents)
P = self.parent()
if is_ComplexField(P.base_ring()):
zeta = P.zeta()
zeta_argument = zeta.argument()
v = [int(x.argument() / zeta_argument) for x in values_on_gens]
else:
dlog = P._zeta_dlog
v = [dlog[x] for x in values_on_gens]
m = P.zeta_order()
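        # v[i] is a discrete log with respect to zeta of order m; rescale it to an
        # exponent with respect to a root of unity of order pari_orders[i], as PARI expects.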
v = [(vi * oi) // m for vi, oi in zip(v, pari_orders)]
return (G, v)
def conrey_number(self):
r"""
Return the Conrey number for this character.
        This is a positive integer coprime to `q` that identifies a
        Dirichlet character of modulus `q`.
See https://www.lmfdb.org/knowledge/show/character.dirichlet.conrey
EXAMPLES::
sage: chi4 = DirichletGroup(4).gen()
sage: chi4.conrey_number()
3
sage: chi = DirichletGroup(24)([1,-1,-1]); chi
Dirichlet character modulo 24 of conductor 24
mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
sage: chi.conrey_number()
5
sage: chi = DirichletGroup(60)([1,-1,I])
sage: chi.conrey_number()
17
sage: chi = DirichletGroup(420)([1,-1,-I,1])
sage: chi.conrey_number()
113
TESTS::
sage: eps1 = DirichletGroup(5)([-1])
sage: eps2 = DirichletGroup(5,QQ)([-1])
sage: eps1.conrey_number() == eps2.conrey_number()
True
"""
G, v = self._pari_conversion()
return pari.znconreyexp(G, v).sage()
def lmfdb_page(self):
r"""
Open the LMFDB web page of the character in a browser.
See https://www.lmfdb.org
EXAMPLES::
sage: E = DirichletGroup(4).gen()
sage: E.lmfdb_page() # optional -- webbrowser
"""
import webbrowser
lmfdb_url = 'https://www.lmfdb.org/Character/Dirichlet/{}/{}'
url = lmfdb_url.format(self.modulus(), self.conrey_number())
webbrowser.open(url)
def galois_orbit(self, sort=True):
r"""
Return the orbit of this character under the action of the absolute
Galois group of the prime subfield of the base ring.
EXAMPLES::
sage: G = DirichletGroup(30); e = G.1
sage: e.galois_orbit()
[Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> -zeta4,
Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> zeta4]
Another example::
sage: G = DirichletGroup(13)
sage: G.galois_orbits()
[
[Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1],
...,
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1]
]
sage: e = G.0
sage: e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: e.galois_orbit()
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^3 + zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^3 - zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12]
sage: e = G.0^2; e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2
sage: e.galois_orbit()
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2, Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^2 + 1]
A non-example::
sage: chi = DirichletGroup(7, Integers(9), zeta = Integers(9)(2)).0
sage: chi.galois_orbit()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
"""
if not self.base_ring().is_integral_domain():
raise TypeError("Galois orbits only defined if base ring is an integral domain")
k = self.order()
if k <= 2:
return [self]
P = self.parent()
z = self.element()
o = int(z.additive_order())
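        # Each automorphism raises self to some power m coprime to its order,
        # i.e. multiplies the exponent vector z by m.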
Auts = set([m % o for m in P._automorphisms()])
v = [P.element_class(P, m * z, check=False) for m in Auts]
if sort:
v.sort()
return v
def gauss_sum(self, a=1):
r"""
Return a Gauss sum associated to this Dirichlet character.
The Gauss sum associated to `\chi` is
.. MATH::
g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m^{th}` root of unity.
FACTS: If the modulus is a prime `p` and the character is
nontrivial, then the Gauss sum has absolute value `\sqrt{p}`.
CACHING: Computed Gauss sums are *not* cached with this character.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G([-1])
sage: e.gauss_sum(1)
2*zeta6 - 1
sage: e.gauss_sum(2)
-2*zeta6 + 1
sage: norm(e.gauss_sum())
3
::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.gauss_sum()
-zeta156^46 + zeta156^45 + zeta156^42 + zeta156^41 + 2*zeta156^40 + zeta156^37 - zeta156^36 - zeta156^34 - zeta156^33 - zeta156^31 + 2*zeta156^30 + zeta156^28 - zeta156^24 - zeta156^22 + zeta156^21 + zeta156^20 - zeta156^19 + zeta156^18 - zeta156^16 - zeta156^15 - 2*zeta156^14 - zeta156^10 + zeta156^8 + zeta156^7 + zeta156^6 + zeta156^5 - zeta156^4 - zeta156^2 - 1
sage: factor(norm(e.gauss_sum()))
13^24
TESTS:
The field of algebraic numbers is supported (:trac:`19056`)::
sage: G = DirichletGroup(7, QQbar)
sage: G[1].gauss_sum()
-2.440133358345538? + 1.022618791871794?*I
Check that :trac:`19060` is fixed::
sage: K.<z> = CyclotomicField(8)
sage: G = DirichletGroup(13, K)
sage: chi = G([z^2])
sage: chi.gauss_sum()
zeta52^22 + zeta52^21 + zeta52^19 - zeta52^16 + zeta52^15 + zeta52^14 + zeta52^12 - zeta52^11 - zeta52^10 - zeta52^7 - zeta52^5 + zeta52^4
Check that :trac:`25127` is fixed::
sage: G = DirichletGroup(1)
sage: chi = G.one()
sage: chi.gauss_sum()
1
.. SEEALSO::
- :func:`sage.arith.misc.gauss_sum` for general finite fields
- :func:`sage.rings.padics.misc.gauss_sum` for a `p`-adic version
"""
G = self.parent()
K = G.base_ring()
chi = self
m = G.modulus()
if is_ComplexField(K):
return self.gauss_sum_numerical(a=a)
elif is_AlgebraicField(K):
L = K
zeta = L.zeta(m)
elif number_field.is_CyclotomicField(K) or is_RationalField(K):
chi = chi.minimize_base_ring()
n = lcm(m, G.zeta_order())
L = rings.CyclotomicField(n)
zeta = L.gen(0) ** (n // m)
else:
raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field")
zeta = zeta ** a
g = L(chi(0))
z = L.one()
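        # Accumulate g = sum_{r=0}^{m-1} chi(r) * zeta^(a*r); zeta has already been
        # replaced by zeta^a above, and z runs through its successive powers.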
for c in chi.values()[1:]:
z *= zeta
g += L(c)*z
return g
def gauss_sum_numerical(self, prec=53, a=1):
r"""
Return a Gauss sum associated to this Dirichlet character as an
approximate complex number with prec bits of precision.
INPUT:
- ``prec`` -- integer (default: 53), *bits* of precision
- ``a`` -- integer, as for :meth:`gauss_sum`.
The Gauss sum associated to `\chi` is
.. MATH::
g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m^{th}` root of unity.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G.0
sage: abs(e.gauss_sum_numerical())
1.7320508075...
sage: sqrt(3.0)
1.73205080756888
sage: e.gauss_sum_numerical(a=2)
-...e-15 - 1.7320508075...*I
sage: e.gauss_sum_numerical(a=2, prec=100)
4.7331654313260708324703713917e-30 - 1.7320508075688772935274463415*I
sage: G = DirichletGroup(13)
sage: H = DirichletGroup(13, CC)
sage: e = G.0
sage: f = H.0
sage: e.gauss_sum_numerical()
-3.07497205... + 1.8826966926...*I
sage: f.gauss_sum_numerical()
-3.07497205... + 1.8826966926...*I
sage: abs(e.gauss_sum_numerical())
3.60555127546...
sage: abs(f.gauss_sum_numerical())
3.60555127546...
sage: sqrt(13.0)
3.60555127546399
TESTS:
The field of algebraic numbers is supported (:trac:`19056`)::
sage: G = DirichletGroup(7, QQbar)
sage: G[1].gauss_sum_numerical()
-2.44013335834554 + 1.02261879187179*I
"""
G = self.parent()
K = G.base_ring()
if is_ComplexField(K):
phi = lambda t : t
CC = K
elif is_AlgebraicField(K):
from sage.rings.complex_mpfr import ComplexField
CC = ComplexField(prec)
phi = CC.coerce_map_from(K)
elif number_field.is_CyclotomicField(K) or is_RationalField(K):
phi = K.complex_embedding(prec)
CC = phi.codomain()
else:
raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field")
zeta = CC.zeta(G.modulus()) ** a
g = phi(self(0))
z = CC.one()
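        # Same accumulation as in gauss_sum, with the character values sent
        # into CC through the embedding phi.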
for c in self.values()[1:]:
z *= zeta
g += phi(c)*z
return g
def jacobi_sum(self, char, check=True):
r"""
Return the Jacobi sum associated to these Dirichlet characters
(i.e., J(self,char)).
This is defined as
.. MATH::
J(\chi, \psi) = \sum_{a \in \ZZ / N\ZZ} \chi(a) \psi(1-a)
where `\chi` and `\psi` are both characters modulo `N`.
EXAMPLES::
sage: D = DirichletGroup(13)
sage: e = D.0
sage: f = D[-2]
sage: e.jacobi_sum(f)
3*zeta12^2 + 2*zeta12 - 3
sage: f.jacobi_sum(e)
3*zeta12^2 + 2*zeta12 - 3
sage: p = 7
sage: DP = DirichletGroup(p)
sage: f = DP.0
sage: e.jacobi_sum(f)
Traceback (most recent call last):
...
NotImplementedError: Characters must be from the same Dirichlet Group.
sage: all_jacobi_sums = [(DP[i].values_on_gens(),DP[j].values_on_gens(),DP[i].jacobi_sum(DP[j]))
....: for i in range(p-1) for j in range(i, p-1)]
sage: for s in all_jacobi_sums:
....: print(s)
((1,), (1,), 5)
((1,), (zeta6,), -1)
((1,), (zeta6 - 1,), -1)
((1,), (-1,), -1)
((1,), (-zeta6,), -1)
((1,), (-zeta6 + 1,), -1)
((zeta6,), (zeta6,), -zeta6 + 3)
((zeta6,), (zeta6 - 1,), 2*zeta6 + 1)
((zeta6,), (-1,), -2*zeta6 - 1)
((zeta6,), (-zeta6,), zeta6 - 3)
((zeta6,), (-zeta6 + 1,), 1)
((zeta6 - 1,), (zeta6 - 1,), -3*zeta6 + 2)
((zeta6 - 1,), (-1,), 2*zeta6 + 1)
((zeta6 - 1,), (-zeta6,), -1)
((zeta6 - 1,), (-zeta6 + 1,), -zeta6 - 2)
((-1,), (-1,), 1)
((-1,), (-zeta6,), -2*zeta6 + 3)
((-1,), (-zeta6 + 1,), 2*zeta6 - 3)
((-zeta6,), (-zeta6,), 3*zeta6 - 1)
((-zeta6,), (-zeta6 + 1,), -2*zeta6 + 3)
((-zeta6 + 1,), (-zeta6 + 1,), zeta6 + 2)
Let's check that trivial sums are being calculated correctly::
sage: N = 13
sage: D = DirichletGroup(N)
sage: g = D(1)
sage: g.jacobi_sum(g)
11
sage: sum([g(x)*g(1-x) for x in IntegerModRing(N)])
11
And sums where exactly one character is nontrivial (see :trac:`6393`)::
sage: G = DirichletGroup(5); X=G.list(); Y=X[0]; Z=X[1]
sage: Y.jacobi_sum(Z)
-1
sage: Z.jacobi_sum(Y)
-1
Now let's take a look at a non-prime modulus::
sage: N = 9
sage: D = DirichletGroup(N)
sage: g = D(1)
sage: g.jacobi_sum(g)
3
We consider a sum with values in a finite field::
sage: g = DirichletGroup(17, GF(9,'a')).0
sage: g.jacobi_sum(g**2)
2*a
TESTS:
This shows that :trac:`6393` has been fixed::
sage: G = DirichletGroup(5); X = G.list(); Y = X[0]; Z = X[1]
sage: # Y is trivial and Z is quartic
sage: sum([Y(x)*Z(1-x) for x in IntegerModRing(5)])
-1
sage: # The value -1 above is the correct value of the Jacobi sum J(Y, Z).
sage: Y.jacobi_sum(Z); Z.jacobi_sum(Y)
-1
-1
"""
if check:
if self.parent() != char.parent():
raise NotImplementedError("Characters must be from the same Dirichlet Group.")
return sum([self(x) * char(1-x) for x in rings.IntegerModRing(self.modulus())])
def kloosterman_sum(self, a=1, b=0):
r"""
Return the "twisted" Kloosterman sum associated to this Dirichlet character.
This includes Gauss sums, classical Kloosterman sums, Salié sums, etc.
The Kloosterman sum associated to `\chi` and the integers a,b is
.. MATH::
K(a,b,\chi) = \sum_{r \in (\ZZ/m\ZZ)^\times} \chi(r)\,\zeta^{ar+br^{-1}},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
        `m`-th root of unity. This reduces to the Gauss sum if `b=0`.
This method performs an exact calculation and returns an element of a
suitable cyclotomic field; see also :meth:`.kloosterman_sum_numerical`,
which gives an inexact answer (but is generally much quicker).
CACHING: Computed Kloosterman sums are *not* cached with this
character.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G([-1])
sage: e.kloosterman_sum(3,5)
-2*zeta6 + 1
sage: G = DirichletGroup(20)
sage: e = G([1 for u in G.unit_gens()])
sage: e.kloosterman_sum(7,17)
-2*zeta20^6 + 2*zeta20^4 + 4
TESTS::
sage: G = DirichletGroup(20, UniversalCyclotomicField())
sage: e = G([1 for u in G.unit_gens()])
sage: e.kloosterman_sum(7,17)
-2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4
sage: G = DirichletGroup(12, QQbar)
sage: e = G.gens()[0]
sage: e.kloosterman_sum(5,11)
Traceback (most recent call last):
...
NotImplementedError: Kloosterman sums not implemented over this ring
"""
G = self.parent()
zo = G.zeta_order()
m = G.modulus()
g = 0
L = rings.CyclotomicField(m.lcm(zo))
zeta = L.gen(0)
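        # Sanity check: the character values must be multipliable by elements of the
        # cyclotomic field L; if not (e.g. over QQbar), Kloosterman sums are not implemented.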
try:
self(1) * zeta**(a+b)
except TypeError:
raise NotImplementedError('Kloosterman sums not implemented '
'over this ring')
n = zeta.multiplicative_order()
zeta = zeta**(n // m)
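        # K(a,b,chi) = sum over units c modulo m of chi(c) * zeta^(a*c + b*c^(-1)),
        # with zeta a primitive m-th root of unity.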
for c in m.coprime_integers(m):
e = rings.Mod(c, m)
g += self(c) * zeta**int(a*e + b*e**(-1))
return g
def kloosterman_sum_numerical(self, prec=53, a=1, b=0):
r"""
Return the Kloosterman sum associated to this Dirichlet character as
an approximate complex number with prec bits of precision.
See also :meth:`.kloosterman_sum`, which calculates the sum
exactly (which is generally slower).
INPUT:
- ``prec`` -- integer (default: 53), *bits* of precision
- ``a`` -- integer, as for :meth:`.kloosterman_sum`
- ``b`` -- integer, as for :meth:`.kloosterman_sum`.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G.0
The real component of the numerical value of e is near zero::
sage: v=e.kloosterman_sum_numerical()
            sage: v.real() < 1.0e-15
True
sage: v.imag()
1.73205080756888
sage: G = DirichletGroup(20)
sage: e = G.1
sage: e.kloosterman_sum_numerical(53,3,11)
3.80422606518061 - 3.80422606518061*I
"""
G = self.parent()
K = G.base_ring()
if not (number_field.is_CyclotomicField(K) or is_RationalField(K)):
raise NotImplementedError("Kloosterman sums only currently implemented when the base ring is a cyclotomic field or QQ.")
phi = K.complex_embedding(prec)
CC = phi.codomain()
g = 0
m = G.modulus()
zeta = CC.zeta(m)
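        # Numerical evaluation of the same sum: phi embeds the base ring into CC
        # and zeta is a primitive m-th root of unity there.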
for c in m.coprime_integers(m):
e = rings.Mod(c, m)
z = zeta ** int(a*e + b*(e**(-1)))
g += phi(self(c))*z
return g
@cached_method
def is_even(self):
r"""
Return ``True`` if and only if `\varepsilon(-1) = 1`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.is_even()
False
sage: e(-1)
-1
sage: [e.is_even() for e in G]
[True, False, True, False, True, False, True, False, True, False, True, False]
sage: G = DirichletGroup(13, CC)
sage: e = G.0
sage: e.is_even()
False
sage: e(-1)
-1.000000...
sage: [e.is_even() for e in G]
[True, False, True, False, True, False, True, False, True, False, True, False]
sage: G = DirichletGroup(100000, CC)
sage: G.1.is_even()
True
        Note that ``is_even`` need not be the negation of
        ``is_odd``, e.g., in characteristic 2::
sage: G.<e> = DirichletGroup(13, GF(4,'a'))
sage: e.is_even()
True
sage: e.is_odd()
True
"""
R = self.base_ring()
# self(-1) is either +1 or -1
if not R.is_exact():
return abs(self(-1) - R(1)) < 0.5
return self(-1) == R(1)
@cached_method
def is_odd(self):
r"""
Return ``True`` if and only if
`\varepsilon(-1) = -1`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.is_odd()
True
sage: [e.is_odd() for e in G]
[False, True, False, True, False, True, False, True, False, True, False, True]
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.is_odd()
True
sage: [e.is_odd() for e in G]
[False, True, False, True, False, True, False, True, False, True, False, True]
sage: G = DirichletGroup(100000, CC)
sage: G.0.is_odd()
True
        Note that ``is_even`` need not be the negation of
        ``is_odd``, e.g., in characteristic 2::
sage: G.<e> = DirichletGroup(13, GF(4,'a'))
sage: e.is_even()
True
sage: e.is_odd()
True
"""
R = self.base_ring()
# self(-1) is either +1 or -1
if not R.is_exact():
return abs(self(-1) - R(-1)) < 0.5
return self(-1) == R(-1)
@cached_method
def is_primitive(self):
"""
Return ``True`` if and only if this character is
primitive, i.e., its conductor equals its modulus.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.is_primitive()
False
sage: b.is_primitive()
False
sage: (a*b).is_primitive()
True
sage: G.<a,b> = DirichletGroup(20, CC)
sage: a.is_primitive()
False
sage: b.is_primitive()
False
sage: (a*b).is_primitive()
True
"""
return (self.conductor() == self.modulus())
@cached_method
def is_trivial(self):
r"""
Returns ``True`` if this is the trivial character,
i.e., has order 1.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.is_trivial()
False
sage: (a^2).is_trivial()
True
"""
if self.element.is_in_cache():
return not self.element()
one = self.base_ring().one()
return all(x == one for x in self.values_on_gens())
def kernel(self):
r"""
Return the kernel of this character.
OUTPUT: Currently the kernel is returned as a list. This may
change.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.kernel()
[1, 9, 13, 17]
sage: b.kernel()
[1, 11]
"""
one = self.base_ring().one()
return [x for x in range(self.modulus()) if self(x) == one]
def maximize_base_ring(self):
r"""
Let
.. MATH::
\varepsilon : (\ZZ/N\ZZ)^* \to \QQ(\zeta_n)
be a Dirichlet character. This function returns an equal Dirichlet
character
.. MATH::
\chi : (\ZZ/N\ZZ)^* \to \QQ(\zeta_m)
where `m` is the least common multiple of `n` and
the exponent of `(\ZZ/N\ZZ)^*`.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20,QQ)
sage: b.maximize_base_ring()
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1
sage: b.maximize_base_ring().base_ring()
Cyclotomic Field of order 4 and degree 2
sage: DirichletGroup(20).base_ring()
Cyclotomic Field of order 4 and degree 2
"""
g = rings.IntegerModRing(self.modulus()).unit_group_exponent()
if g == 1:
g = 2
z = self.base_ring().zeta()
n = z.multiplicative_order()
m = lcm(g,n)
if n == m:
return self
K = rings.CyclotomicField(m)
return self.change_ring(K)
def minimize_base_ring(self):
r"""
Return a Dirichlet character that equals this one, but over as
small a subfield (or subring) of the base ring as possible.
.. note::
This function is currently only implemented when the base
ring is a number field. It's the identity function in
characteristic p.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = DirichletGroup(13).0
sage: e.base_ring()
Cyclotomic Field of order 12 and degree 4
sage: e.minimize_base_ring().base_ring()
Cyclotomic Field of order 12 and degree 4
sage: (e^2).minimize_base_ring().base_ring()
Cyclotomic Field of order 6 and degree 2
sage: (e^3).minimize_base_ring().base_ring()
Cyclotomic Field of order 4 and degree 2
sage: (e^12).minimize_base_ring().base_ring()
Rational Field
TESTS:
Check that :trac:`18479` is fixed::
sage: f = Newforms(Gamma1(25), names='a')[1]
sage: eps = f.character()
sage: eps.minimize_base_ring() == eps
True
A related bug (see :trac:`18086`)::
sage: K.<a,b>=NumberField([x^2 + 1, x^2 - 3])
sage: chi = DirichletGroup(7, K).0
sage: chi.minimize_base_ring()
Dirichlet character modulo 7 of conductor 7 mapping 3 |--> -1/2*b*a + 1/2
"""
R = self.base_ring()
if R.is_prime_field():
return self
p = R.characteristic()
if p:
K = rings.IntegerModRing(p)
elif self.order() <= 2:
K = rings.QQ
elif (isinstance(R, number_field.NumberField_generic)
and euler_phi(self.order()) < R.absolute_degree()):
K = rings.CyclotomicField(self.order())
else:
return self
try:
return self.change_ring(K)
except (TypeError, ValueError, ArithmeticError):
return self
def modulus(self):
"""
The modulus of this character.
EXAMPLES::
sage: e = DirichletGroup(100, QQ).0
sage: e.modulus()
100
sage: e.conductor()
4
"""
return self.parent().modulus()
def level(self):
"""
Synonym for modulus.
EXAMPLES::
sage: e = DirichletGroup(100, QQ).0
sage: e.level()
100
"""
return self.modulus()
@cached_method
def multiplicative_order(self):
"""
The order of this character.
EXAMPLES::
sage: e = DirichletGroup(100).1
sage: e.order() # same as multiplicative_order, since group is multiplicative
20
sage: e.multiplicative_order()
20
sage: e = DirichletGroup(100).0
sage: e.multiplicative_order()
2
"""
if self.parent().zeta.is_in_cache():
return self.element().additive_order()
return lcm([z.multiplicative_order() for z in self.values_on_gens()])
def primitive_character(self):
"""
Returns the primitive character associated to self.
EXAMPLES::
sage: e = DirichletGroup(100).0; e
Dirichlet character modulo 100 of conductor 4 mapping 51 |--> -1, 77 |--> 1
sage: e.conductor()
4
sage: f = e.primitive_character(); f
Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
sage: f.modulus()
4
"""
return self.restrict(self.conductor())
def restrict(self, M):
"""
Returns the restriction of this character to a Dirichlet character
modulo the divisor M of the modulus, which must also be a multiple
of the conductor of this character.
EXAMPLES::
sage: e = DirichletGroup(100).0
sage: e.modulus()
100
sage: e.conductor()
4
sage: e.restrict(20)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: e.restrict(4)
Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
sage: e.restrict(50)
Traceback (most recent call last):
...
ValueError: conductor(=4) must divide M(=50)
"""
M = int(M)
if self.modulus()%M != 0:
raise ValueError("M(=%s) must divide the modulus(=%s)"%(M,self.modulus()))
if M%self.conductor() != 0:
raise ValueError("conductor(=%s) must divide M(=%s)"%(self.conductor(),M))
H = DirichletGroup(M, self.base_ring())
return H(self)
@cached_method
def values(self):
"""
Return a list of the values of this character on each integer
between 0 and the modulus.
EXAMPLES::
sage: e = DirichletGroup(20)(1)
sage: e.values()
[0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
sage: e = DirichletGroup(20).gen(0)
sage: e.values()
[0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1]
sage: e = DirichletGroup(20).gen(1)
sage: e.values()
[0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1, 0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1]
sage: e = DirichletGroup(21).gen(0) ; e.values()
[0, 1, -1, 0, 1, -1, 0, 0, -1, 0, 1, -1, 0, 1, 0, 0, 1, -1, 0, 1, -1]
sage: e = DirichletGroup(21, base_ring=GF(37)).gen(0) ; e.values()
[0, 1, 36, 0, 1, 36, 0, 0, 36, 0, 1, 36, 0, 1, 0, 0, 1, 36, 0, 1, 36]
sage: e = DirichletGroup(21, base_ring=GF(3)).gen(0) ; e.values()
[0, 1, 2, 0, 1, 2, 0, 0, 2, 0, 1, 2, 0, 1, 0, 0, 1, 2, 0, 1, 2]
::
sage: chi = DirichletGroup(100151, CyclotomicField(10)).0
sage: ls = chi.values() ; ls[0:10]
[0,
1,
-zeta10^3,
-zeta10,
-zeta10,
1,
zeta10^3 - zeta10^2 + zeta10 - 1,
zeta10,
zeta10^3 - zeta10^2 + zeta10 - 1,
zeta10^2]
TESTS:
Test that :trac:`11783` and :trac:`14368` are fixed::
sage: chi = DirichletGroup(1).list()[0]
sage: chi.values()
[1]
sage: chi(1)
1
"""
G = self.parent()
R = G.base_ring()
mod = self.parent().modulus()
if mod == 1:
return [R.one()]
elif mod == 2:
return [R.zero(), R.one()]
result_list = [R.zero()] * mod
gens = G.unit_gens()
orders = G.integers_mod().unit_group().gens_orders()
R_values = G._zeta_powers
val_on_gen = self.element()
exponents = [0] * len(orders)
n = G.integers_mod().one()
value = val_on_gen.base_ring().zero()
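        # Odometer-style enumeration of (Z/mod)^*: 'exponents' is the exponent vector
        # of n with respect to the unit generators, and 'value' is the corresponding
        # discrete log of chi(n) with respect to zeta.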
while True:
# record character value on n
result_list[n] = R_values[value]
# iterate:
# increase the exponent vector by 1,
# increase n accordingly, and increase value
i = 0
while True:
try:
exponents[i] += 1
except IndexError: # Done!
return result_list
value += val_on_gen[i]
n *= gens[i]
if exponents[i] < orders[i]:
break
exponents[i] = 0
i += 1
@cached_method(do_pickle=True)
def values_on_gens(self):
r"""
Return a tuple of the values of ``self`` on the standard
generators of `(\ZZ/N\ZZ)^*`, where `N` is the modulus.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: e.values_on_gens ()
(-1, 1)
.. NOTE::
The constructor of :class:`DirichletCharacter` sets the
cache of :meth:`element` or of :meth:`values_on_gens`. The cache of
one of these methods needs to be set for the other method to work properly,
these caches have to be stored when pickling an instance of
:class:`DirichletCharacter`.
"""
pows = self.parent()._zeta_powers
return tuple([pows[i] for i in self.element()])
@cached_method(do_pickle=True)
def element(self):
r"""
Return the underlying `\ZZ/n\ZZ`-module
vector of exponents.
.. warning::
Please do not change the entries of the returned vector;
this vector is mutable *only* because immutable vectors are
not implemented yet.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.element()
(2, 0)
sage: b.element()
(0, 1)
.. NOTE::
The constructor of :class:`DirichletCharacter` sets the
cache of :meth:`element` or of :meth:`values_on_gens`. The cache of
one of these methods needs to be set for the other method to work properly,
these caches have to be stored when pickling an instance of
:class:`DirichletCharacter`.
"""
P = self.parent()
M = P._module
if is_ComplexField(P.base_ring()):
zeta = P.zeta()
zeta_argument = zeta.argument()
v = M([int(round(x.argument() / zeta_argument))
for x in self.values_on_gens()])
else:
dlog = P._zeta_dlog
v = M([dlog[x] for x in self.values_on_gens()])
v.set_immutable()
return v
def __setstate__(self, state):
r"""
Restore a pickled element from ``state``.
TESTS::
sage: e = DirichletGroup(16)([-1, 1])
sage: loads(dumps(e)) == e
True
"""
# values_on_gens() used an explicit cache __values_on_gens in the past
# we need to set the cache of values_on_gens() from that if we encounter it in a pickle
values_on_gens_key = '_DirichletCharacter__values_on_gens'
values_on_gens = None
state_dict = state[1]
if values_on_gens_key in state_dict:
values_on_gens = state_dict[values_on_gens_key]
del state_dict[values_on_gens_key]
# element() used an explicit cache __element in the past
# we need to set the cache of element() from that if we encounter it in a pickle
element_key = '_DirichletCharacter__element'
element = None
if element_key in state_dict:
element = state_dict[element_key]
del state_dict[element_key]
super(DirichletCharacter, self).__setstate__(state)
if values_on_gens is not None:
self.values_on_gens.set_cache(values_on_gens)
if element is not None:
self.element.set_cache(element)
class DirichletGroupFactory(UniqueFactory):
r"""
Construct a group of Dirichlet characters modulo `N`.
INPUT:
- ``N`` -- positive integer
- ``base_ring`` -- commutative ring; the value ring for the
characters in this group (default: the cyclotomic field
`\QQ(\zeta_n)`, where `n` is the exponent of `(\ZZ/N\ZZ)^*`)
- ``zeta`` -- (optional) root of unity in ``base_ring``
- ``zeta_order`` -- (optional) positive integer; this must be the
order of ``zeta`` if both are specified
- ``names`` -- ignored (needed so ``G.<...> = DirichletGroup(...)``
notation works)
- ``integral`` -- boolean (default: ``False``); whether to replace
the default cyclotomic field by its rings of integers as the
base ring. This is ignored if ``base_ring`` is not ``None``.
OUTPUT:
The group of Dirichlet characters modulo `N` with values in a
subgroup `V` of the multiplicative group `R^*` of ``base_ring``.
This is the group of homomorphisms `(\ZZ/N\ZZ)^* \to V` with
pointwise multiplication. The group `V` is determined as follows:
- If both ``zeta`` and ``zeta_order`` are omitted, then `V` is
taken to be `R^*`, or equivalently its `n`-torsion subgroup,
where `n` is the exponent of `(\ZZ/N\ZZ)^*`. Many operations,
such as finding a set of generators for the group, are only
implemented if `V` is cyclic and a generator for `V` can be
found.
- If ``zeta`` is specified, then `V` is taken to be the cyclic
subgroup of `R^*` generated by ``zeta``. If ``zeta_order`` is
also given, it must be the multiplicative order of ``zeta``;
this is useful if the base ring is not exact or if the order of
``zeta`` is very large.
- If ``zeta`` is not specified but ``zeta_order`` is, then `V` is
taken to be the group of roots of unity of order dividing
``zeta_order`` in `R`. In this case, `R` must be a domain (so
`V` is cyclic), and `V` must have order ``zeta_order``.
Furthermore, a generator ``zeta`` of `V` is computed, and an
error is raised if such ``zeta`` cannot be found.
EXAMPLES:
The default base ring is a cyclotomic field of order the exponent
of `(\ZZ/N\ZZ)^*`::
sage: DirichletGroup(20)
Group of Dirichlet characters modulo 20 with values in Cyclotomic Field of order 4 and degree 2
    We create the group of Dirichlet characters mod 20 with values in
the rational numbers::
sage: G = DirichletGroup(20, QQ); G
Group of Dirichlet characters modulo 20 with values in Rational Field
sage: G.order()
4
sage: G.base_ring()
Rational Field
    The elements of ``G`` print as lists giving the values of the character
    on the generators of `(\ZZ/N\ZZ)^*`::
sage: list(G)
[Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1, Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1, Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -1]
    Next we construct the group of Dirichlet characters mod 20, but with
values in `\QQ(\zeta_n)`::
sage: G = DirichletGroup(20)
sage: G.1
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
We next compute several invariants of ``G``::
sage: G.gens()
(Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
sage: G.unit_gens()
(11, 17)
sage: G.zeta()
zeta4
sage: G.zeta_order()
4
In this example we create a Dirichlet group with values in a
number field::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^4 + 1)
sage: DirichletGroup(5, K)
Group of Dirichlet characters modulo 5 with values in Number Field in a with defining polynomial x^4 + 1
An example where we give ``zeta``, but not its order::
sage: G = DirichletGroup(5, K, a); G
Group of Dirichlet characters modulo 5 with values in the group of order 8 generated by a in Number Field in a with defining polynomial x^4 + 1
sage: G.list()
[Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> a^2, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -a^2]
We can also restrict the order of the characters, either with or
without specifying a root of unity::
sage: DirichletGroup(5, K, zeta=-1, zeta_order=2)
Group of Dirichlet characters modulo 5 with values in the group of order 2 generated by -1 in Number Field in a with defining polynomial x^4 + 1
sage: DirichletGroup(5, K, zeta_order=2)
Group of Dirichlet characters modulo 5 with values in the group of order 2 generated by -1 in Number Field in a with defining polynomial x^4 + 1
::
sage: G.<e> = DirichletGroup(13)
sage: loads(G.dumps()) == G
True
::
sage: G = DirichletGroup(19, GF(5))
sage: loads(G.dumps()) == G
True
We compute a Dirichlet group over a large prime field::
sage: p = next_prime(10^40)
sage: g = DirichletGroup(19, GF(p)); g
Group of Dirichlet characters modulo 19 with values in Finite Field of size 10000000000000000000000000000000000000121
Note that the root of unity has small order, i.e., it is not the
largest order root of unity in the field::
sage: g.zeta_order()
2
::
sage: r4 = CyclotomicField(4).ring_of_integers()
sage: G = DirichletGroup(60, r4)
sage: G.gens()
(Dirichlet character modulo 60 of conductor 4 mapping 31 |--> -1, 41 |--> 1, 37 |--> 1, Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1, Dirichlet character modulo 60 of conductor 5 mapping 31 |--> 1, 41 |--> 1, 37 |--> zeta4)
sage: val = G.gens()[2].values_on_gens()[2] ; val
zeta4
sage: parent(val)
Gaussian Integers in Cyclotomic Field of order 4 and degree 2
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val)
17
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) * GF(29)(3)
22
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3
22
sage: parent(r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3)
Residue field of Fractional ideal (-2*zeta4 + 5)
::
sage: DirichletGroup(60, integral=True)
Group of Dirichlet characters modulo 60 with values in Gaussian Integers in Cyclotomic Field of order 4 and degree 2
sage: parent(DirichletGroup(60, integral=True).gens()[2].values_on_gens()[2])
Gaussian Integers in Cyclotomic Field of order 4 and degree 2
If the order of ``zeta`` cannot be determined automatically, we
can specify it using ``zeta_order``::
sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6))
Traceback (most recent call last):
...
NotImplementedError: order of element not known
sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6), zeta_order=6)
Group of Dirichlet characters modulo 7 with values in the group of order 6 generated by 0.500000000000000 + 0.866025403784439*I in Complex Field with 53 bits of precision
If the base ring is not a domain (in which case the group of roots
of unity is not necessarily cyclic), some operations still work,
such as creation of elements::
sage: G = DirichletGroup(5, Zmod(15)); G
Group of Dirichlet characters modulo 5 with values in Ring of integers modulo 15
sage: chi = G([13]); chi
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 13
sage: chi^2
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 4
sage: chi.multiplicative_order()
4
Other operations only work if ``zeta`` is specified::
sage: G.gens()
Traceback (most recent call last):
...
NotImplementedError: factorization of polynomials over rings with composite characteristic is not implemented
sage: G = DirichletGroup(5, Zmod(15), zeta=2); G
Group of Dirichlet characters modulo 5 with values in the group of order 4 generated by 2 in Ring of integers modulo 15
sage: G.gens()
(Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 2,)
TESTS:
Dirichlet groups are cached, creating two groups with the same parameters
yields the same object::
sage: DirichletGroup(60) is DirichletGroup(60)
True
"""
def create_key(self, N, base_ring=None, zeta=None, zeta_order=None,
names=None, integral=False):
"""
Create a key that uniquely determines a Dirichlet group.
TESTS::
sage: DirichletGroup.create_key(60)
(Cyclotomic Field of order 4 and degree 2, 60, None, None)
An example to illustrate that ``base_ring`` is a part of the key::
sage: k = DirichletGroup.create_key(2, base_ring=QQ); k
(Rational Field, 2, None, None)
sage: l = DirichletGroup.create_key(2, base_ring=CC); l
(Complex Field with 53 bits of precision, 2, None, None)
sage: k == l
False
sage: G = DirichletGroup.create_object(None, k); G
Group of Dirichlet characters modulo 2 with values in Rational Field
sage: H = DirichletGroup.create_object(None, l); H
Group of Dirichlet characters modulo 2 with values in Complex Field with 53 bits of precision
sage: G == H
False
        If ``base_ring`` were not a part of the key, the keys would compare
equal and the caching would be broken::
sage: k = k[1:]; k
(2, None, None)
sage: l = l[1:]; l
(2, None, None)
sage: k == l
True
sage: DirichletGroup(2, base_ring=QQ) is DirichletGroup(2, base_ring=CC)
False
If the base ring is not an integral domain, an error will be
raised if only ``zeta_order`` is specified::
sage: DirichletGroup(17, Integers(15))
Group of Dirichlet characters modulo 17 with values in Ring of integers modulo 15
sage: DirichletGroup(17, Integers(15), zeta_order=4)
Traceback (most recent call last):
...
ValueError: base ring (= Ring of integers modulo 15) must be an integral domain if only zeta_order is specified
sage: G = DirichletGroup(17, Integers(15), zeta=7); G
Group of Dirichlet characters modulo 17 with values in the group of order 4 generated by 7 in Ring of integers modulo 15
sage: G.order()
4
sage: DirichletGroup(-33)
Traceback (most recent call last):
...
ValueError: modulus should be positive
"""
modulus = rings.Integer(N)
if modulus <= 0:
raise ValueError('modulus should be positive')
if base_ring is None:
if not (zeta is None and zeta_order is None):
raise ValueError("zeta and zeta_order must be None if base_ring not specified")
e = rings.IntegerModRing(modulus).unit_group_exponent()
base_ring = rings.CyclotomicField(e)
if integral:
base_ring = base_ring.ring_of_integers()
if not is_Ring(base_ring):
raise TypeError("base_ring (= %s) must be a ring" % base_ring)
# If either zeta or zeta_order is given, compute the other.
if zeta is not None:
zeta = base_ring(zeta)
if zeta_order is None:
zeta_order = zeta.multiplicative_order()
elif zeta_order is not None:
if not base_ring.is_integral_domain():
raise ValueError("base ring (= %s) must be an integral domain if only zeta_order is specified"
% base_ring)
zeta_order = rings.Integer(zeta_order)
zeta = base_ring.zeta(zeta_order)
return (base_ring, modulus, zeta, zeta_order)
def create_object(self, version, key, **extra_args):
"""
Create the object from the key (extra arguments are ignored). This is
only called if the object was not found in the cache.
TESTS::
sage: K = CyclotomicField(4)
sage: DirichletGroup.create_object(None, (K, 60, K.gen(), 4))
Group of Dirichlet characters modulo 60 with values in the group of order 4 generated by zeta4 in Cyclotomic Field of order 4 and degree 2
"""
base_ring, modulus, zeta, zeta_order = key
return DirichletGroup_class(base_ring, modulus, zeta, zeta_order)
DirichletGroup = DirichletGroupFactory("DirichletGroup")
def is_DirichletGroup(x):
"""
Returns True if x is a Dirichlet group.
EXAMPLES::
sage: from sage.modular.dirichlet import is_DirichletGroup
sage: is_DirichletGroup(DirichletGroup(11))
True
sage: is_DirichletGroup(11)
False
sage: is_DirichletGroup(DirichletGroup(11).0)
False
"""
return isinstance(x, DirichletGroup_class)
class DirichletGroup_class(WithEqualityById, Parent):
"""
Group of Dirichlet characters modulo `N` with values in a ring `R`.
"""
Element = DirichletCharacter
def __init__(self, base_ring, modulus, zeta, zeta_order):
"""
Create a Dirichlet group.
Not to be called directly (use the factory function ``DirichletGroup``).
The ``DirichletGroup`` factory ensures that either both
``zeta`` and ``zeta_order`` are specified, or that both are
``None``. In the former case, it also ensures that ``zeta``
is an element of ``base_ring`` and that ``zeta_order`` is an
element of ``ZZ``.
TESTS::
sage: G = DirichletGroup(7, base_ring=Integers(9), zeta=2) # indirect doctest
sage: TestSuite(G).run()
sage: G.base() # check that Parent.__init__ has been called
Ring of integers modulo 9
sage: DirichletGroup(13) == DirichletGroup(13)
True
sage: DirichletGroup(13) == DirichletGroup(13, QQ)
False
"""
from sage.categories.groups import Groups
category = Groups().Commutative()
if base_ring.is_integral_domain() or base_ring.is_finite():
# The group of n-th roots of unity in the base ring is
# finite, and hence this Dirichlet group is finite too.
# In particular, it is finitely generated; the added
# FinitelyGenerated() here means that the group has a
# distinguished set of generators.
category = category.Finite().FinitelyGenerated()
Parent.__init__(self, base_ring, category=category)
self._zeta = zeta
self._zeta_order = zeta_order
self._modulus = modulus
self._integers = rings.IntegerModRing(modulus)
def __setstate__(self, state):
"""
Used for unpickling old instances.
TESTS::
sage: G = DirichletGroup(9)
sage: loads(dumps(G)) is G
True
"""
self._set_element_constructor()
if '_zeta_order' in state:
state['_zeta_order'] = rings.Integer(state['_zeta_order'])
super(DirichletGroup_class, self).__setstate__(state)
@property
def _module(self):
"""
Return the free module used to represent Dirichlet characters.
TESTS::
sage: DirichletGroup(12)._module
Vector space of dimension 2 over Ring of integers modulo 2
"""
return free_module.FreeModule(rings.IntegerModRing(self.zeta_order()),
len(self.unit_gens()))
@property
def _zeta_powers(self):
"""
Return a list of powers of the distinguished root of unity.
TESTS::
sage: DirichletGroup(5)._zeta_powers
[1, zeta4, -1, -zeta4]
"""
R = self.base_ring()
a = R.one()
w = [a]
zeta = self.zeta()
zeta_order = self.zeta_order()
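        # Over an inexact complex field, record the exact multiplicative order of each
        # power zeta^i, since it cannot be reliably recovered from the floating-point value.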
if is_ComplexField(R):
for i in range(1, zeta_order):
a = a * zeta
a._set_multiplicative_order(zeta_order/gcd(zeta_order, i))
w.append(a)
else:
for i in range(1, zeta_order):
a = a * zeta
w.append(a)
return w
@property
def _zeta_dlog(self):
"""
Return a dictionary that can be used to compute discrete
logarithms in the value group of this Dirichlet group.
TESTS::
sage: DirichletGroup(5)._zeta_dlog
{-1: 2, -zeta4: 3, zeta4: 1, 1: 0}
"""
return {z: i for i, z in enumerate(self._zeta_powers)}
def change_ring(self, R, zeta=None, zeta_order=None):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a conversion map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
- ``zeta`` -- (optional) root of unity in ``R``
- ``zeta_order`` -- (optional) order of ``zeta``
EXAMPLES::
sage: G = DirichletGroup(7,QQ); G
Group of Dirichlet characters modulo 7 with values in Rational Field
sage: G.change_ring(CyclotomicField(6))
Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2
TESTS:
We test the case where `R` is a map (:trac:`18072`)::
sage: K.<i> = QuadraticField(-1)
sage: f = K.complex_embeddings()[0]
sage: D = DirichletGroup(5, K)
sage: D.change_ring(f)
Group of Dirichlet characters modulo 5 with values in Complex Field with 53 bits of precision
"""
if zeta is None and self._zeta is not None:
# A root of unity was explicitly given; we use it over the
# new base ring as well.
zeta = self._zeta
if zeta_order is None:
# We reuse _zeta_order if we know that it stays the
# same; otherwise it will be recomputed as the order
# of R(zeta) by the DirichletGroup factory.
p = R.characteristic()
if p == 0 or p.gcd(self._zeta_order) == 1:
zeta_order = self._zeta_order
else:
# No root of unity specified; use the same zeta_order
# (which may still be None).
zeta_order = self._zeta_order
# Map zeta to the new parent
if zeta is not None:
zeta = R(zeta)
if isinstance(R, Map):
R = R.codomain()
return DirichletGroup(self.modulus(), R,
zeta=zeta,
zeta_order=zeta_order)
def base_extend(self, R):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a *coercion* map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
EXAMPLES::
sage: G = DirichletGroup(7,QQ); G
Group of Dirichlet characters modulo 7 with values in Rational Field
sage: H = G.base_extend(CyclotomicField(6)); H
Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2
Note that the root of unity can change::
sage: H.zeta()
zeta6
This method (in contrast to :meth:`change_ring`) requires a
coercion map to exist::
sage: G.base_extend(ZZ)
Traceback (most recent call last):
...
TypeError: no coercion map from Rational Field to Integer Ring is defined
Base-extended Dirichlet groups do not silently get roots of
unity with smaller order than expected (:trac:`6018`)::
sage: G = DirichletGroup(10, QQ).base_extend(CyclotomicField(4))
sage: H = DirichletGroup(10, CyclotomicField(4))
sage: G is H
True
sage: G3 = DirichletGroup(31, CyclotomicField(3))
sage: G5 = DirichletGroup(31, CyclotomicField(5))
sage: K30 = CyclotomicField(30)
sage: G3.gen(0).base_extend(K30) * G5.gen(0).base_extend(K30)
Dirichlet character modulo 31 of conductor 31 mapping 3 |--> -zeta30^7 + zeta30^5 + zeta30^4 + zeta30^3 - zeta30 - 1
When a root of unity is specified, base extension still works
if the new base ring is not an integral domain::
sage: f = DirichletGroup(17, ZZ, zeta=-1).0
sage: g = f.base_extend(Integers(15))
sage: g(3)
14
sage: g.parent().zeta()
14
"""
if not (isinstance(R, Map)
or R.has_coerce_map_from(self.base_ring())):
raise TypeError("no coercion map from %s to %s is defined"
% (self.base_ring(), R))
return self.change_ring(R)
def _element_constructor_(self, x):
"""
Construct a Dirichlet character from `x`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: K = G.base_ring()
sage: G(1)
Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
sage: G([-1])
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1
sage: G([K.0])
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: G(0)
Traceback (most recent call last):
...
TypeError: cannot convert 0 to an element of Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4
sage: G = DirichletGroup(6)
sage: G(DirichletGroup(3).0)
Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
sage: G(DirichletGroup(15).0)
Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
sage: G(DirichletGroup(15).1)
Traceback (most recent call last):
...
TypeError: conductor must divide modulus
sage: H = DirichletGroup(16, QQ); H(DirichletGroup(16).1)
Traceback (most recent call last):
...
TypeError: Unable to coerce zeta4 to a rational
"""
R = self.base_ring()
try:
if x == R.one():
x = [R.one()] * len(self.unit_gens())
except (TypeError, ValueError, ArithmeticError):
pass
if isinstance(x, list): # list of values on each unit generator
return self.element_class(self, x)
elif not isinstance(x, DirichletCharacter):
raise TypeError("cannot convert %s to an element of %s" % (x, self))
elif not x.conductor().divides(self.modulus()):
raise TypeError("conductor must divide modulus")
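        # Otherwise x is a character of a divisor modulus: build its extension by
        # evaluating x at suitable lifts of our unit generators.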
a = []
for u in self.unit_gens():
v = u.lift()
# have to do this, since e.g., unit gens mod 11 are not units mod 22.
while x.modulus().gcd(v) != 1:
v += self.modulus()
a.append(R(x(v)))
return self.element_class(self, a)
def _coerce_map_from_(self, X):
"""
Decide whether there is a coercion map from `X`.
There is conversion between Dirichlet groups of different
moduli, but no coercion. This implies that Dirichlet
characters of different moduli do not compare as equal.
TESTS::
sage: trivial_character(6) == trivial_character(3) # indirect doctest
False
sage: trivial_character(3) == trivial_character(9)
False
sage: trivial_character(3) == DirichletGroup(3, QQ).0^2
True
"""
return (isinstance(X, DirichletGroup_class) and
self.modulus() == X.modulus() and
self.base_ring().has_coerce_map_from(X.base_ring()) and
(self._zeta is None or
(X._zeta is not None and
self.base_ring()(X._zeta) in self._zeta_powers)))
def __len__(self):
"""
Return the number of elements of this Dirichlet group. This is the
        same as ``self.order()``.
EXAMPLES::
sage: len(DirichletGroup(20))
8
sage: len(DirichletGroup(20, QQ))
4
sage: len(DirichletGroup(20, GF(5)))
8
sage: len(DirichletGroup(20, GF(2)))
1
sage: len(DirichletGroup(20, GF(3)))
4
"""
return self.order()
def _repr_(self):
"""
Return a print representation of this group, which can be renamed.
EXAMPLES::
sage: G = DirichletGroup(11)
sage: repr(G) # indirect doctest
'Group of Dirichlet characters modulo 11 with values in Cyclotomic Field of order 10 and degree 4'
sage: G.rename('Dir(11)')
sage: G
Dir(11)
"""
s = "Group of Dirichlet characters modulo %s with values in " % self.modulus()
if self._zeta is not None:
s += "the group of order %s generated by %s in " % (self._zeta_order, self._zeta)
s += str(self.base_ring())
return s
@cached_method
def decomposition(self):
r"""
Returns the Dirichlet groups of prime power modulus corresponding
to primes dividing modulus.
(Note that if the modulus is 2 mod 4, there will be a "factor" of
`(\ZZ/2\ZZ)^*`, which is the trivial group.)
EXAMPLES::
sage: DirichletGroup(20).decomposition()
[
Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2,
Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2
]
sage: DirichletGroup(20,GF(5)).decomposition()
[
Group of Dirichlet characters modulo 4 with values in Finite Field of size 5,
Group of Dirichlet characters modulo 5 with values in Finite Field of size 5
]
"""
R = self.base_ring()
return Sequence([DirichletGroup(p**r,R) for p, r \
in factor(self.modulus())],
cr=True,
universe = cat.Objects())
def exponent(self):
"""
Return the exponent of this group.
EXAMPLES::
sage: DirichletGroup(20).exponent()
4
sage: DirichletGroup(20,GF(3)).exponent()
2
sage: DirichletGroup(20,GF(2)).exponent()
1
sage: DirichletGroup(37).exponent()
36
"""
return self.zeta_order()
@cached_method
def _automorphisms(self):
"""
Compute the automorphisms of self. These are always given by raising to
a power, so the return value is a list of integers.
        At present this is only implemented when the base ring has characteristic 0 or prime characteristic.
EXAMPLES::
sage: DirichletGroup(17)._automorphisms()
[1, 3, 5, 7, 9, 11, 13, 15]
sage: DirichletGroup(17, GF(11^4, 'a'))._automorphisms()
[1, 11, 121, 1331]
sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5))._automorphisms()
Traceback (most recent call last):
...
NotImplementedError: Automorphisms for finite non-field base rings not implemented
sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms()
Traceback (most recent call last):
...
NotImplementedError: Automorphisms for finite non-field base rings not implemented
"""
n = self.zeta_order()
R = self.base_ring()
p = R.characteristic()
if p == 0:
Auts = [e for e in range(1,n) if gcd(e,n) == 1]
else:
if not rings.ZZ(p).is_prime():
raise NotImplementedError("Automorphisms for finite non-field base rings not implemented")
# The automorphisms in characteristic p are
# k-th powering for
# k = 1, p, p^2, ..., p^(r-1),
            # where p^r = 1 (mod n), so r is the multiplicative order of p modulo n.
r = rings.IntegerModRing(n)(p).multiplicative_order()
Auts = [p**m for m in range(0,r)]
return Auts
def galois_orbits(self, v=None, reps_only=False, sort=True, check=True):
"""
Return a list of the Galois orbits of Dirichlet characters in self,
or in v if v is not None.
INPUT:
- ``v`` - (optional) list of elements of self
- ``reps_only`` - (optional: default False) if True
only returns representatives for the orbits.
- ``sort`` - (optional: default True) whether to sort
the list of orbits and the orbits themselves (slightly faster if
False).
- ``check`` - (optional, default: True) whether or not
to explicitly coerce each element of v into self.
The Galois group is the absolute Galois group of the prime subfield
of Frac(R). If R is not a domain, an error will be raised.
EXAMPLES::
sage: DirichletGroup(20).galois_orbits()
[
[Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -1],
...,
[Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1]
]
sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5)).galois_orbits()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2)).galois_orbits()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
"""
if v is None:
v = self.list()
else:
if check:
v = [self(x) for x in v]
G = []
seen_so_far = set([])
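        # Walk through the characters, recording each Galois orbit only once.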
for x in v:
z = x.element()
e = tuple(z) # change when there are immutable vectors (and below)
if e in seen_so_far:
continue
orbit = x.galois_orbit(sort=sort)
if reps_only:
G.append(x)
else:
G.append(orbit)
for z in orbit:
seen_so_far.add(tuple(z.element()))
G = Sequence(G, cr=True)
if sort:
G.sort()
return G
def gen(self, n=0):
"""
Return the n-th generator of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.gen(0)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: G.gen(1)
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
sage: G.gen(2)
Traceback (most recent call last):
...
IndexError: n(=2) must be between 0 and 1
::
sage: G.gen(-1)
Traceback (most recent call last):
...
IndexError: n(=-1) must be between 0 and 1
"""
n = int(n)
g = self.gens()
if n<0 or n>=len(g):
raise IndexError("n(=%s) must be between 0 and %s"%(n,len(g)-1))
return g[n]
@cached_method
def gens(self):
"""
Returns generators of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.gens()
(Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
"""
g = []
ord = self.zeta_order()
M = self._module
zero = M(0)
orders = self.integers_mod().unit_group().gens_orders()
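        # The i-th generator sends the i-th unit generator to zeta**(ord//gcd(ord, orders[i])), a root of unity of order gcd(ord, orders[i]).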
for i in range(len(self.unit_gens())):
z = zero.__copy__()
z[i] = ord//gcd(ord, orders[i])
g.append(self.element_class(self, z, check=False))
return tuple(g)
def integers_mod(self):
r"""
        Return the ring of integers `\ZZ/N\ZZ`,
        where `N` is the modulus of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.integers_mod()
Ring of integers modulo 20
"""
return self._integers
__iter__ = multiplicative_iterator
def list(self):
"""
Return a list of the Dirichlet characters in this group.
EXAMPLES::
sage: DirichletGroup(5).list()
[Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4]
"""
return self._list_from_iterator()
def modulus(self):
"""
Returns the modulus of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.modulus()
20
"""
return self._modulus
def ngens(self):
"""
Returns the number of generators of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.ngens()
2
"""
return len(self.gens())
@cached_method
def order(self):
"""
Return the number of elements of self. This is the same as
len(self).
EXAMPLES::
sage: DirichletGroup(20).order()
8
sage: DirichletGroup(37).order()
36
"""
ord = rings.Integer(1)
for g in self.gens():
ord *= int(g.order())
return ord
def random_element(self):
"""
Return a random element of self.
The element is computed by multiplying a random power of each
generator together, where the power is between 0 and the order of
the generator minus 1, inclusive.
EXAMPLES::
sage: DirichletGroup(37).random_element()
Dirichlet character modulo 37 of conductor 37 mapping 2 |--> zeta36^4
sage: DirichletGroup(20).random_element()
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: DirichletGroup(60).random_element()
Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1
"""
e = self(1)
for i in range(self.ngens()):
g = self.gen(i)
n = random.randrange(g.order())
e *= g**n
return e
def unit_gens(self):
r"""
Returns the minimal generators for the units of
`(\ZZ/N\ZZ)^*`, where `N` is the
modulus of self.
EXAMPLES::
sage: DirichletGroup(37).unit_gens()
(2,)
sage: DirichletGroup(20).unit_gens()
(11, 17)
sage: DirichletGroup(60).unit_gens()
(31, 41, 37)
sage: DirichletGroup(20,QQ).unit_gens()
(11, 17)
"""
return self._integers.unit_gens()
@cached_method
def zeta(self):
"""
Return the chosen root of unity in the base ring.
EXAMPLES::
sage: DirichletGroup(37).zeta()
zeta36
sage: DirichletGroup(20).zeta()
zeta4
sage: DirichletGroup(60).zeta()
zeta4
sage: DirichletGroup(60,QQ).zeta()
-1
sage: DirichletGroup(60, GF(25,'a')).zeta()
2
"""
zeta = self._zeta
if zeta is None:
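            # No root of unity was fixed at construction time: pick the largest divisor d of the unit group exponent for which R contains a primitive d-th root of unity.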
R = self.base_ring()
e = self._integers.unit_group_exponent()
for d in reversed(e.divisors()):
try:
zeta = R.zeta(d)
break
except ValueError:
pass
self.zeta_order.set_cache(d)
return zeta
@cached_method
def zeta_order(self):
"""
Return the order of the chosen root of unity in the base ring.
EXAMPLES::
sage: DirichletGroup(20).zeta_order()
4
sage: DirichletGroup(60).zeta_order()
4
sage: DirichletGroup(60, GF(25,'a')).zeta_order()
4
sage: DirichletGroup(19).zeta_order()
18
"""
order = self._zeta_order
if order is None:
order = self.zeta().multiplicative_order()
return order
| [
"sage.categories.all.Objects",
"sage.rings.complex_mpfr.ComplexField",
"sage.structure.element.MultiplicativeGroupElement.__init__",
"webbrowser.open",
"sage.rings.number_field.number_field.is_CyclotomicField",
"sage.categories.groups.Groups",
"sage.rings.complex_mpfr.is_ComplexField",
"sage.lfunctions.pari.lfun_character",
"sage.arith.all.gcd",
"sage.arith.all.kronecker",
"sage.rings.all.Integer",
"sage.libs.pari.pari.znconreyexp",
"sage.rings.all.Mod",
"sage.rings.rational_field.is_RationalField",
"sage.rings.qqbar.is_AlgebraicField",
"sage.arith.all.bernoulli",
"sage.arith.all.fundamental_discriminant",
"sage.libs.lcalc.lcalc_Lfunction.Lfunction_from_character",
"sage.rings.all.CyclotomicField",
"sage.structure.sequence.Sequence",
"sage.rings.all.RationalField",
"sage.structure.parent.Parent.__init__",
"sage.rings.all.PowerSeriesRing",
"sage.rings.ring.is_Ring",
"sage.modules.free_module_element.is_FreeModuleElement",
"sage.rings.all.IntegerModRing",
"sage.arith.all.lcm",
"sage.misc.cachefunc.cached_method",
"sage.rings.all.ZZ",
"sage.arith.all.binomial",
"sage.arith.all.factorial"
] | [((3225, 3246), 'sage.rings.all.RationalField', 'rings.RationalField', ([], {}), '()\n', (3244, 3246), True, 'import sage.rings.all as rings\n'), ((4241, 4257), 'sage.rings.all.Integer', 'rings.Integer', (['d'], {}), '(d)\n', (4254, 4257), True, 'import sage.rings.all as rings\n'), ((4328, 4355), 'sage.arith.all.fundamental_discriminant', 'fundamental_discriminant', (['d'], {}), '(d)\n', (4352, 4355), False, 'from sage.arith.all import binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation\n'), ((4862, 4878), 'sage.rings.all.Integer', 'rings.Integer', (['d'], {}), '(d)\n', (4875, 4878), True, 'import sage.rings.all as rings\n'), ((62251, 62280), 'sage.misc.cachefunc.cached_method', 'cached_method', ([], {'do_pickle': '(True)'}), '(do_pickle=True)\n', (62264, 62280), False, 'from sage.misc.cachefunc import cached_method\n'), ((63067, 63096), 'sage.misc.cachefunc.cached_method', 'cached_method', ([], {'do_pickle': '(True)'}), '(do_pickle=True)\n', (63080, 63096), False, 'from sage.misc.cachefunc import cached_method\n'), ((4387, 4408), 'sage.rings.all.RationalField', 'rings.RationalField', ([], {}), '()\n', (4406, 4408), True, 'import sage.rings.all as rings\n'), ((4968, 4989), 'sage.rings.all.RationalField', 'rings.RationalField', ([], {}), '()\n', (4987, 4989), True, 'import sage.rings.all as rings\n'), ((7989, 8038), 'sage.structure.element.MultiplicativeGroupElement.__init__', 'MultiplicativeGroupElement.__init__', (['self', 'parent'], {}), '(self, parent)\n', (8024, 8038), False, 'from sage.structure.element import MultiplicativeGroupElement\n'), ((26910, 26929), 'sage.rings.all.Integer', 'rings.Integer', (['cond'], {}), '(cond)\n', (26923, 26929), True, 'import sage.rings.all as rings\n'), ((33654, 33674), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (33669, 33674), False, 'import webbrowser\n'), ((38479, 38497), 'sage.rings.complex_mpfr.is_ComplexField', 'is_ComplexField', (['K'], {}), '(K)\n', (38494, 38497), False, 'from sage.rings.complex_mpfr import is_ComplexField\n'), ((41042, 41060), 'sage.rings.complex_mpfr.is_ComplexField', 'is_ComplexField', (['K'], {}), '(K)\n', (41057, 41060), False, 'from sage.rings.complex_mpfr import is_ComplexField\n'), ((54416, 54425), 'sage.arith.all.lcm', 'lcm', (['g', 'n'], {}), '(g, n)\n', (54419, 54425), False, 'from sage.arith.all import binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation\n'), ((54480, 54504), 'sage.rings.all.CyclotomicField', 'rings.CyclotomicField', (['m'], {}), '(m)\n', (54501, 54504), True, 'import sage.rings.all as rings\n'), ((76996, 77012), 'sage.rings.all.Integer', 'rings.Integer', (['N'], {}), '(N)\n', (77009, 77012), True, 'import sage.rings.all as rings\n'), ((80942, 80993), 'sage.structure.parent.Parent.__init__', 'Parent.__init__', (['self', 'base_ring'], {'category': 'category'}), '(self, base_ring, category=category)\n', (80957, 80993), False, 'from sage.structure.parent import Parent\n'), ((81115, 81144), 'sage.rings.all.IntegerModRing', 'rings.IntegerModRing', (['modulus'], {}), '(modulus)\n', (81135, 81144), True, 'import sage.rings.all as rings\n'), ((82357, 82375), 'sage.rings.complex_mpfr.is_ComplexField', 'is_ComplexField', (['R'], {}), '(R)\n', (82372, 82375), False, 'from sage.rings.complex_mpfr import is_ComplexField\n'), ((97283, 97303), 'sage.structure.sequence.Sequence', 'Sequence', (['G'], {'cr': '(True)'}), '(G, cr=True)\n', (97291, 97303), False, 'from 
sage.structure.sequence import Sequence\n'), ((100574, 100590), 'sage.rings.all.Integer', 'rings.Integer', (['(1)'], {}), '(1)\n', (100587, 100590), True, 'import sage.rings.all as rings\n'), ((4424, 4439), 'sage.arith.all.kronecker', 'kronecker', (['D', 'u'], {}), '(D, u)\n', (4433, 4439), False, 'from sage.arith.all import binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation\n'), ((8293, 8336), 'sage.modules.free_module_element.is_FreeModuleElement', 'free_module_element.is_FreeModuleElement', (['x'], {}), '(x)\n', (8333, 8336), True, 'import sage.modules.free_module_element as free_module_element\n'), ((9088, 9131), 'sage.modules.free_module_element.is_FreeModuleElement', 'free_module_element.is_FreeModuleElement', (['x'], {}), '(x)\n', (9128, 9131), True, 'import sage.modules.free_module_element as free_module_element\n'), ((26150, 26166), 'sage.rings.all.Integer', 'rings.Integer', (['(1)'], {}), '(1)\n', (26163, 26166), True, 'import sage.rings.all as rings\n'), ((38561, 38581), 'sage.rings.qqbar.is_AlgebraicField', 'is_AlgebraicField', (['K'], {}), '(K)\n', (38578, 38581), False, 'from sage.rings.qqbar import is_AlgebraicField\n'), ((41125, 41145), 'sage.rings.qqbar.is_AlgebraicField', 'is_AlgebraicField', (['K'], {}), '(K)\n', (41142, 41145), False, 'from sage.rings.qqbar import is_AlgebraicField\n'), ((47632, 47647), 'sage.rings.all.Mod', 'rings.Mod', (['c', 'm'], {}), '(c, m)\n', (47641, 47647), True, 'import sage.rings.all as rings\n'), ((49205, 49220), 'sage.rings.all.Mod', 'rings.Mod', (['c', 'm'], {}), '(c, m)\n', (49214, 49220), True, 'import sage.rings.all as rings\n'), ((56175, 56198), 'sage.rings.all.IntegerModRing', 'rings.IntegerModRing', (['p'], {}), '(p)\n', (56195, 56198), True, 'import sage.rings.all as rings\n'), ((77374, 77398), 'sage.rings.all.CyclotomicField', 'rings.CyclotomicField', (['e'], {}), '(e)\n', (77395, 77398), True, 'import sage.rings.all as rings\n'), ((77497, 77515), 'sage.rings.ring.is_Ring', 'is_Ring', (['base_ring'], {}), '(base_ring)\n', (77504, 77515), False, 'from sage.rings.ring import is_Ring\n'), ((81472, 81507), 'sage.rings.all.Integer', 'rings.Integer', (["state['_zeta_order']"], {}), "(state['_zeta_order'])\n", (81485, 81507), True, 'import sage.rings.all as rings\n'), ((25232, 25252), 'sage.lfunctions.pari.lfun_character', 'lfun_character', (['self'], {}), '(self)\n', (25246, 25252), False, 'from sage.lfunctions.pari import lfun_character, LFunction\n'), ((25485, 25515), 'sage.libs.lcalc.lcalc_Lfunction.Lfunction_from_character', 'Lfunction_from_character', (['self'], {}), '(self)\n', (25509, 25515), False, 'from sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_character\n'), ((33164, 33186), 'sage.libs.pari.pari.znconreyexp', 'pari.znconreyexp', (['G', 'v'], {}), '(G, v)\n', (33180, 33186), False, 'from sage.libs.pari import pari\n'), ((41225, 41243), 'sage.rings.complex_mpfr.ComplexField', 'ComplexField', (['prec'], {}), '(prec)\n', (41237, 41243), False, 'from sage.rings.complex_mpfr import ComplexField\n'), ((48824, 48858), 'sage.rings.number_field.number_field.is_CyclotomicField', 'number_field.is_CyclotomicField', (['K'], {}), '(K)\n', (48855, 48858), True, 'import sage.rings.number_field.number_field as number_field\n'), ((48862, 48881), 'sage.rings.rational_field.is_RationalField', 'is_RationalField', (['K'], {}), '(K)\n', (48878, 48881), False, 'from sage.rings.rational_field import is_RationalField\n'), ((78087, 78112), 'sage.rings.all.Integer', 'rings.Integer', 
(['zeta_order'], {}), '(zeta_order)\n', (78100, 78112), True, 'import sage.rings.all as rings\n'), ((80469, 80477), 'sage.categories.groups.Groups', 'Groups', ([], {}), '()\n', (80475, 80477), False, 'from sage.categories.groups import Groups\n'), ((92964, 92977), 'sage.categories.all.Objects', 'cat.Objects', ([], {}), '()\n', (92975, 92977), True, 'import sage.categories.all as cat\n'), ((98832, 98851), 'sage.arith.all.gcd', 'gcd', (['ord', 'orders[i]'], {}), '(ord, orders[i])\n', (98835, 98851), False, 'from sage.arith.all import binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation\n'), ((22577, 22589), 'sage.arith.all.bernoulli', 'bernoulli', (['k'], {}), '(k)\n', (22586, 22589), False, 'from sage.arith.all import binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation\n'), ((38643, 38677), 'sage.rings.number_field.number_field.is_CyclotomicField', 'number_field.is_CyclotomicField', (['K'], {}), '(K)\n', (38674, 38677), True, 'import sage.rings.number_field.number_field as number_field\n'), ((38681, 38700), 'sage.rings.rational_field.is_RationalField', 'is_RationalField', (['K'], {}), '(K)\n', (38697, 38700), False, 'from sage.rings.rational_field import is_RationalField\n'), ((38800, 38824), 'sage.rings.all.CyclotomicField', 'rings.CyclotomicField', (['n'], {}), '(n)\n', (38821, 38824), True, 'import sage.rings.all as rings\n'), ((41297, 41331), 'sage.rings.number_field.number_field.is_CyclotomicField', 'number_field.is_CyclotomicField', (['K'], {}), '(K)\n', (41328, 41331), True, 'import sage.rings.number_field.number_field as number_field\n'), ((41335, 41354), 'sage.rings.rational_field.is_RationalField', 'is_RationalField', (['K'], {}), '(K)\n', (41351, 41354), False, 'from sage.rings.rational_field import is_RationalField\n'), ((77298, 77327), 'sage.rings.all.IntegerModRing', 'rings.IntegerModRing', (['modulus'], {}), '(modulus)\n', (77318, 77327), True, 'import sage.rings.all as rings\n'), ((23643, 23679), 'sage.rings.all.PowerSeriesRing', 'rings.PowerSeriesRing', (['rings.QQ', '"""t"""'], {}), "(rings.QQ, 't')\n", (23664, 23679), True, 'import sage.rings.all as rings\n'), ((82504, 82522), 'sage.arith.all.gcd', 'gcd', (['zeta_order', 'i'], {}), '(zeta_order, i)\n', (82507, 82522), False, 'from sage.arith.all import binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation\n'), ((94556, 94565), 'sage.arith.all.gcd', 'gcd', (['e', 'n'], {}), '(e, n)\n', (94559, 94565), False, 'from sage.arith.all import binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation\n'), ((94604, 94615), 'sage.rings.all.ZZ', 'rings.ZZ', (['p'], {}), '(p)\n', (94612, 94615), True, 'import sage.rings.all as rings\n'), ((94965, 94988), 'sage.rings.all.IntegerModRing', 'rings.IntegerModRing', (['n'], {}), '(n)\n', (94985, 94988), True, 'import sage.rings.all as rings\n'), ((23944, 23956), 'sage.arith.all.factorial', 'factorial', (['k'], {}), '(k)\n', (23953, 23956), False, 'from sage.arith.all import binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation\n'), ((23326, 23340), 'sage.arith.all.binomial', 'binomial', (['k', 'j'], {}), '(k, j)\n', (23334, 23340), False, 'from sage.arith.all import binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation\n'), ((23342, 23362), 'sage.arith.all.bernoulli', 
'bernoulli', (['j'], {}), '(j, **opts)\n', (23351, 23362), False, 'from sage.arith.all import binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation\n')] |
"""
Module for sending Push Notifications
"""
import logging
import requests
from django.conf import settings
from ...models import PushNotificationTranslation
from ...models import Region
from ...constants import push_notifications as pnt_const
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class PushNotificationSender:
"""
Sends push notifications via FCM HTTP API.
Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json
"""
fcm_url = "https://fcm.googleapis.com/fcm/send"
def __init__(self, push_notification):
"""
Load relevant push notification translations and prepare content for sending
:param push_notification: the push notification that should be sent
:type push_notification: ~cms.models.push_notifications.push_notification.PushNotification
"""
self.push_notification = push_notification
self.prepared_pnts = []
self.primary_pnt = PushNotificationTranslation.objects.get(
push_notification=push_notification,
language=push_notification.region.default_language,
)
if len(self.primary_pnt.title) > 0:
self.prepared_pnts.append(self.primary_pnt)
self.load_secondary_pnts()
self.auth_key = self.get_auth_key()
def load_secondary_pnts(self):
"""
Load push notification translations in other languages
"""
secondary_pnts = PushNotificationTranslation.objects.filter(
push_notification=self.push_notification
).exclude(id=self.primary_pnt.id)
for secondary_pnt in secondary_pnts:
if (
secondary_pnt.title == ""
and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode
):
secondary_pnt.title = self.primary_pnt.title
secondary_pnt.text = self.primary_pnt.text
self.prepared_pnts.append(secondary_pnt)
            # elif prevents appending the same translation twice after it fell back to the main language above
            elif len(secondary_pnt.title) > 0:
self.prepared_pnts.append(secondary_pnt)
def is_valid(self):
"""
Check if all data for sending push notifications is available
:return: all prepared push notification translations are valid
:rtype: bool
"""
if self.auth_key is None:
return False
for pnt in self.prepared_pnts:
if not pnt.title:
logger.debug("%r has no title", pnt)
return False
return True
@staticmethod
def get_auth_key():
"""
Get FCM API auth key
:return: FCM API auth key
:rtype: str
"""
fcm_auth_config_key = "fcm_auth_key"
auth_key = settings.FCM_KEY
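        # NOTE: settings.FCM_KEY is treated like a queryset of configuration entries here (it is queried with exists()/first() below).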
if auth_key.exists():
logger.debug("Got fcm_auth_key from database")
return auth_key.first().value
logger.warning(
"Could not get %r from configuration database", fcm_auth_config_key
)
return None
def send_pn(self, pnt):
"""
Send single push notification translation
:param pnt: the prepared push notification translation to be sent
:type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation
:return: Response of the :mod:`requests` library
:rtype: ~requests.Response
"""
if settings.DEBUG:
region_slug = Region.objects.get(
id=settings.TEST_BLOG_ID
            ).slug  # test environment - prevent sending PNs to actual users in development
else:
region_slug = self.push_notification.region.slug
payload = {
"to": f"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}",
"notification": {"title": pnt.title, "body": pnt.text},
"data": {
"lanCode": pnt.language.slug,
"city": self.push_notification.region.slug,
},
}
headers = {"Authorization": f"key={self.auth_key}"}
return requests.post(self.fcm_url, json=payload, headers=headers)
# pylint: disable=too-many-arguments
def send_all(self):
"""
Send all prepared push notification translations
:return: Success status
:rtype: bool
"""
status = True
for pnt in self.prepared_pnts:
res = self.send_pn(pnt)
if res.status_code == 200:
logger.info("%r sent, FCM id: %r", pnt, res.json()["message_id"])
else:
status = False
logger.warning(
"Received invalid response from FCM for %r, status: %r, body: %r",
pnt,
res.status_code,
res.text,
)
return status
| [
"logging.getLogger",
"requests.post"
] | [((258, 285), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (275, 285), False, 'import logging\n'), ((4140, 4198), 'requests.post', 'requests.post', (['self.fcm_url'], {'json': 'payload', 'headers': 'headers'}), '(self.fcm_url, json=payload, headers=headers)\n', (4153, 4198), False, 'import requests\n')] |
#coding:utf-8
#
# id: functional.index.create.03
# title: CREATE ASC INDEX
# description:  CREATE ASC INDEX
#
# Dependencies:
# CREATE DATABASE
# CREATE TABLE
# SHOW INDEX
# tracker_id:
# min_versions: []
# versions: 1.0
# qmid: functional.index.create.create_index_03
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 1.0
# resources: None
substitutions_1 = []
init_script_1 = """CREATE TABLE t( a INTEGER);
commit;"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """CREATE ASC INDEX test ON t(a);
SHOW INDEX test;"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
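# SHOW INDEX is expected to list the ascending index TEST on column A of table T.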
expected_stdout_1 = """TEST INDEX ON T(A)"""
@pytest.mark.version('>=1.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| [
"pytest.mark.version",
"firebird.qa.db_factory",
"firebird.qa.isql_act"
] | [((563, 608), 'firebird.qa.db_factory', 'db_factory', ([], {'sql_dialect': '(3)', 'init': 'init_script_1'}), '(sql_dialect=3, init=init_script_1)\n', (573, 608), False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((689, 751), 'firebird.qa.isql_act', 'isql_act', (['"""db_1"""', 'test_script_1'], {'substitutions': 'substitutions_1'}), "('db_1', test_script_1, substitutions=substitutions_1)\n", (697, 751), False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((800, 828), 'pytest.mark.version', 'pytest.mark.version', (['""">=1.0"""'], {}), "('>=1.0')\n", (819, 828), False, 'import pytest\n')] |
import numpy as np
from defdap.quat import Quat
hex_syms = Quat.symEqv("hexagonal")
# subset of hexagonal symmetries that give unique orientations when the
# Burgers transformation is applied
unq_hex_syms = [
hex_syms[0],
hex_syms[5],
hex_syms[4],
hex_syms[2],
hex_syms[10],
hex_syms[11]
]
cubic_syms = Quat.symEqv("cubic")
# subset of cubic symmetries that give unique orientations when the
# Burgers transformation is applied
unq_cub_syms = [
cubic_syms[0],
cubic_syms[7],
cubic_syms[9],
cubic_syms[1],
cubic_syms[22],
cubic_syms[16],
cubic_syms[12],
cubic_syms[15],
cubic_syms[4],
cubic_syms[8],
cubic_syms[21],
cubic_syms[20]
]
# HCP -> BCC
burg_eulers = np.array([135, 90, 354.74]) * np.pi / 180
burg_trans = Quat.fromEulerAngles(*burg_eulers).conjugate
| [
"numpy.array",
"defdap.quat.Quat.symEqv",
"defdap.quat.Quat.fromEulerAngles"
] | [((60, 84), 'defdap.quat.Quat.symEqv', 'Quat.symEqv', (['"""hexagonal"""'], {}), "('hexagonal')\n", (71, 84), False, 'from defdap.quat import Quat\n'), ((329, 349), 'defdap.quat.Quat.symEqv', 'Quat.symEqv', (['"""cubic"""'], {}), "('cubic')\n", (340, 349), False, 'from defdap.quat import Quat\n'), ((789, 823), 'defdap.quat.Quat.fromEulerAngles', 'Quat.fromEulerAngles', (['*burg_eulers'], {}), '(*burg_eulers)\n', (809, 823), False, 'from defdap.quat import Quat\n'), ((734, 761), 'numpy.array', 'np.array', (['[135, 90, 354.74]'], {}), '([135, 90, 354.74])\n', (742, 761), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, print_function, absolute_import
from pint.util import (UnitsContainer)
from pint.converters import (ScaleConverter, OffsetConverter)
from pint.definitions import (Definition, PrefixDefinition, UnitDefinition,
DimensionDefinition, AliasDefinition)
from pint.testsuite import BaseTestCase
class TestDefinition(BaseTestCase):
def test_invalid(self):
self.assertRaises(ValueError, Definition.from_string, 'x = [time] * meter')
self.assertRaises(ValueError, Definition.from_string, '[x] = [time] * meter')
def test_prefix_definition(self):
for definition in ('m- = 1e-3', 'm- = 10**-3', 'm- = 0.001'):
x = Definition.from_string(definition)
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'm')
self.assertEqual(x.aliases, ())
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(0.001), 1)
self.assertEqual(str(x), 'm')
x = Definition.from_string('kilo- = 1e-3 = k-')
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'kilo')
self.assertEqual(x.aliases, ())
self.assertEqual(x.symbol, 'k')
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(.001), 1)
x = Definition.from_string('kilo- = 1e-3 = k- = anotherk-')
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'kilo')
self.assertEqual(x.aliases, ('anotherk', ))
self.assertEqual(x.symbol, 'k')
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(.001), 1)
def test_baseunit_definition(self):
x = Definition.from_string('meter = [length]')
self.assertIsInstance(x, UnitDefinition)
self.assertTrue(x.is_base)
self.assertEqual(x.reference, UnitsContainer({'[length]': 1}))
def test_unit_definition(self):
x = Definition.from_string('coulomb = ampere * second')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 1)
self.assertEqual(x.reference, UnitsContainer(ampere=1, second=1))
x = Definition.from_string('faraday = 96485.3399 * coulomb')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 96485.3399)
self.assertEqual(x.reference, UnitsContainer(coulomb=1))
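        # A definition with an "offset:" clause yields an OffsetConverter (scale plus additive offset).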
x = Definition.from_string('degF = 9 / 5 * kelvin; offset: 255.372222')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, OffsetConverter)
self.assertEqual(x.converter.scale, 9/5)
self.assertEqual(x.converter.offset, 255.372222)
self.assertEqual(x.reference, UnitsContainer(kelvin=1))
x = Definition.from_string('turn = 6.28 * radian = _ = revolution = = cycle = _')
self.assertIsInstance(x, UnitDefinition)
self.assertEqual(x.name, 'turn')
self.assertEqual(x.aliases, ('revolution', 'cycle'))
self.assertEqual(x.symbol, 'turn')
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 6.28)
self.assertEqual(x.reference, UnitsContainer(radian=1))
def test_dimension_definition(self):
x = DimensionDefinition('[time]', '', (), converter='')
self.assertTrue(x.is_base)
self.assertEqual(x.name, '[time]')
x = Definition.from_string('[speed] = [length]/[time]')
self.assertIsInstance(x, DimensionDefinition)
self.assertEqual(x.reference, UnitsContainer({'[length]': 1, '[time]': -1}))
def test_alias_definition(self):
x = Definition.from_string("@alias meter = metro = metr")
self.assertIsInstance(x, AliasDefinition)
self.assertEqual(x.name, "meter")
self.assertEqual(x.aliases, ("metro", "metr"))
| [
"pint.definitions.DimensionDefinition",
"pint.definitions.Definition.from_string",
"pint.util.UnitsContainer"
] | [((1118, 1161), 'pint.definitions.Definition.from_string', 'Definition.from_string', (['"""kilo- = 1e-3 = k-"""'], {}), "('kilo- = 1e-3 = k-')\n", (1140, 1161), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((1469, 1524), 'pint.definitions.Definition.from_string', 'Definition.from_string', (['"""kilo- = 1e-3 = k- = anotherk-"""'], {}), "('kilo- = 1e-3 = k- = anotherk-')\n", (1491, 1524), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((1884, 1926), 'pint.definitions.Definition.from_string', 'Definition.from_string', (['"""meter = [length]"""'], {}), "('meter = [length]')\n", (1906, 1926), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((2131, 2182), 'pint.definitions.Definition.from_string', 'Definition.from_string', (['"""coulomb = ampere * second"""'], {}), "('coulomb = ampere * second')\n", (2153, 2182), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((2461, 2518), 'pint.definitions.Definition.from_string', 'Definition.from_string', (['"""faraday = 96485.3399 * coulomb"""'], {}), "('faraday = 96485.3399 * coulomb')\n", (2483, 2518), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((2798, 2865), 'pint.definitions.Definition.from_string', 'Definition.from_string', (['"""degF = 9 / 5 * kelvin; offset: 255.372222"""'], {}), "('degF = 9 / 5 * kelvin; offset: 255.372222')\n", (2820, 2865), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((3194, 3271), 'pint.definitions.Definition.from_string', 'Definition.from_string', (['"""turn = 6.28 * radian = _ = revolution = = cycle = _"""'], {}), "('turn = 6.28 * radian = _ = revolution = = cycle = _')\n", (3216, 3271), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((3729, 3780), 'pint.definitions.DimensionDefinition', 'DimensionDefinition', (['"""[time]"""', '""""""', '()'], {'converter': '""""""'}), "('[time]', '', (), converter='')\n", (3748, 3780), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((3872, 3923), 'pint.definitions.Definition.from_string', 'Definition.from_string', (['"""[speed] = [length]/[time]"""'], {}), "('[speed] = [length]/[time]')\n", (3894, 3923), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((4113, 4166), 'pint.definitions.Definition.from_string', 'Definition.from_string', (['"""@alias meter = metro = metr"""'], {}), "('@alias meter = metro = metr')\n", (4135, 4166), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((756, 790), 'pint.definitions.Definition.from_string', 'Definition.from_string', (['definition'], {}), '(definition)\n', (778, 790), False, 'from pint.definitions import Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition\n'), ((2049, 2080), 'pint.util.UnitsContainer', 'UnitsContainer', (["{'[length]': 1}"], {}), "({'[length]': 1})\n", (2063, 2080), False, 'from pint.util import UnitsContainer\n'), ((2412, 2446), 
'pint.util.UnitsContainer', 'UnitsContainer', ([], {'ampere': '(1)', 'second': '(1)'}), '(ampere=1, second=1)\n', (2426, 2446), False, 'from pint.util import UnitsContainer\n'), ((2758, 2783), 'pint.util.UnitsContainer', 'UnitsContainer', ([], {'coulomb': '(1)'}), '(coulomb=1)\n', (2772, 2783), False, 'from pint.util import UnitsContainer\n'), ((3155, 3179), 'pint.util.UnitsContainer', 'UnitsContainer', ([], {'kelvin': '(1)'}), '(kelvin=1)\n', (3169, 3179), False, 'from pint.util import UnitsContainer\n'), ((3649, 3673), 'pint.util.UnitsContainer', 'UnitsContainer', ([], {'radian': '(1)'}), '(radian=1)\n', (3663, 3673), False, 'from pint.util import UnitsContainer\n'), ((4016, 4061), 'pint.util.UnitsContainer', 'UnitsContainer', (["{'[length]': 1, '[time]': -1}"], {}), "({'[length]': 1, '[time]': -1})\n", (4030, 4061), False, 'from pint.util import UnitsContainer\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 16:34:21 2018
@author: LiHongWang
"""
import os
import tensorflow as tf
from model import fcn_vgg
from model import fcn_mobile
from model import fcn_resnet_v2
from data import input_data
slim = tf.contrib.slim
def main():
num_classes=2
tfRecorf_dir= 'D:/dataSet/kitti/road/sub_um_lane_tra66.tfrecord'
train_dir = './fm2/'
if not os.path.exists(train_dir):
os.makedirs(train_dir)
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
tf.logging.set_verbosity(tf.logging.INFO)
with tf.device("/cpu:0"):
samples=input_data.get_images_labels(tfRecorf_dir,num_classes,66,
crop_size=[224,224],
batch_size=4)
batch_queue = slim.prefetch_queue.prefetch_queue(samples,
capacity=128 )
tra_batch = batch_queue.dequeue()
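        # Build the FCN head on the dequeued batch; the MobileNet-v1 variant is used here, the VGG16 and ResNet101 variants are left commented out below.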
logit,prediction=fcn_mobile.fcn_mobv1(tra_batch['image'],num_classes)
# logit,prediction=fcn_vgg.fcn_vgg16(tra_batch['image'],num_classes)
# logit,prediction=fcn_resnet_v2.fcn_res101(tra_batch['image'],num_classes)
cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit,
labels=tf.squeeze(tra_batch['label'], squeeze_dims=[3]),name="entropy")
loss = tf.reduce_mean(cross_entropy,name='loss')
slim.losses.add_loss(loss)
total_loss = slim.losses.get_total_loss()
# print("image", tra_batch['image'])
# print("label", tf.cast(tra_batch['label']*255, tf.uint8))
# print("prediction", tf.cast(prediction*255, tf.uint8))
# Create some summaries to visualize the training process:
tf.summary.scalar('losses/Total_Loss', total_loss)
tf.summary.image("image", tra_batch['image'], max_outputs=4)
tf.summary.image("label", tf.cast(tra_batch['label']*255, tf.uint8), max_outputs=4)
tf.summary.image("prediction", tf.cast(prediction*255, tf.uint8), max_outputs=4)
lr = tf.train.exponential_decay(0.001,
global_step,
10000,
0.8,
staircase=True)
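        # Decay the learning rate by a factor of 0.8 every 10000 steps.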
#lr = tf.constant(0.001, tf.float32)
tf.summary.scalar('learning_rate', lr)
for variable in slim.get_model_variables():
tf.summary.histogram(variable.op.name, variable)
# Specify the optimizer and create the train op:
optimizer = tf.train.RMSPropOptimizer(lr,0.9)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
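        # Cap per-process GPU memory at 70% so the device can be shared.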
config=tf.ConfigProto(gpu_options=gpu_options)
final_loss = slim.learning.train(train_op,
logdir=train_dir,
log_every_n_steps=100,
save_summaries_secs=20,
save_interval_secs=1800,
init_fn=None,#fcn_mobile.get_init_fn(),
session_config=config,
number_of_steps=65000)
print('Finished training. Last batch loss %f' % final_loss)
if __name__=='__main__':
main() | [
"os.path.exists",
"tensorflow.device",
"tensorflow.ConfigProto",
"tensorflow.Graph",
"os.makedirs",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.logging.set_verbosity",
"tensorflow.contrib.framework.get_or_create_global_step",
"model.fcn_mobile.fcn_mobv1",
"tensorflow.summary.histogram",
"data.input_data.get_images_labels",
"tensorflow.train.exponential_decay",
"tensorflow.reduce_mean",
"tensorflow.summary.scalar",
"tensorflow.cast",
"tensorflow.GPUOptions",
"tensorflow.squeeze",
"tensorflow.summary.image"
] | [((440, 465), 'os.path.exists', 'os.path.exists', (['train_dir'], {}), '(train_dir)\n', (454, 465), False, 'import os\n'), ((476, 498), 'os.makedirs', 'os.makedirs', (['train_dir'], {}), '(train_dir)\n', (487, 498), False, 'import os\n'), ((567, 615), 'tensorflow.contrib.framework.get_or_create_global_step', 'tf.contrib.framework.get_or_create_global_step', ([], {}), '()\n', (613, 615), True, 'import tensorflow as tf\n'), ((625, 666), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (649, 666), True, 'import tensorflow as tf\n'), ((1190, 1243), 'model.fcn_mobile.fcn_mobv1', 'fcn_mobile.fcn_mobv1', (["tra_batch['image']", 'num_classes'], {}), "(tra_batch['image'], num_classes)\n", (1210, 1243), False, 'from model import fcn_mobile\n'), ((1664, 1706), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {'name': '"""loss"""'}), "(cross_entropy, name='loss')\n", (1678, 1706), True, 'import tensorflow as tf\n'), ((2061, 2111), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""losses/Total_Loss"""', 'total_loss'], {}), "('losses/Total_Loss', total_loss)\n", (2078, 2111), True, 'import tensorflow as tf\n'), ((2121, 2181), 'tensorflow.summary.image', 'tf.summary.image', (['"""image"""', "tra_batch['image']"], {'max_outputs': '(4)'}), "('image', tra_batch['image'], max_outputs=4)\n", (2137, 2181), True, 'import tensorflow as tf\n'), ((2413, 2487), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['(0.001)', 'global_step', '(10000)', '(0.8)'], {'staircase': '(True)'}), '(0.001, global_step, 10000, 0.8, staircase=True)\n', (2439, 2487), True, 'import tensorflow as tf\n'), ((2685, 2723), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'lr'], {}), "('learning_rate', lr)\n", (2702, 2723), True, 'import tensorflow as tf\n'), ((2925, 2959), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['lr', '(0.9)'], {}), '(lr, 0.9)\n', (2950, 2959), True, 'import tensorflow as tf\n'), ((3102, 3152), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.7)'}), '(per_process_gpu_memory_fraction=0.7)\n', (3115, 3152), True, 'import tensorflow as tf\n'), ((3171, 3210), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (3185, 3210), True, 'import tensorflow as tf\n'), ((691, 710), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (700, 710), True, 'import tensorflow as tf\n'), ((733, 832), 'data.input_data.get_images_labels', 'input_data.get_images_labels', (['tfRecorf_dir', 'num_classes', '(66)'], {'crop_size': '[224, 224]', 'batch_size': '(4)'}), '(tfRecorf_dir, num_classes, 66, crop_size=[224,\n 224], batch_size=4)\n', (761, 832), False, 'from data import input_data\n'), ((2217, 2260), 'tensorflow.cast', 'tf.cast', (["(tra_batch['label'] * 255)", 'tf.uint8'], {}), "(tra_batch['label'] * 255, tf.uint8)\n", (2224, 2260), True, 'import tensorflow as tf\n'), ((2315, 2350), 'tensorflow.cast', 'tf.cast', (['(prediction * 255)', 'tf.uint8'], {}), '(prediction * 255, tf.uint8)\n', (2322, 2350), True, 'import tensorflow as tf\n'), ((2789, 2837), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['variable.op.name', 'variable'], {}), '(variable.op.name, variable)\n', (2809, 2837), True, 'import tensorflow as tf\n'), ((519, 529), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (527, 529), True, 'import tensorflow as tf\n'), ((1563, 1611), 
'tensorflow.squeeze', 'tf.squeeze', (["tra_batch['label']"], {'squeeze_dims': '[3]'}), "(tra_batch['label'], squeeze_dims=[3])\n", (1573, 1611), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from setuptools import find_packages, setup
setup(
name='filetype',
version='1.0.7',
description='Infer file type and MIME type of any file/buffer. '
'No external dependencies.',
long_description=codecs.open('README.rst', 'r',
encoding='utf-8', errors='ignore').read(),
keywords='file libmagic magic infer numbers magicnumbers discovery mime '
'type kind',
url='https://github.com/h2non/filetype.py',
download_url='https://github.com/h2non/filetype.py/tarball/master',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
license_files=['LICENSE'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System',
'Topic :: System :: Filesystems',
'Topic :: Utilities'],
platforms=['any'],
packages=find_packages(exclude=['dist', 'build', 'docs', 'tests',
'examples']),
package_data={'filetype': ['LICENSE', '*.md']},
zip_safe=True)
| [
"codecs.open",
"setuptools.find_packages"
] | [((1441, 1510), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['dist', 'build', 'docs', 'tests', 'examples']"}), "(exclude=['dist', 'build', 'docs', 'tests', 'examples'])\n", (1454, 1510), False, 'from setuptools import find_packages, setup\n'), ((290, 355), 'codecs.open', 'codecs.open', (['"""README.rst"""', '"""r"""'], {'encoding': '"""utf-8"""', 'errors': '"""ignore"""'}), "('README.rst', 'r', encoding='utf-8', errors='ignore')\n", (301, 355), False, 'import codecs\n')] |
from tools.geofunc import GeoFunc
import pandas as pd
import json
def getData(index):
    '''Datasets that raise errors (hollow shapes): han, jakobs1, jakobs2'''
    '''Too many shapes, not handled yet: shapes, shirt, swim, trousers'''
name=["ga","albano","blaz1","blaz2","dighe1","dighe2","fu","han","jakobs1","jakobs2","mao","marques","shapes","shirts","swim","trousers"]
print("开始处理",name[index],"数据集")
    '''Width is not taken into account for now; everything is expressed by scaling'''
scale=[100,0.5,100,100,20,20,20,10,20,20,0.5,20,50]
print("缩放",scale[index],"倍")
df = pd.read_csv("data/"+name[index]+".csv")
polygons=[]
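    # Each CSV row stores one polygon as JSON plus the number of copies it contributes to the dataset.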
for i in range(0,df.shape[0]):
for j in range(0,df['num'][i]):
poly=json.loads(df['polygon'][i])
GeoFunc.normData(poly,scale[index])
polygons.append(poly)
return polygons
| [
"tools.geofunc.GeoFunc.normData",
"json.loads",
"pandas.read_csv"
] | [((479, 522), 'pandas.read_csv', 'pd.read_csv', (["('data/' + name[index] + '.csv')"], {}), "('data/' + name[index] + '.csv')\n", (490, 522), True, 'import pandas as pd\n'), ((627, 655), 'json.loads', 'json.loads', (["df['polygon'][i]"], {}), "(df['polygon'][i])\n", (637, 655), False, 'import json\n'), ((668, 704), 'tools.geofunc.GeoFunc.normData', 'GeoFunc.normData', (['poly', 'scale[index]'], {}), '(poly, scale[index])\n', (684, 704), False, 'from tools.geofunc import GeoFunc\n')] |
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 <NAME> and VFRAME
# https://vframe.io
#
#############################################################################
import click
@click.command('')
@click.option('-i', '--input', 'opt_dir_in', required=True)
@click.option('-r', '--recursive', 'opt_recursive', is_flag=True)
@click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=True,
help='Glob extension')
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
help='Slice list of files')
@click.option('-t', '--threads', 'opt_threads', default=None)
@click.pass_context
def cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads):
"""Multiprocessor image template"""
# ------------------------------------------------
# imports
from os.path import join
from pathlib import Path
from dataclasses import asdict
import numpy as np
import cv2 as cv
from tqdm import tqdm
from pathos.multiprocessing import ProcessingPool as Pool
from pathos.multiprocessing import cpu_count
from vframe.settings import app_cfg
from vframe.settings.modelzoo_cfg import modelzoo
from vframe.models.dnn import DNN
from vframe.image.dnn_factory import DNNFactory
from vframe.utils import file_utils
from vframe.utils.video_utils import FileVideoStream, mediainfo
log = app_cfg.LOG
# set N threads
if not opt_threads:
opt_threads = cpu_count() # maximum
# glob items
fp_items = file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive)
if any(opt_slice):
fp_items = fp_items[opt_slice[0]:opt_slice[1]]
log.info(f'Processing: {len(fp_items):,} files')
# -----------------------------------------------------------
# start pool worker
def pool_worker(pool_item):
    # unpack the file path for this work item
    fp = pool_item['fp']
    result = {'fp': fp}
    # placeholder workload: read the image and blur it repeatedly
    im = cv.imread(fp)
for i in range(20):
im = cv.blur(im, (35,35))
return result
# end pool worker
# -----------------------------------------------------------
# convert file list into object with
pool_items = [{'fp': fp} for fp in fp_items]
# init processing pool iterator
# use imap instead of map via @hkyi Stack Overflow 41920124
desc = f'image-mp x{opt_threads}'
with Pool(opt_threads) as p:
pool_results = list(tqdm(p.imap(pool_worker, pool_items), total=len(fp_items), desc=desc)) | [
"click.option",
"click.command",
"cv2.blur",
"vframe.utils.file_utils.glob_multi",
"pathos.multiprocessing.cpu_count",
"cv2.imread",
"pathos.multiprocessing.ProcessingPool"
] | [((261, 278), 'click.command', 'click.command', (['""""""'], {}), "('')\n", (274, 278), False, 'import click\n'), ((280, 338), 'click.option', 'click.option', (['"""-i"""', '"""--input"""', '"""opt_dir_in"""'], {'required': '(True)'}), "('-i', '--input', 'opt_dir_in', required=True)\n", (292, 338), False, 'import click\n'), ((340, 404), 'click.option', 'click.option', (['"""-r"""', '"""--recursive"""', '"""opt_recursive"""'], {'is_flag': '(True)'}), "('-r', '--recursive', 'opt_recursive', is_flag=True)\n", (352, 404), False, 'import click\n'), ((406, 512), 'click.option', 'click.option', (['"""-e"""', '"""--ext"""', '"""opt_exts"""'], {'default': "['jpg', 'png']", 'multiple': '(True)', 'help': '"""Glob extension"""'}), "('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=\n True, help='Glob extension')\n", (418, 512), False, 'import click\n'), ((512, 619), 'click.option', 'click.option', (['"""--slice"""', '"""opt_slice"""'], {'type': '(int, int)', 'default': '(None, None)', 'help': '"""Slice list of files"""'}), "('--slice', 'opt_slice', type=(int, int), default=(None, None),\n help='Slice list of files')\n", (524, 619), False, 'import click\n'), ((619, 679), 'click.option', 'click.option', (['"""-t"""', '"""--threads"""', '"""opt_threads"""'], {'default': 'None'}), "('-t', '--threads', 'opt_threads', default=None)\n", (631, 679), False, 'import click\n'), ((1554, 1622), 'vframe.utils.file_utils.glob_multi', 'file_utils.glob_multi', (['opt_dir_in', 'opt_exts'], {'recursive': 'opt_recursive'}), '(opt_dir_in, opt_exts, recursive=opt_recursive)\n', (1575, 1622), False, 'from vframe.utils import file_utils\n'), ((1502, 1513), 'pathos.multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1511, 1513), False, 'from pathos.multiprocessing import cpu_count\n'), ((1983, 1996), 'cv2.imread', 'cv.imread', (['fp'], {}), '(fp)\n', (1992, 1996), True, 'import cv2 as cv\n'), ((2386, 2403), 'pathos.multiprocessing.ProcessingPool', 'Pool', (['opt_threads'], {}), '(opt_threads)\n', (2390, 2403), True, 'from pathos.multiprocessing import ProcessingPool as Pool\n'), ((2032, 2053), 'cv2.blur', 'cv.blur', (['im', '(35, 35)'], {}), '(im, (35, 35))\n', (2039, 2053), True, 'import cv2 as cv\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import CurveFit
import shutil
#find all DIRECTORIES containing non-hidden files ending in FILENAME
def getDataDirectories(DIRECTORY, FILENAME="valLoss.txt"):
directories=[]
for directory in os.scandir(DIRECTORY):
for item in os.scandir(directory):
if item.name.endswith(FILENAME) and not item.name.startswith("."):
directories.append(directory.path)
return directories
#get all non-hidden data files in DIRECTORY with extension EXT
def getDataFiles(DIRECTORY, EXT='txt'):
datafiles=[]
for item in os.scandir(DIRECTORY):
if item.name.endswith("."+EXT) and not item.name.startswith("."):
datafiles.append(item.path)
return datafiles
#checking if loss ever doesn't decrease for numEpochs epochs in a row.
def stopsDecreasing(loss, epoch, numEpochs):
minLoss=np.inf
epochMin=0
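    # Track the running minimum loss; report once numEpochs epochs pass without improvement.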
for i in range(0,loss.size):
if loss[i] < minLoss:
minLoss=loss[i]
epochMin=epoch[i]
elif (epoch[i]-epochMin) >= numEpochs:
return i, minLoss
return i, minLoss
#dirpath is where the accuracy and loss files are stored. want to move the files into the same format expected by grabNNData.
def createFolders(SEARCHDIR, SAVEDIR):
for item in os.scandir(SEARCHDIR):
name=str(item.name)
files=name.split('-')
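        # File names are of the form "<prefix>-<rest>"; files are grouped into one folder per prefix.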
SAVEFULLDIR=SAVEDIR+str(files[0])
if not os.path.exists(SAVEFULLDIR):
try:
os.makedirs(SAVEFULLDIR)
except FileExistsError:
#directory already exists--must have been created between the if statement & our attempt at making directory
pass
shutil.move(item.path, SAVEFULLDIR+"/"+str(files[1]))
#a function to read in information (e.g. accuracy, loss) stored at FILENAME
def grabNNData(FILENAME, header='infer', sep=' '):
data = pd.read_csv(FILENAME, sep, header=header)
if ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns) and ('batch_size' in data.columns) and ('learning_rate' in data.columns):
sortedData=data.sort_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
valAcc=np.array(sortedData['valAcc'])
batch_size=np.array(sortedData['batch_size'])
learning_rate=np.array(sortedData['learning_rate'])
convKers=np.array(sortedData['convKernels'])
return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers)
elif ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns):
sortedData=data.sort_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
        valAcc=np.array(sortedData['valAcc'])
        return(epoch, trainLoss, valLoss, valAcc)
else:
print("Missing a column in NN datafile")
raise Exception('NN datafile is missing one of the expected columns: epochs trainLoss valLoss valAcc [optional extra columns: batch_size, learning_rate]')
#slice data could be used to test values of E other than E=0.5, which we use by default
def sliceData(xsize, x, y, z=None, w=None):
#we can slice the data to sample less often, but not more often. We verify that we're not being asked for a granularity that is smaller than the frequency of datapoints in the vectors.
if x[0] > xsize:
return x,y,z,w
else:
result=(1.0/x[0])*xsize
#result is how often we should take datapoints if we wish to consider values every xsize
x=x[int(result-1)::int(result)]
y=y[int(result-1)::int(result)]
if z is not None:
z=z[int(result-1)::int(result)]
if w is None:
return x,y,z
else:
return x,y
#if we get to this point in function, it means z and w are both not None.
w=w[int(result-1)::int(result)]
return x,y,z,w
| [
"os.path.exists",
"os.makedirs",
"pandas.read_csv",
"os.scandir",
"numpy.array"
] | [((312, 333), 'os.scandir', 'os.scandir', (['DIRECTORY'], {}), '(DIRECTORY)\n', (322, 333), False, 'import os\n'), ((668, 689), 'os.scandir', 'os.scandir', (['DIRECTORY'], {}), '(DIRECTORY)\n', (678, 689), False, 'import os\n'), ((1388, 1409), 'os.scandir', 'os.scandir', (['SEARCHDIR'], {}), '(SEARCHDIR)\n', (1398, 1409), False, 'import os\n'), ((2001, 2042), 'pandas.read_csv', 'pd.read_csv', (['FILENAME', 'sep'], {'header': 'header'}), '(FILENAME, sep, header=header)\n', (2012, 2042), True, 'import pandas as pd\n'), ((355, 376), 'os.scandir', 'os.scandir', (['directory'], {}), '(directory)\n', (365, 376), False, 'import os\n'), ((2338, 2368), 'numpy.array', 'np.array', (["sortedData['epochs']"], {}), "(sortedData['epochs'])\n", (2346, 2368), True, 'import numpy as np\n'), ((2387, 2420), 'numpy.array', 'np.array', (["sortedData['trainLoss']"], {}), "(sortedData['trainLoss'])\n", (2395, 2420), True, 'import numpy as np\n'), ((2437, 2468), 'numpy.array', 'np.array', (["sortedData['valLoss']"], {}), "(sortedData['valLoss'])\n", (2445, 2468), True, 'import numpy as np\n'), ((2484, 2514), 'numpy.array', 'np.array', (["sortedData['valAcc']"], {}), "(sortedData['valAcc'])\n", (2492, 2514), True, 'import numpy as np\n'), ((2535, 2569), 'numpy.array', 'np.array', (["sortedData['batch_size']"], {}), "(sortedData['batch_size'])\n", (2543, 2569), True, 'import numpy as np\n'), ((2592, 2629), 'numpy.array', 'np.array', (["sortedData['learning_rate']"], {}), "(sortedData['learning_rate'])\n", (2600, 2629), True, 'import numpy as np\n'), ((2648, 2683), 'numpy.array', 'np.array', (["sortedData['convKernels']"], {}), "(sortedData['convKernels'])\n", (2656, 2683), True, 'import numpy as np\n'), ((1526, 1553), 'os.path.exists', 'os.path.exists', (['SAVEFULLDIR'], {}), '(SAVEFULLDIR)\n', (1540, 1553), False, 'import os\n'), ((3016, 3046), 'numpy.array', 'np.array', (["sortedData['epochs']"], {}), "(sortedData['epochs'])\n", (3024, 3046), True, 'import numpy as np\n'), ((3065, 3098), 'numpy.array', 'np.array', (["sortedData['trainLoss']"], {}), "(sortedData['trainLoss'])\n", (3073, 3098), True, 'import numpy as np\n'), ((3115, 3146), 'numpy.array', 'np.array', (["sortedData['valLoss']"], {}), "(sortedData['valLoss'])\n", (3123, 3146), True, 'import numpy as np\n'), ((3162, 3192), 'numpy.array', 'np.array', (["sortedData['valAcc']"], {}), "(sortedData['valAcc'])\n", (3170, 3192), True, 'import numpy as np\n'), ((1588, 1612), 'os.makedirs', 'os.makedirs', (['SAVEFULLDIR'], {}), '(SAVEFULLDIR)\n', (1599, 1612), False, 'import os\n')] |
import os.path
import time
import logging
import yaml
from piecrust.processing.base import Processor
logger = logging.getLogger(__name__)
class _ConcatInfo(object):
timestamp = 0
files = None
delim = "\n"
class ConcatProcessor(Processor):
PROCESSOR_NAME = 'concat'
def __init__(self):
super(ConcatProcessor, self).__init__()
self._cache = {}
def matches(self, path):
return path.endswith('.concat')
def getDependencies(self, path):
info = self._load(path)
return info.files
def getOutputFilenames(self, filename):
return [filename[:-7]]
def process(self, path, out_dir):
dirname, filename = os.path.split(path)
out_path = os.path.join(out_dir, filename[:-7])
info = self._load(path)
if not info.files:
raise Exception("No files specified in: %s" %
os.path.relpath(path, self.app.root_dir))
logger.debug("Concatenating %d files to: %s" %
(len(info.files), out_path))
encoded_delim = info.delim.encode('utf8')
with open(out_path, 'wb') as ofp:
for p in info.files:
with open(p, 'rb') as ifp:
ofp.write(ifp.read())
if info.delim:
ofp.write(encoded_delim)
return True
def _load(self, path):
cur_time = time.time()
info = self._cache.get(path)
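        # Reuse the cached info if it was read less than a second ago or the file has not been modified since it was cached.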
if (info is not None and
(cur_time - info.timestamp <= 1 or
os.path.getmtime(path) < info.timestamp)):
return info
if info is None:
info = _ConcatInfo()
self._cache[path] = info
with open(path, 'r') as fp:
            config = yaml.load(fp, Loader=yaml.SafeLoader)  # explicit Loader: required by newer PyYAML and avoids unsafe loading
info.files = config.get('files', [])
info.delim = config.get('delim', "\n")
info.timestamp = cur_time
path_mode = config.get('path_mode', 'relative')
if path_mode == 'relative':
dirname, _ = os.path.split(path)
info.files = [os.path.join(dirname, f) for f in info.files]
elif path_mode == 'absolute':
info.files = [os.path.join(self.app.root_dir, f)
for f in info.files]
else:
raise Exception("Unknown path mode: %s" % path_mode)
return info
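# Illustrative input (a sketch, not part of the original module): a `*.concat`
# source is a small YAML document whose keys mirror the ones read in `_load()`
# above; the file names listed here are hypothetical.
#
#   files:
#     - lib/jquery.js
#     - src/app.js
#   delim: "\n"
#   path_mode: relative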
| [
"logging.getLogger",
"time.time",
"yaml.load"
] | [((112, 139), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (129, 139), False, 'import logging\n'), ((1416, 1427), 'time.time', 'time.time', ([], {}), '()\n', (1425, 1427), False, 'import time\n'), ((1787, 1800), 'yaml.load', 'yaml.load', (['fp'], {}), '(fp)\n', (1796, 1800), False, 'import yaml\n')] |
import FWCore.ParameterSet.Config as cms
#
# module to make the MaxSumPtWMass jet combination
#
findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass",
## jet input
jets = cms.InputTag("selectedPatJets"),
## lepton input
leps = cms.InputTag("selectedPatMuons"),
## maximum number of jets to be considered
maxNJets = cms.int32(4),
## nominal WMass parameter (in GeV)
wMass = cms.double(80.4),
    ## use b-tagging to distinguish between light and b jets
useBTagging = cms.bool(False),
## choose algorithm for b-tagging
bTagAlgorithm = cms.string("trackCountingHighEffBJetTags"),
## minimum b discriminator value required for b jets and
## maximum b discriminator value allowed for non-b jets
minBDiscBJets = cms.double(1.0),
maxBDiscLightJets = cms.double(3.0)
)
| [
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.double",
"FWCore.ParameterSet.Config.InputTag",
"FWCore.ParameterSet.Config.int32",
"FWCore.ParameterSet.Config.bool"
] | [((211, 242), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""selectedPatJets"""'], {}), "('selectedPatJets')\n", (223, 242), True, 'import FWCore.ParameterSet.Config as cms\n'), ((277, 309), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""selectedPatMuons"""'], {}), "('selectedPatMuons')\n", (289, 309), True, 'import FWCore.ParameterSet.Config as cms\n'), ((374, 386), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(4)'], {}), '(4)\n', (383, 386), True, 'import FWCore.ParameterSet.Config as cms\n'), ((443, 459), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(80.4)'], {}), '(80.4)\n', (453, 459), True, 'import FWCore.ParameterSet.Config as cms\n'), ((541, 556), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (549, 556), True, 'import FWCore.ParameterSet.Config as cms\n'), ((616, 658), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""trackCountingHighEffBJetTags"""'], {}), "('trackCountingHighEffBJetTags')\n", (626, 658), True, 'import FWCore.ParameterSet.Config as cms\n'), ((805, 820), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.0)'], {}), '(1.0)\n', (815, 820), True, 'import FWCore.ParameterSet.Config as cms\n'), ((846, 861), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(3.0)'], {}), '(3.0)\n', (856, 861), True, 'import FWCore.ParameterSet.Config as cms\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess.
"""
import os
import argparse
import numpy as np
from src.ms_utils import calculate_auc
from mindspore import context, load_checkpoint
def softmax(x):
t_max = np.max(x, axis=1, keepdims=True) # returns max of each row and keeps same dims
e_x = np.exp(x - t_max) # subtracts each row with its max value
t_sum = np.sum(e_x, axis=1, keepdims=True) # returns sum of each row and keeps same dims
f_x = e_x / t_sum
return f_x
def score_model(preds, test_pos, test_neg, weight, bias):
"""
    Score the model on the test set edges.
    Args:
        preds (ndarray): Node embeddings produced by the trained network.
        test_pos (list): Positive test edges.
        test_neg (list): Negative test edges.
        weight (ndarray): Regression weights taken from the checkpoint.
        bias (ndarray): Regression bias taken from the checkpoint.
Returns:
auc(Float32): AUC result.
f1(Float32): F1-Score result.
"""
score_positive_edges = np.array(test_pos, dtype=np.int32).T
score_negative_edges = np.array(test_neg, dtype=np.int32).T
test_positive_z = np.concatenate((preds[score_positive_edges[0, :], :],
preds[score_positive_edges[1, :], :]), axis=1)
test_negative_z = np.concatenate((preds[score_negative_edges[0, :], :],
preds[score_negative_edges[1, :], :]), axis=1)
    # Project the concatenated edge embeddings through the trained regression layer.
scores = np.dot(np.concatenate((test_positive_z, test_negative_z), axis=0), weight) + bias
probability_scores = np.exp(softmax(scores))
predictions = probability_scores[:, 0]/probability_scores[:, 0:2].sum(1)
# predictions = predictions.asnumpy()
targets = [0]*len(test_pos) + [1]*len(test_neg)
auc, f1 = calculate_auc(targets, predictions)
return auc, f1
def get_acc():
"""get infer Accuracy."""
parser = argparse.ArgumentParser(description='postprocess')
parser.add_argument('--dataset_name', type=str, default='bitcoin-otc', choices=['bitcoin-otc', 'bitcoin-alpha'],
help='dataset name')
parser.add_argument('--result_path', type=str, default='./ascend310_infer/input/', help='result Files')
parser.add_argument('--label_path', type=str, default='', help='y_test npy Files')
parser.add_argument('--mask_path', type=str, default='', help='test_mask npy Files')
parser.add_argument("--checkpoint_file", type=str, default='sgcn_alpha_f1.ckpt', help="Checkpoint file path.")
parser.add_argument("--edge_path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--features-path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--test-size", type=float,
default=0.2, help="Test dataset size. Default is 0.2.")
parser.add_argument("--seed", type=int, default=42,
help="Random seed for sklearn pre-training. Default is 42.")
parser.add_argument("--spectral-features", default=True, dest="spectral_features", action="store_true")
parser.add_argument("--reduction-iterations", type=int,
default=30, help="Number of SVD iterations. Default is 30.")
parser.add_argument("--reduction-dimensions", type=int,
default=64, help="Number of SVD feature extraction dimensions. Default is 64.")
args_opt = parser.parse_args()
# Runtime
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=0)
# Create network
test_pos = np.load(os.path.join(args_opt.result_path, 'pos_test.npy'))
test_neg = np.load(os.path.join(args_opt.result_path, 'neg_test.npy'))
# Load parameters from checkpoint into network
param_dict = load_checkpoint(args_opt.checkpoint_file)
print(type(param_dict))
print(param_dict)
print(type(param_dict['regression_weights']))
print(param_dict['regression_weights'])
# load_param_into_net(net, param_dict)
pred = np.fromfile('./result_Files/repos_0.bin', np.float32)
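    # The flat buffer is reshaped below into (number of nodes, embedding dimension):
    # 5881 x 64 for bitcoin-otc and 3783 x 64 for bitcoin-alpha.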
if args_opt.dataset_name == 'bitcoin-otc':
pred = pred.reshape(5881, 64)
else:
pred = pred.reshape(3783, 64)
auc, f1 = score_model(pred, test_pos, test_neg, param_dict['regression_weights'].asnumpy(),
param_dict['regression_bias'].asnumpy())
print("Test set results:", "auc=", "{:.5f}".format(auc), "f1=", "{:.5f}".format(f1))
if __name__ == '__main__':
get_acc()
| [
"numpy.fromfile",
"argparse.ArgumentParser",
"mindspore.context.set_context",
"os.path.join",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.array",
"mindspore.load_checkpoint",
"numpy.concatenate",
"src.ms_utils.calculate_auc"
] | [((876, 908), 'numpy.max', 'np.max', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (882, 908), True, 'import numpy as np\n'), ((967, 984), 'numpy.exp', 'np.exp', (['(x - t_max)'], {}), '(x - t_max)\n', (973, 984), True, 'import numpy as np\n'), ((1039, 1073), 'numpy.sum', 'np.sum', (['e_x'], {'axis': '(1)', 'keepdims': '(True)'}), '(e_x, axis=1, keepdims=True)\n', (1045, 1073), True, 'import numpy as np\n'), ((1597, 1702), 'numpy.concatenate', 'np.concatenate', (['(preds[score_positive_edges[0, :], :], preds[score_positive_edges[1, :], :])'], {'axis': '(1)'}), '((preds[score_positive_edges[0, :], :], preds[\n score_positive_edges[1, :], :]), axis=1)\n', (1611, 1702), True, 'import numpy as np\n'), ((1760, 1865), 'numpy.concatenate', 'np.concatenate', (['(preds[score_negative_edges[0, :], :], preds[score_negative_edges[1, :], :])'], {'axis': '(1)'}), '((preds[score_negative_edges[0, :], :], preds[\n score_negative_edges[1, :], :]), axis=1)\n', (1774, 1865), True, 'import numpy as np\n'), ((2314, 2349), 'src.ms_utils.calculate_auc', 'calculate_auc', (['targets', 'predictions'], {}), '(targets, predictions)\n', (2327, 2349), False, 'from src.ms_utils import calculate_auc\n'), ((2433, 2483), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""postprocess"""'}), "(description='postprocess')\n", (2456, 2483), False, 'import argparse\n'), ((4083, 4168), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""Ascend"""', 'device_id': '(0)'}), "(mode=context.GRAPH_MODE, device_target='Ascend',\n device_id=0)\n", (4102, 4168), False, 'from mindspore import context, load_checkpoint\n'), ((4409, 4450), 'mindspore.load_checkpoint', 'load_checkpoint', (['args_opt.checkpoint_file'], {}), '(args_opt.checkpoint_file)\n', (4424, 4450), False, 'from mindspore import context, load_checkpoint\n'), ((4655, 4708), 'numpy.fromfile', 'np.fromfile', (['"""./result_Files/repos_0.bin"""', 'np.float32'], {}), "('./result_Files/repos_0.bin', np.float32)\n", (4666, 4708), True, 'import numpy as np\n'), ((1472, 1506), 'numpy.array', 'np.array', (['test_pos'], {'dtype': 'np.int32'}), '(test_pos, dtype=np.int32)\n', (1480, 1506), True, 'import numpy as np\n'), ((1537, 1571), 'numpy.array', 'np.array', (['test_neg'], {'dtype': 'np.int32'}), '(test_neg, dtype=np.int32)\n', (1545, 1571), True, 'import numpy as np\n'), ((4211, 4261), 'os.path.join', 'os.path.join', (['args_opt.result_path', '"""pos_test.npy"""'], {}), "(args_opt.result_path, 'pos_test.npy')\n", (4223, 4261), False, 'import os\n'), ((4287, 4337), 'os.path.join', 'os.path.join', (['args_opt.result_path', '"""neg_test.npy"""'], {}), "(args_opt.result_path, 'neg_test.npy')\n", (4299, 4337), False, 'import os\n'), ((2000, 2058), 'numpy.concatenate', 'np.concatenate', (['(test_positive_z, test_negative_z)'], {'axis': '(0)'}), '((test_positive_z, test_negative_z), axis=0)\n', (2014, 2058), True, 'import numpy as np\n')] |
import re
import numpy as np
from collections import OrderedDict
import pykeops
import pykeops.config
############################################################
# define backend
############################################################
class SetBackend():
"""
    This class is used to centralize the options used in PyKeops.
"""
dev = OrderedDict([('CPU',0),('GPU',1)])
grid = OrderedDict([('1D',0),('2D',1)])
memtype = OrderedDict([('host',0), ('device',1)])
possible_options_list = ['auto',
'CPU',
'GPU',
'GPU_1D', 'GPU_1D_device', 'GPU_1D_host',
'GPU_2D', 'GPU_2D_device', 'GPU_2D_host'
]
def define_tag_backend(self, backend, variables):
"""
Try to make a good guess for the backend... available methods are: (host means Cpu, device means Gpu)
CPU : computations performed with the host from host arrays
GPU_1D_device : computations performed on the device from device arrays, using the 1D scheme
GPU_2D_device : computations performed on the device from device arrays, using the 2D scheme
GPU_1D_host : computations performed on the device from host arrays, using the 1D scheme
GPU_2D_host : computations performed on the device from host data, using the 2D scheme
:param backend (str), variables (tuple)
:return (tagCPUGPU, tag1D2D, tagHostDevice)
"""
# check that the option is valid
if (backend not in self.possible_options_list):
raise ValueError('Invalid backend. Should be one of ', self.possible_options_list)
# auto : infer everything
if backend == 'auto':
return int(pykeops.config.gpu_available), self._find_grid(), self._find_mem(variables)
split_backend = re.split('_',backend)
if len(split_backend) == 1: # CPU or GPU
return self.dev[split_backend[0]], self._find_grid(), self._find_mem(variables)
elif len(split_backend) == 2: # GPU_1D or GPU_2D
return self.dev[split_backend[0]], self.grid[split_backend[1]], self._find_mem(variables)
elif len(split_backend) == 3: # the option is known
return self.dev[split_backend[0]], self.grid[split_backend[1]], self.memtype[split_backend[2]]
def define_backend(self, backend, variables):
tagCPUGPU, tag1D2D, tagHostDevice = self.define_tag_backend(backend, variables)
return self.dev[tagCPUGPU], self.grid[tag1D2D], self.memtype[tagHostDevice]
@staticmethod
def _find_dev():
return int(pykeops.config.gpu_available)
@staticmethod
def _find_mem(variables):
if all([type(var) is np.ndarray for var in variables ]): # Infer if we're working with numpy arrays or torch tensors:
MemType = 0
elif pykeops.config.torch_found:
import torch
if all([type(var) in [torch.Tensor, torch.nn.parameter.Parameter] for var in variables]):
from pykeops.torch.utils import is_on_device
VarsAreOnGpu = tuple(map(is_on_device, tuple(variables)))
if all(VarsAreOnGpu):
MemType = 1
elif not any(VarsAreOnGpu):
MemType = 0
else:
raise ValueError('At least two input variables have different memory locations (Cpu/Gpu).')
else:
raise TypeError('All variables should either be numpy arrays or torch tensors.')
return MemType
@staticmethod
def _find_grid():
return 0
def get_tag_backend(backend, variables, str = False):
"""
entry point to get the correct backend
"""
res = SetBackend()
if not str:
return res.define_tag_backend(backend, variables)
else:
return res.define_backend(backend, variables)
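# Illustrative usage (a sketch, not part of the original module; the arrays are
# hypothetical). With two NumPy arrays, 'auto' infers the device tag from
# pykeops.config.gpu_available and the memory tag from the argument types:
#
#   import numpy as np
#   x, y = np.ones((10, 3)), np.ones((20, 3))
#   tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend('auto', (x, y))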
| [
"re.split",
"collections.OrderedDict"
] | [((362, 399), 'collections.OrderedDict', 'OrderedDict', (["[('CPU', 0), ('GPU', 1)]"], {}), "([('CPU', 0), ('GPU', 1)])\n", (373, 399), False, 'from collections import OrderedDict\n'), ((408, 443), 'collections.OrderedDict', 'OrderedDict', (["[('1D', 0), ('2D', 1)]"], {}), "([('1D', 0), ('2D', 1)])\n", (419, 443), False, 'from collections import OrderedDict\n'), ((455, 496), 'collections.OrderedDict', 'OrderedDict', (["[('host', 0), ('device', 1)]"], {}), "([('host', 0), ('device', 1)])\n", (466, 496), False, 'from collections import OrderedDict\n'), ((1928, 1950), 're.split', 're.split', (['"""_"""', 'backend'], {}), "('_', backend)\n", (1936, 1950), False, 'import re\n')] |
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import (
get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute,
collect_data_files)
from PyInstaller.compat import getsitepackages, is_darwin, is_win
# On Windows the system PATH has to be extended to point to the PyQt5 directory,
# which contains the Qt dlls. We need to avoid picking up a different version of
# the Qt libraries when another application (e.g. QtCreator) is installed.
if is_win:
from PyInstaller.utils.win32.winutils import extend_system_path
extend_system_path([os.path.join(x, 'PyQt5') for x in getsitepackages()])
extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')),
'Qt', 'bin')])
# In the new consolidated mode any PyQt depends on _qt
hiddenimports = ['sip', 'PyQt5.Qt']
# Collect just the qt.conf file.
datas = [x for x in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if
x[0].endswith('qt.conf')]
# For Qt<5.4 to work on Mac OS X it is necessary to include `qt_menu.nib`.
# This directory contains some resource files necessary to run PyQt or PySide
# app.
if is_darwin:
# Version of the currently installed Qt 5.x shared library.
qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR')
if is_module_satisfies('Qt < 5.4', qt_version):
datas = [(qt_menu_nib_dir('PyQt5'), '')]
| [
"PyInstaller.utils.hooks.is_module_satisfies",
"PyInstaller.utils.hooks.qt_menu_nib_dir",
"PyInstaller.compat.getsitepackages",
"os.path.join",
"PyInstaller.utils.hooks.get_module_file_attribute",
"PyInstaller.utils.hooks.get_module_attribute"
] | [((1685, 1739), 'PyInstaller.utils.hooks.get_module_attribute', 'get_module_attribute', (['"""PyQt5.QtCore"""', '"""QT_VERSION_STR"""'], {}), "('PyQt5.QtCore', 'QT_VERSION_STR')\n", (1705, 1739), False, 'from PyInstaller.utils.hooks import get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files\n'), ((1747, 1790), 'PyInstaller.utils.hooks.is_module_satisfies', 'is_module_satisfies', (['"""Qt < 5.4"""', 'qt_version'], {}), "('Qt < 5.4', qt_version)\n", (1766, 1790), False, 'from PyInstaller.utils.hooks import get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files\n'), ((985, 1009), 'os.path.join', 'os.path.join', (['x', '"""PyQt5"""'], {}), "(x, 'PyQt5')\n", (997, 1009), False, 'import os\n'), ((1363, 1388), 'os.path.join', 'os.path.join', (['"""Qt"""', '"""bin"""'], {}), "('Qt', 'bin')\n", (1375, 1388), False, 'import os\n'), ((1019, 1036), 'PyInstaller.compat.getsitepackages', 'getsitepackages', ([], {}), '()\n', (1034, 1036), False, 'from PyInstaller.compat import getsitepackages, is_darwin, is_win\n'), ((1810, 1834), 'PyInstaller.utils.hooks.qt_menu_nib_dir', 'qt_menu_nib_dir', (['"""PyQt5"""'], {}), "('PyQt5')\n", (1825, 1834), False, 'from PyInstaller.utils.hooks import get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files\n'), ((1092, 1126), 'PyInstaller.utils.hooks.get_module_file_attribute', 'get_module_file_attribute', (['"""PyQt5"""'], {}), "('PyQt5')\n", (1117, 1126), False, 'from PyInstaller.utils.hooks import get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files\n')] |
from vyper import ast as vy_ast
def test_output_class():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert isinstance(new_node, vy_ast.Int)
def test_source():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.src == new_node.src
assert old_node.node_source_code == new_node.node_source_code
def test_kwargs():
old_node = vy_ast.parse_to_ast("42").body[0].value
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.value == 42
assert new_node.value == 666
def test_compare_nodes():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert not vy_ast.compare_nodes(old_node, new_node)
def test_new_node_has_no_parent():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert new_node._parent is None
assert new_node._depth == 0
| [
"vyper.ast.parse_to_ast",
"vyper.ast.Int.from_node",
"vyper.ast.compare_nodes"
] | [((74, 105), 'vyper.ast.parse_to_ast', 'vy_ast.parse_to_ast', (['"""foo = 42"""'], {}), "('foo = 42')\n", (93, 105), True, 'from vyper import ast as vy_ast\n'), ((121, 162), 'vyper.ast.Int.from_node', 'vy_ast.Int.from_node', (['old_node'], {'value': '(666)'}), '(old_node, value=666)\n', (141, 162), True, 'from vyper import ast as vy_ast\n'), ((244, 275), 'vyper.ast.parse_to_ast', 'vy_ast.parse_to_ast', (['"""foo = 42"""'], {}), "('foo = 42')\n", (263, 275), True, 'from vyper import ast as vy_ast\n'), ((291, 332), 'vyper.ast.Int.from_node', 'vy_ast.Int.from_node', (['old_node'], {'value': '(666)'}), '(old_node, value=666)\n', (311, 332), True, 'from vyper import ast as vy_ast\n'), ((531, 572), 'vyper.ast.Int.from_node', 'vy_ast.Int.from_node', (['old_node'], {'value': '(666)'}), '(old_node, value=666)\n', (551, 572), True, 'from vyper import ast as vy_ast\n'), ((682, 713), 'vyper.ast.parse_to_ast', 'vy_ast.parse_to_ast', (['"""foo = 42"""'], {}), "('foo = 42')\n", (701, 713), True, 'from vyper import ast as vy_ast\n'), ((729, 770), 'vyper.ast.Int.from_node', 'vy_ast.Int.from_node', (['old_node'], {'value': '(666)'}), '(old_node, value=666)\n', (749, 770), True, 'from vyper import ast as vy_ast\n'), ((880, 911), 'vyper.ast.parse_to_ast', 'vy_ast.parse_to_ast', (['"""foo = 42"""'], {}), "('foo = 42')\n", (899, 911), True, 'from vyper import ast as vy_ast\n'), ((927, 968), 'vyper.ast.Int.from_node', 'vy_ast.Int.from_node', (['old_node'], {'value': '(666)'}), '(old_node, value=666)\n', (947, 968), True, 'from vyper import ast as vy_ast\n'), ((787, 827), 'vyper.ast.compare_nodes', 'vy_ast.compare_nodes', (['old_node', 'new_node'], {}), '(old_node, new_node)\n', (807, 827), True, 'from vyper import ast as vy_ast\n'), ((476, 501), 'vyper.ast.parse_to_ast', 'vy_ast.parse_to_ast', (['"""42"""'], {}), "('42')\n", (495, 501), True, 'from vyper import ast as vy_ast\n')] |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=too-many-lines
"""Unittests for REST API."""
import tempfile
from flask_cors.core import ACL_ORIGIN
from aiida import orm
from aiida.backends.testbase import AiidaTestCase
from aiida.common import json
from aiida.common.links import LinkType
from aiida.restapi.run_api import configure_api
class RESTApiTestCase(AiidaTestCase):
"""
Setup of the tests for the AiiDA RESTful-api
"""
_url_prefix = '/api/v4'
_dummy_data = {}
_PERPAGE_DEFAULT = 20
_LIMIT_DEFAULT = 400
@classmethod
def setUpClass(cls, *args, **kwargs): # pylint: disable=too-many-locals, too-many-statements
"""
Add objects to the database for different requests/filters/orderings etc.
"""
super().setUpClass()
api = configure_api(catch_internal_server=True)
cls.app = api.app
cls.app.config['TESTING'] = True
# create test inputs
cell = ((2., 0., 0.), (0., 2., 0.), (0., 0., 2.))
structure = orm.StructureData(cell=cell)
structure.append_atom(position=(0., 0., 0.), symbols=['Ba'])
structure.store()
structure.add_comment('This is test comment.')
structure.add_comment('Add another comment.')
cif = orm.CifData(ase=structure.get_ase())
cif.store()
parameter1 = orm.Dict(dict={'a': 1, 'b': 2})
parameter1.store()
parameter2 = orm.Dict(dict={'c': 3, 'd': 4})
parameter2.store()
kpoint = orm.KpointsData()
kpoint.set_kpoints_mesh([4, 4, 4])
kpoint.store()
resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}
calcfunc = orm.CalcFunctionNode(computer=cls.computer)
calcfunc.store()
calc = orm.CalcJobNode(computer=cls.computer)
calc.set_option('resources', resources)
calc.set_attribute('attr1', 'OK')
calc.set_attribute('attr2', 'OK')
calc.set_extra('extra1', False)
calc.set_extra('extra2', 'extra_info')
calc.add_incoming(structure, link_type=LinkType.INPUT_CALC, link_label='link_structure')
calc.add_incoming(parameter1, link_type=LinkType.INPUT_CALC, link_label='link_parameter')
aiida_in = 'The input file\nof the CalcJob node'
# Add the calcjob_inputs folder with the aiida.in file to the CalcJobNode repository
with tempfile.NamedTemporaryFile(mode='w+') as handle:
handle.write(aiida_in)
handle.flush()
handle.seek(0)
calc.put_object_from_filelike(handle, 'calcjob_inputs/aiida.in', force=True)
calc.store()
# create log message for calcjob
import logging
from aiida.common.log import LOG_LEVEL_REPORT
from aiida.common.timezone import now
from aiida.orm import Log
log_record = {
'time': now(),
'loggername': 'loggername',
'levelname': logging.getLevelName(LOG_LEVEL_REPORT),
'dbnode_id': calc.id,
'message': 'This is a template record message',
'metadata': {
'content': 'test'
},
}
Log(**log_record)
aiida_out = 'The output file\nof the CalcJob node'
retrieved_outputs = orm.FolderData()
# Add the calcjob_outputs folder with the aiida.out file to the FolderData node
with tempfile.NamedTemporaryFile(mode='w+') as handle:
handle.write(aiida_out)
handle.flush()
handle.seek(0)
retrieved_outputs.put_object_from_filelike(handle, 'calcjob_outputs/aiida.out', force=True)
retrieved_outputs.store()
retrieved_outputs.add_incoming(calc, link_type=LinkType.CREATE, link_label='retrieved')
kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create')
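        # Provenance built above: `structure` and `parameter1` are inputs of `calc`,
        # while `kpoint` and `retrieved_outputs` are created by it. The nodes created
        # below are not linked to it and mainly populate the database for list tests.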
calc1 = orm.CalcJobNode(computer=cls.computer)
calc1.set_option('resources', resources)
calc1.store()
dummy_computers = [{
'label': 'test1',
'hostname': 'test1.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'pbspro',
}, {
'label': 'test2',
'hostname': 'test2.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'torque',
}, {
'label': 'test3',
'hostname': 'test3.epfl.ch',
'transport_type': 'local',
'scheduler_type': 'slurm',
}, {
'label': 'test4',
'hostname': 'test4.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'slurm',
}]
for dummy_computer in dummy_computers:
computer = orm.Computer(**dummy_computer)
computer.store()
# Prepare typical REST responses
cls.process_dummy_data()
def get_dummy_data(self):
return self._dummy_data
def get_url_prefix(self):
return self._url_prefix
@classmethod
def process_dummy_data(cls):
# pylint: disable=fixme
"""
This functions prepare atomic chunks of typical responses from the
RESTapi and puts them into class attributes
"""
# TODO: Storing the different nodes as lists and accessing them
# by their list index is very fragile and a pain to debug.
# Please change this!
computer_projections = ['id', 'uuid', 'name', 'hostname', 'transport_type', 'scheduler_type']
computers = orm.QueryBuilder().append(orm.Computer, tag='comp', project=computer_projections).order_by({
'comp': [{
'id': {
'order': 'asc'
}
}]
}).dict()
# Cast UUID into a string (e.g. in sqlalchemy it comes as a UUID object)
computers = [_['comp'] for _ in computers]
for comp in computers:
if comp['uuid'] is not None:
comp['uuid'] = str(comp['uuid'])
cls._dummy_data['computers'] = computers
calculation_projections = ['id', 'uuid', 'user_id', 'node_type']
calculations = orm.QueryBuilder().append(orm.CalculationNode, tag='calc',
project=calculation_projections).order_by({
'calc': [{
'id': {
'order': 'desc'
}
}]
}).dict()
calculations = [_['calc'] for _ in calculations]
for calc in calculations:
if calc['uuid'] is not None:
calc['uuid'] = str(calc['uuid'])
cls._dummy_data['calculations'] = calculations
data_projections = ['id', 'uuid', 'user_id', 'node_type']
data_types = {
'cifdata': orm.CifData,
'parameterdata': orm.Dict,
'structuredata': orm.StructureData,
'data': orm.Data,
}
for label, dataclass in data_types.items():
data = orm.QueryBuilder().append(dataclass, tag='data', project=data_projections).order_by({
'data': [{
'id': {
'order': 'desc'
}
}]
}).dict()
data = [_['data'] for _ in data]
for datum in data:
if datum['uuid'] is not None:
datum['uuid'] = str(datum['uuid'])
cls._dummy_data[label] = data
def split_path(self, url):
# pylint: disable=no-self-use
"""
Split the url with "?" to get url path and it's parameters
:param url: Web url
:return: url path and url parameters
"""
parts = url.split('?')
path = ''
query_string = ''
if parts:
path = parts[0]
if len(parts) > 1:
query_string = parts[1]
return path, query_string
def compare_extra_response_data(self, node_type, url, response, uuid=None):
"""
In url response, we pass some extra information/data along with the node
        results, e.g. url method, node_type, path, pk, query_string, url,
        url_root, etc.
        :param node_type: url requested for the type of the node
:param url: web url
:param response: url response
:param uuid: url requested for the node pk
"""
path, query_string = self.split_path(url)
self.assertEqual(response['method'], 'GET')
self.assertEqual(response['resource_type'], node_type)
self.assertEqual(response['path'], path)
self.assertEqual(response['id'], uuid)
self.assertEqual(response['query_string'], query_string)
self.assertEqual(response['url'], f'http://localhost{url}')
self.assertEqual(response['url_root'], 'http://localhost/')
# node details and list with limit, offset, page, perpage
def process_test(
self,
entity_type,
url,
full_list=False,
empty_list=False,
expected_list_ids=None,
expected_range=None,
expected_errormsg=None,
uuid=None,
result_node_type=None,
result_name=None
):
# pylint: disable=too-many-arguments
"""
Check whether response matches expected values.
:param entity_type: url requested for the type of the node
:param url: web url
:param full_list: if url is requested to get full list
:param empty_list: if the response list is empty
:param expected_list_ids: list of expected ids from data
:param expected_range: [start, stop] range of expected ids from data
:param expected_errormsg: expected error message in response
:param uuid: url requested for the node pk
:param result_node_type: node type in response data
:param result_name: result name in response e.g. incoming, outgoing
"""
if expected_list_ids is None:
expected_list_ids = []
if expected_range is None:
expected_range = []
if result_node_type is None and result_name is None:
result_node_type = entity_type
result_name = entity_type
url = self._url_prefix + url
with self.app.test_client() as client:
rv_response = client.get(url)
response = json.loads(rv_response.data)
if expected_errormsg:
self.assertEqual(response['message'], expected_errormsg)
else:
if full_list:
expected_data = self._dummy_data[result_node_type]
elif empty_list:
expected_data = []
elif expected_list_ids:
expected_data = [self._dummy_data[result_node_type][i] for i in expected_list_ids]
elif expected_range != []:
expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]]
else:
from aiida.common.exceptions import InputValidationError
raise InputValidationError('Pass the expected range of the dummydata')
expected_node_uuids = [node['uuid'] for node in expected_data]
result_node_uuids = [node['uuid'] for node in response['data'][result_name]]
self.assertEqual(expected_node_uuids, result_node_uuids)
self.compare_extra_response_data(entity_type, url, response, uuid)
class RESTApiTestSuite(RESTApiTestCase):
# pylint: disable=too-many-public-methods
"""
Define unittests for rest api
"""
############### generic endpoints ########################
def test_server(self):
"""
Test that /server endpoint returns AiiDA version
"""
url = f'{self.get_url_prefix()}/server'
from aiida import __version__
with self.app.test_client() as client:
response = client.get(url)
data = json.loads(response.data)['data']
self.assertEqual(__version__, data['AiiDA_version'])
self.assertEqual(self.get_url_prefix(), data['API_prefix'])
def test_base_url(self):
"""
Test that / returns list of endpoints
"""
with self.app.test_client() as client:
data_base = json.loads(client.get(self.get_url_prefix() + '/').data)['data']
data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data']
self.assertTrue(len(data_base['available_endpoints']) > 0)
self.assertDictEqual(data_base, data_server)
def test_cors_headers(self):
"""
Test that REST API sets cross-origin resource sharing headers
"""
url = f'{self.get_url_prefix()}/server'
with self.app.test_client() as client:
response = client.get(url)
headers = response.headers
self.assertEqual(headers.get(ACL_ORIGIN), '*')
############### computers endpoint ########################
def test_computers_details(self):
"""
        Requests the details of a single computer
"""
node_uuid = self.get_dummy_data()['computers'][1]['uuid']
RESTApiTestCase.process_test(
self, 'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid
)
def test_computers_list(self):
"""
Get the full list of computers from database
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)
def test_computers_list_limit_offset(self):
"""
Get the list of computers from database using limit
and offset parameter.
It should return the no of rows specified in limit from
database starting from the no. specified in offset
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4]
)
def test_computers_list_limit_only(self):
"""
Get the list of computers from database using limit
parameter.
It should return the no of rows specified in limit from
database.
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2])
def test_computers_list_offset_only(self):
"""
Get the list of computers from database using offset
parameter
It should return all the rows from database starting from
the no. specified in offset
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None])
def test_computers_list_limit_offset_perpage(self):
"""
If we pass the limit, offset and perpage at same time, it
would return the error message.
"""
expected_error = 'perpage key is incompatible with limit and offset'
RESTApiTestCase.process_test(
self, 'computers', '/computers?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error
)
def test_computers_list_page_limit_offset(self):
"""
If we use the page, limit and offset at same time, it
would return the error message.
"""
expected_error = 'requesting a specific page is incompatible with ' \
'limit and offset'
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error
)
def test_complist_pagelimitoffset_perpage(self):
"""
If we use the page, limit, offset and perpage at same time, it
would return the error message.
"""
expected_error = 'perpage key is incompatible with limit and offset'
RESTApiTestCase.process_test(
self,
'computers',
'/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id',
expected_errormsg=expected_error
)
def test_computers_list_page_default(self):
"""
it returns the no. of rows defined as default perpage option
from database.
no.of pages = total no. of computers in database / perpage
"/page" acts as "/page/1?perpage=default_value"
"""
RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True)
def test_computers_list_page_perpage(self):
"""
no.of pages = total no. of computers in database / perpage
Using this formula it returns the no. of rows for requested page
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2]
)
def test_computers_list_page_perpage_exceed(self):
"""
no.of pages = total no. of computers in database / perpage
If we request the page which exceeds the total no. of pages then
it would return the error message.
"""
expected_error = 'Non existent page requested. The page range is [1 : ' \
'3]'
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error
)
############### list filters ########################
def test_computers_filter_id1(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id=1)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(self, 'computers', f'/computers?id={str(node_pk)}', expected_list_ids=[1])
def test_computers_filter_id2(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id > 2)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?id>{str(node_pk)}&orderby=+id', expected_range=[2, None]
)
def test_computers_filter_pk(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id=1)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1])
def test_computers_filter_name(self):
"""
Add filter for the name of computer and get the filtered computer
list
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?name="test1"', expected_list_ids=[1])
def test_computers_filter_hostname(self):
"""
Add filter for the hostname of computer and get the filtered computer
list
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?hostname="test1.epfl.ch"', expected_list_ids=[1])
def test_computers_filter_transport_type(self):
"""
Add filter for the transport_type of computer and get the filtered
computer
list
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers?transport_type="local"&name="test3"&orderby=+id', expected_list_ids=[3]
)
############### list orderby ########################
def test_computers_orderby_id_asc(self):
"""
Returns the computers list ordered by "id" in ascending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True)
def test_computers_orderby_id_asc_sign(self):
"""
Returns the computers list ordered by "+id" in ascending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)
def test_computers_orderby_id_desc(self):
"""
Returns the computers list ordered by "id" in descending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4, 3, 2, 1, 0])
def test_computers_orderby_name_asc(self):
"""
Returns the computers list ordered by "name" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=name', expected_list_ids=[1, 2, 3, 4]
)
def test_computers_orderby_name_asc_sign(self):
"""
Returns the computers list ordered by "+name" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3, 4]
)
def test_computers_orderby_name_desc(self):
"""
Returns the computers list ordered by "name" in descending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3, 2, 1]
)
def test_computers_orderby_scheduler_type_asc(self):
"""
Returns the computers list ordered by "scheduler_type" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?transport_type=\"ssh\"&pk>{str(node_pk)}&orderby=scheduler_type",
expected_list_ids=[1, 4, 2]
)
def test_comp_orderby_scheduler_ascsign(self):
"""
Returns the computers list ordered by "+scheduler_type" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?transport_type=\"ssh\"&pk>{str(node_pk)}&orderby=+scheduler_type",
expected_list_ids=[1, 4, 2]
)
def test_computers_orderby_schedulertype_desc(self):
"""
Returns the computers list ordered by "scheduler_type" in descending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?pk>{str(node_pk)}&transport_type=\"ssh\"&orderby=-scheduler_type",
expected_list_ids=[2, 4, 1]
)
############### list orderby combinations #######################
def test_computers_orderby_mixed1(self):
"""
Returns the computers list first order by "transport_type" in
ascending order and if it is having same transport_type, order it
by "id"
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f'/computers?pk>{str(node_pk)}&orderby=transport_type,id',
expected_list_ids=[3, 1, 2, 4]
)
def test_computers_orderby_mixed2(self):
"""
Returns the computers list first order by "scheduler_type" in
descending order and if it is having same scheduler_type, order it
by "name"
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name',
expected_list_ids=[2, 3, 4, 1]
)
def test_computers_orderby_mixed3(self):
"""
Returns the computers list first order by "scheduler_type" in
ascending order and if it is having same scheduler_type, order it
by "hostname" descending order
Response::
test4 slurm
test3 slurm
test2 torque
test1 pbspro
localhost pbspro
==========
Expected::
test1 pbspro
localhost pbspro
test4 slurm
test3 slurm
test2 torque
test1 test4
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=+scheduler_type,
-hostname",
expected_list_ids=[1,0,4,3,2])
"""
############### list filter combinations #######################
def test_computers_filter_mixed1(self):
"""
Add filter for the hostname and id of computer and get the
filtered computer list
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f"/computers?id>{str(node_pk)}&hostname=\"test1.epfl.ch\"", expected_list_ids=[1]
)
def test_computers_filter_mixed2(self):
"""
Add filter for the id, hostname and transport_type of the computer
and get the filtered computer list
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?id>{str(node_pk)}&hostname=\"test3.epfl.ch\"&transport_type=\"ssh\"",
empty_list=True
)
############### list all parameter combinations #######################
def test_computers_mixed1(self):
"""
url parameters: id, limit and offset
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?id>{str(node_pk)}&limit=2&offset=3&orderby=+id', expected_list_ids=[4]
)
def test_computers_mixed2(self):
"""
url parameters: id, page, perpage
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4]
)
def test_computers_mixed3(self):
"""
url parameters: id, transport_type, orderby
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?id>={str(node_pk)}&transport_type=\"ssh\"&orderby=-id&limit=2",
expected_list_ids=[4, 2]
)
########## pass unknown url parameter ###########
def test_computers_unknown_param(self):
"""
url parameters: id, limit and offset
from aiida.common.exceptions import InputValidationError
RESTApiTestCase.node_exception(self, "/computers?aa=bb&id=2", InputValidationError)
"""
############### calculation retrieved_inputs and retrieved_outputs #############
def test_calculation_retrieved_inputs(self):
"""
Get the list of given calculation retrieved_inputs
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/input_files'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}])
def test_calculation_retrieved_outputs(self):
"""
Get the list of given calculation retrieved_outputs
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}])
############### calculation incoming #############
def test_calculation_inputs(self):
"""
        Get the list of a given calculation's incoming links
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
self.process_test(
'nodes',
f'/nodes/{str(node_uuid)}/links/incoming?orderby=id',
expected_list_ids=[5, 3],
uuid=node_uuid,
result_node_type='data',
result_name='incoming'
)
def test_calculation_input_filters(self):
"""
Get filtered incoming list for given calculations
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
self.process_test(
'nodes',
f"/nodes/{str(node_uuid)}/links/incoming?node_type=\"data.dict.Dict.\"",
expected_list_ids=[3],
uuid=node_uuid,
result_node_type='data',
result_name='incoming'
)
def test_calculation_iotree(self):
"""
        Get the incoming/outgoing link tree of a given calculation
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(len(response['data']['nodes']), 1)
self.assertEqual(len(response['data']['nodes'][0]['incoming']), 1)
self.assertEqual(len(response['data']['nodes'][0]['outgoing']), 1)
self.assertEqual(len(response['data']['metadata']), 1)
expected_attr = [
'ctime', 'mtime', 'id', 'node_label', 'node_type', 'uuid', 'description', 'incoming', 'outgoing'
]
received_attr = response['data']['nodes'][0].keys()
for attr in expected_attr:
self.assertIn(attr, received_attr)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
############### calculation attributes #############
def test_calculation_attributes(self):
"""
Get list of calculation attributes
"""
attributes = {
'attr1': 'OK',
'attr2': 'OK',
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(response['data']['attributes'], attributes)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
def test_contents_attributes_filter(self):
"""
Get list of calculation attributes with filter attributes_filter
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter=\"attr1\""
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(response['data']['attributes'], {'attr1': 'OK'})
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
############### calculation node attributes filter #############
def test_calculation_attributes_filter(self):
"""
Get the list of given calculation attributes filtered
"""
attributes = {
'attr1': 'OK',
'attr2': 'OK',
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['nodes'][0]['attributes'], attributes)
############### calculation node extras_filter #############
def test_calculation_extras_filter(self):
"""
Get the list of given calculation extras filtered
"""
extras = {'extra1': False, 'extra2': 'extra_info'}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1'])
self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2'])
############### structure node attributes filter #############
def test_structure_attributes_filter(self):
"""
Get the list of given calculation attributes filtered
"""
cell = [[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertEqual(response['data']['nodes'][0]['attributes']['cell'], cell)
############### node attributes_filter with pagination #############
def test_node_attributes_filter_pagination(self):
"""
Check that node attributes specified in attributes_filter are
returned as a dictionary when pagination is set
"""
expected_attributes = ['resources', 'cell']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertIn('attributes', node)
self.assertNotIn('attributes.resources', node)
self.assertNotIn('attributes.cell', node)
self.assertEqual(len(node['attributes']), len(expected_attributes))
for attr in expected_attributes:
self.assertIn(attr, node['attributes'])
############### node get one attributes_filter with pagination #############
def test_node_single_attributes_filter(self):
"""
Check that when only one node attribute is specified in attributes_filter
only this attribute is returned as a dictionary when pagination is set
"""
expected_attribute = ['resources']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertEqual(list(node['attributes'].keys()), expected_attribute)
############### node extras_filter with pagination #############
def test_node_extras_filter_pagination(self):
"""
Check that node extras specified in extras_filter are
returned as a dictionary when pagination is set
"""
expected_extras = ['extra1', 'extra2']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertIn('extras', node)
self.assertNotIn('extras.extra1', node)
self.assertNotIn('extras.extra2', node)
self.assertEqual(len(node['extras']), len(expected_extras))
for extra in expected_extras:
self.assertIn(extra, node['extras'])
############### node get one extras_filter with pagination #############
def test_node_single_extras_filter(self):
"""
Check that when only one node extra is specified in extras_filter
only this extra is returned as a dictionary when pagination is set
"""
expected_extra = ['extra2']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertEqual(list(node['extras'].keys()), expected_extra)
############### node full_type filter #############
def test_nodes_full_type_filter(self):
"""
Get the list of nodes filtered by full_type
"""
expected_node_uuids = []
for calc in self.get_dummy_data()['calculations']:
if calc['node_type'] == 'process.calculation.calcjob.CalcJobNode.':
expected_node_uuids.append(calc['uuid'])
url = f"{self.get_url_prefix()}/nodes/?full_type=\"process.calculation.calcjob.CalcJobNode.|\""
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
for node in response['data']['nodes']:
self.assertIn(node['uuid'], expected_node_uuids)
############### Structure visualization and download #############
def test_structure_derived_properties(self):
"""
        Get the derived properties of a given structure node
"""
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/derived_properties'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(
response['data']['derived_properties']['dimensionality'], {
'dim': 3,
'value': 8.0,
'label': 'volume'
}
)
self.assertEqual(response['data']['derived_properties']['formula'], 'Ba')
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
def test_structure_download(self):
"""
Test download of structure file
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=xsf'
with self.app.test_client() as client:
rv_obj = client.get(url)
structure_data = load_node(node_uuid)._exportcontent('xsf')[0] # pylint: disable=protected-access
self.assertEqual(rv_obj.data, structure_data)
def test_cif(self):
"""
Test download of cif file
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['cifdata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=cif'
with self.app.test_client() as client:
rv_obj = client.get(url)
cif = load_node(node_uuid)._prepare_cif()[0] # pylint: disable=protected-access
self.assertEqual(rv_obj.data, cif)
############### projectable_properties #############
def test_projectable_properties(self):
"""
test projectable_properties endpoint
"""
for nodetype in ['nodes', 'processes', 'computers', 'users', 'groups']:
url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
expected_keys = ['display_name', 'help_text', 'is_display', 'is_foreign_key', 'type']
# check fields
for _, pinfo in response['data']['fields'].items():
available_keys = pinfo.keys()
for prop in expected_keys:
self.assertIn(prop, available_keys)
# check order
available_properties = response['data']['fields'].keys()
for prop in response['data']['ordering']:
self.assertIn(prop, available_properties)
def test_node_namespace(self):
"""
        Test the REST API call to get the list of available node namespaces
"""
url = f'{self.get_url_prefix()}/nodes/full_types'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
expected_data_keys = ['path', 'namespace', 'subspaces', 'label', 'full_type']
response_keys = response['data'].keys()
for dkay in expected_data_keys:
self.assertIn(dkay, response_keys)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response)
def test_comments(self):
"""
Get the node comments
"""
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)['data']['comments']
all_comments = []
for comment in response:
all_comments.append(comment['message'])
self.assertEqual(sorted(all_comments), sorted(['This is test comment.', 'Add another comment.']))
def test_repo(self):
"""
Test to get repo list or repo file contents for given node
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\"calcjob_inputs\""
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['repo_list'], [{'type': 'FILE', 'name': 'aiida.in'}])
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/contents?filename=\"calcjob_inputs/aiida.in\""
with self.app.test_client() as client:
response_obj = client.get(url)
input_file = load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in', mode='rb')
self.assertEqual(response_obj.data, input_file)
def test_process_report(self):
"""
Test process report
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
expected_keys = response['data'].keys()
for key in ['logs']:
self.assertIn(key, expected_keys)
expected_log_keys = response['data']['logs'][0].keys()
for key in ['time', 'loggername', 'levelname', 'dbnode_id', 'message']:
self.assertIn(key, expected_log_keys)
def test_download_formats(self):
"""
test for download format endpoint
"""
url = f'{self.get_url_prefix()}/nodes/download_formats'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
for key in ['data.structure.StructureData.|', 'data.cif.CifData.|']:
self.assertIn(key, response['data'].keys())
for key in ['cif', 'xsf', 'xyz']:
self.assertIn(key, response['data']['data.structure.StructureData.|'])
self.assertIn('cif', response['data']['data.cif.CifData.|'])
| [
"aiida.orm.Dict",
"aiida.orm.load_node",
"aiida.orm.QueryBuilder",
"aiida.orm.Computer",
"aiida.orm.FolderData",
"aiida.common.exceptions.InputValidationError",
"aiida.orm.CalcFunctionNode",
"aiida.orm.Log",
"aiida.orm.CalcJobNode",
"aiida.restapi.run_api.configure_api",
"aiida.orm.StructureData",
"aiida.common.timezone.now",
"logging.getLevelName",
"tempfile.NamedTemporaryFile",
"aiida.common.json.loads",
"aiida.orm.KpointsData"
] | [((1413, 1454), 'aiida.restapi.run_api.configure_api', 'configure_api', ([], {'catch_internal_server': '(True)'}), '(catch_internal_server=True)\n', (1426, 1454), False, 'from aiida.restapi.run_api import configure_api\n'), ((1630, 1658), 'aiida.orm.StructureData', 'orm.StructureData', ([], {'cell': 'cell'}), '(cell=cell)\n', (1647, 1658), False, 'from aiida import orm\n'), ((1957, 1988), 'aiida.orm.Dict', 'orm.Dict', ([], {'dict': "{'a': 1, 'b': 2}"}), "(dict={'a': 1, 'b': 2})\n", (1965, 1988), False, 'from aiida import orm\n'), ((2038, 2069), 'aiida.orm.Dict', 'orm.Dict', ([], {'dict': "{'c': 3, 'd': 4}"}), "(dict={'c': 3, 'd': 4})\n", (2046, 2069), False, 'from aiida import orm\n'), ((2115, 2132), 'aiida.orm.KpointsData', 'orm.KpointsData', ([], {}), '()\n', (2130, 2132), False, 'from aiida import orm\n'), ((2291, 2334), 'aiida.orm.CalcFunctionNode', 'orm.CalcFunctionNode', ([], {'computer': 'cls.computer'}), '(computer=cls.computer)\n', (2311, 2334), False, 'from aiida import orm\n'), ((2376, 2414), 'aiida.orm.CalcJobNode', 'orm.CalcJobNode', ([], {'computer': 'cls.computer'}), '(computer=cls.computer)\n', (2391, 2414), False, 'from aiida import orm\n'), ((3785, 3802), 'aiida.orm.Log', 'Log', ([], {}), '(**log_record)\n', (3788, 3802), False, 'from aiida.orm import Log\n'), ((3891, 3907), 'aiida.orm.FolderData', 'orm.FolderData', ([], {}), '()\n', (3905, 3907), False, 'from aiida import orm\n'), ((4483, 4521), 'aiida.orm.CalcJobNode', 'orm.CalcJobNode', ([], {'computer': 'cls.computer'}), '(computer=cls.computer)\n', (4498, 4521), False, 'from aiida import orm\n'), ((2994, 3032), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+"""'}), "(mode='w+')\n", (3021, 3032), False, 'import tempfile\n'), ((3486, 3491), 'aiida.common.timezone.now', 'now', ([], {}), '()\n', (3489, 3491), False, 'from aiida.common.timezone import now\n'), ((3558, 3596), 'logging.getLevelName', 'logging.getLevelName', (['LOG_LEVEL_REPORT'], {}), '(LOG_LEVEL_REPORT)\n', (3578, 3596), False, 'import logging\n'), ((4009, 4047), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+"""'}), "(mode='w+')\n", (4036, 4047), False, 'import tempfile\n'), ((5336, 5366), 'aiida.orm.Computer', 'orm.Computer', ([], {}), '(**dummy_computer)\n', (5348, 5366), False, 'from aiida import orm\n'), ((11231, 11259), 'aiida.common.json.loads', 'json.loads', (['rv_response.data'], {}), '(rv_response.data)\n', (11241, 11259), False, 'from aiida.common import json\n'), ((28299, 28330), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (28309, 28330), False, 'from aiida.common import json\n'), ((28828, 28859), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (28838, 28859), False, 'from aiida.common import json\n'), ((30340, 30371), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (30350, 30371), False, 'from aiida.common import json\n'), ((31712, 31735), 'aiida.common.json.loads', 'json.loads', (['rv_obj.data'], {}), '(rv_obj.data)\n', (31722, 31735), False, 'from aiida.common import json\n'), ((32394, 32417), 'aiida.common.json.loads', 'json.loads', (['rv_obj.data'], {}), '(rv_obj.data)\n', (32404, 32417), False, 'from aiida.common import json\n'), ((33329, 33360), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (33339, 33360), False, 'from aiida.common import json\n'), ((33988, 
34019), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (33998, 34019), False, 'from aiida.common import json\n'), ((34754, 34777), 'aiida.common.json.loads', 'json.loads', (['rv_obj.data'], {}), '(rv_obj.data)\n', (34764, 34777), False, 'from aiida.common import json\n'), ((35424, 35455), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (35434, 35455), False, 'from aiida.common import json\n'), ((36522, 36553), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (36532, 36553), False, 'from aiida.common import json\n'), ((37287, 37318), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (37297, 37318), False, 'from aiida.common import json\n'), ((38320, 38351), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (38330, 38351), False, 'from aiida.common import json\n'), ((39165, 39188), 'aiida.common.json.loads', 'json.loads', (['rv_obj.data'], {}), '(rv_obj.data)\n', (39175, 39188), False, 'from aiida.common import json\n'), ((39769, 39792), 'aiida.common.json.loads', 'json.loads', (['rv_obj.data'], {}), '(rv_obj.data)\n', (39779, 39792), False, 'from aiida.common import json\n'), ((42717, 42740), 'aiida.common.json.loads', 'json.loads', (['rv_obj.data'], {}), '(rv_obj.data)\n', (42727, 42740), False, 'from aiida.common import json\n'), ((44128, 44159), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (44138, 44159), False, 'from aiida.common import json\n'), ((44976, 45007), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (44986, 45007), False, 'from aiida.common import json\n'), ((45633, 45664), 'aiida.common.json.loads', 'json.loads', (['response_value.data'], {}), '(response_value.data)\n', (45643, 45664), False, 'from aiida.common import json\n'), ((12877, 12902), 'aiida.common.json.loads', 'json.loads', (['response.data'], {}), '(response.data)\n', (12887, 12902), False, 'from aiida.common import json\n'), ((41759, 41782), 'aiida.common.json.loads', 'json.loads', (['rv_obj.data'], {}), '(rv_obj.data)\n', (41769, 41782), False, 'from aiida.common import json\n'), ((40683, 40703), 'aiida.orm.load_node', 'load_node', (['node_uuid'], {}), '(node_uuid)\n', (40692, 40703), False, 'from aiida.orm import load_node\n'), ((41193, 41213), 'aiida.orm.load_node', 'load_node', (['node_uuid'], {}), '(node_uuid)\n', (41202, 41213), False, 'from aiida.orm import load_node\n'), ((43407, 43430), 'aiida.common.json.loads', 'json.loads', (['rv_obj.data'], {}), '(rv_obj.data)\n', (43417, 43430), False, 'from aiida.common import json\n'), ((44491, 44511), 'aiida.orm.load_node', 'load_node', (['node_uuid'], {}), '(node_uuid)\n', (44500, 44511), False, 'from aiida.orm import load_node\n'), ((6123, 6141), 'aiida.orm.QueryBuilder', 'orm.QueryBuilder', ([], {}), '()\n', (6139, 6141), False, 'from aiida import orm\n'), ((6749, 6767), 'aiida.orm.QueryBuilder', 'orm.QueryBuilder', ([], {}), '()\n', (6765, 6767), False, 'from aiida import orm\n'), ((11978, 12042), 'aiida.common.exceptions.InputValidationError', 'InputValidationError', (['"""Pass the expected range of the dummydata"""'], {}), "('Pass the expected range of the dummydata')\n", (11998, 12042), False, 'from aiida.common.exceptions import InputValidationError\n'), ((7842, 7860), 'aiida.orm.QueryBuilder', 
'orm.QueryBuilder', ([], {}), '()\n', (7858, 7860), False, 'from aiida import orm\n')] |
# https://www.acmicpc.net/problem/13023
import sys
sys.setrecursionlimit(999999999)
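# The task (problem 13023) is to decide whether the friendship graph contains
# a simple path of five distinct people, i.e. `target_depth` = 4 edges.  Every
# vertex is tried as a starting point; dfs() backtracks over simple paths and
# reports success through the one-element list `is_possible`, which acts as a
# mutable flag shared with dfs_all().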
def dfs_all():
is_possible = [False]
for node in range(N):
visited = [False for _ in range(N)]
dfs(node, 0, visited, is_possible)
if is_possible[0]:
return 1
return 0
def dfs(cur, depth, visited, is_possible):
if visited[cur]:
return
if depth == target_depth:
is_possible[0] = True
return
visited[cur] = True
for nxt in graph[cur]:
dfs(nxt, depth + 1, visited, is_possible)
visited[cur] = False
if __name__ == '__main__':
    input = sys.stdin.readline  # rebind input to the faster sys.stdin.readline
target_depth = 4
N, M = map(int, input().split())
graph = [list() for _ in range(N)]
for _ in range(M):
a, b = map(int, input().split())
graph[a].append(b)
graph[b].append(a)
print(dfs_all())
| [
"sys.setrecursionlimit"
] | [((52, 84), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(999999999)'], {}), '(999999999)\n', (73, 84), False, 'import sys\n')] |
# This file is part of postcipes
# (c) <NAME>
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
from scipy.interpolate import interp1d
import numpy as np
import h5py
__all__ = ["HydraulicJump"]
class HydraulicJump(Postcipe):
def __init__(self, path):
Postcipe.__init__(self)
self.case = tbl.Case(path)
self.case['alphag'] = 1 - self.case['alpha.waterMean']
self.U = self.case.boundary_data("inlet", sort="y")[1]['UMean'][0, 0]
y_inlet = self.case.boundary_data("inlet", sort="y")[0][:, 1]
inlet_edge_length = tbl.edge_lengths(self.case, "inlet")
self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1]
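        # Upstream Froude number Fr1 = U / sqrt(g*d), the sequent (conjugate)
        # depth from the Bélanger relation d2 = d/2*(sqrt(1 + 8*Fr1^2) - 1),
        # and the downstream Froude number based on d2.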
self.Fr1 = self.U/np.sqrt(9.81*self.d)
self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2
self.Fr2 = self.U/np.sqrt(9.81*self.d2)
iso05 = tbl.isoline(self.case, "alpha.waterMean", 0.5)
idx = iso05[:, 0].argsort()
self.xfs = iso05[idx, 0]
self.yfs = iso05[idx, 1]
idx_toe = np.argmin(np.abs(self.d*1.1 - self.yfs[:int(self.yfs.size/2)]))
self.xtoe = self.xfs[idx_toe]
| [
"turbulucid.Case",
"turbulucid.edge_lengths",
"numpy.sqrt",
"turbulucid.isoline"
] | [((548, 562), 'turbulucid.Case', 'tbl.Case', (['path'], {}), '(path)\n', (556, 562), True, 'import turbulucid as tbl\n'), ((803, 839), 'turbulucid.edge_lengths', 'tbl.edge_lengths', (['self.case', '"""inlet"""'], {}), "(self.case, 'inlet')\n", (819, 839), True, 'import turbulucid as tbl\n'), ((1069, 1115), 'turbulucid.isoline', 'tbl.isoline', (['self.case', '"""alpha.waterMean"""', '(0.5)'], {}), "(self.case, 'alpha.waterMean', 0.5)\n", (1080, 1115), True, 'import turbulucid as tbl\n'), ((923, 945), 'numpy.sqrt', 'np.sqrt', (['(9.81 * self.d)'], {}), '(9.81 * self.d)\n', (930, 945), True, 'import numpy as np\n'), ((1030, 1053), 'numpy.sqrt', 'np.sqrt', (['(9.81 * self.d2)'], {}), '(9.81 * self.d2)\n', (1037, 1053), True, 'import numpy as np\n'), ((970, 1000), 'numpy.sqrt', 'np.sqrt', (['(1 + 8 * self.Fr1 ** 2)'], {}), '(1 + 8 * self.Fr1 ** 2)\n', (977, 1000), True, 'import numpy as np\n')] |
# vi: ts=4 sw=4
'''AreaDetector Devices
`areaDetector`_ detector abstractions
.. _areaDetector: https://areadetector.github.io/master/index.html
'''
import warnings
from .base import (ADBase, ADComponent as C)
from . import cam
__all__ = ['DetectorBase',
'AreaDetector',
'AdscDetector',
'Andor3Detector',
'AndorDetector',
'BrukerDetector',
'DexelaDetector',
'EmergentVisionDetector',
'EigerDetector',
'FirewireLinDetector',
'FirewireWinDetector',
'GreatEyesDetector',
'LightFieldDetector',
'Mar345Detector',
'MarCCDDetector',
'PSLDetector',
'PerkinElmerDetector',
'PICamDetector',
'PilatusDetector',
'PixiradDetector',
'PointGreyDetector',
'ProsilicaDetector',
'PvcamDetector',
'RoperDetector',
'SimDetector',
'URLDetector',
'UVCDetector',
'Xspress3Detector'
]
class DetectorBase(ADBase):
"""
The base class for the hardware-specific classes that follow.
Note that Plugin also inherits from ADBase.
This adds some AD-specific methods that are not shared by the plugins.
"""
_default_configuration_attrs = (ADBase._default_configuration_attrs +
('cam', ))
def generate_datum(self, key, timestamp, datum_kwargs=None):
"""
Notify plugins of acquisition being complete.
When a new acquisition is started, this method is called with a
key which is a label like 'light', 'dark', or 'gain8'.
It in turn calls ``generate_datum`` on all of the plugins that have
that method.
File plugins are identified by searching for a
:meth:`~ophyd.areadetector.filestore_mixins.FileStoreBase.generate_datum`
method that must have the signature ::
def generate_datum(key: str, timestamp: float, datum_kwargs: dict):
...
Parameters
----------
key : str
The label for the datum that should be generated
timestamp : float
The time of the trigger
datum_kwargs : Dict[str, Any], optional
Any datum kwargs that should go to all children.
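
        Examples
        --------
        Any component of the detector that defines a ``generate_datum``
        method with the signature above is treated as a file plugin (and is
        only called while its ``enable`` signal reads true).  A minimal
        illustrative sketch; the class below is hypothetical and not part
        of ophyd::

            class DatumRecorder:
                def __init__(self):
                    self.calls = []

                def generate_datum(self, key, timestamp, datum_kwargs):
                    # Record each datum request for later inspection.
                    self.calls.append((key, timestamp, dict(datum_kwargs)))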
"""
if datum_kwargs is None:
datum_kwargs = {}
file_plugins = [s for s in self._signals.values() if
hasattr(s, 'generate_datum')]
for p in file_plugins:
if p.enable.get():
p.generate_datum(key, timestamp, datum_kwargs)
def dispatch(self, key, timestamp):
warnings.warn(
".dispatch is deprecated, use .generate_datum instead",
stacklevel=2
)
return self.generate_datum(key, timestamp, {})
dispatch.__doc__ = generate_datum.__doc__
def make_data_key(self):
source = 'PV:{}'.format(self.prefix)
# This shape is expected to match arr.shape for the array.
shape = (self.cam.num_images.get(),
self.cam.array_size.array_size_y.get(),
self.cam.array_size.array_size_x.get())
return dict(shape=shape, source=source, dtype='array',
external='FILESTORE:')
def collect_asset_docs(self):
file_plugins = [s for s in self._signals.values() if
hasattr(s, 'collect_asset_docs')]
for p in file_plugins:
yield from p.collect_asset_docs()
class AreaDetector(DetectorBase):
cam = C(cam.AreaDetectorCam, 'cam1:')
class SimDetector(DetectorBase):
_html_docs = ['simDetectorDoc.html']
cam = C(cam.SimDetectorCam, 'cam1:')
class AdscDetector(DetectorBase):
_html_docs = ['adscDoc.html']
cam = C(cam.AdscDetectorCam, 'cam1:')
class AndorDetector(DetectorBase):
_html_docs = ['andorDoc.html']
cam = C(cam.AndorDetectorCam, 'cam1:')
class Andor3Detector(DetectorBase):
_html_docs = ['andor3Doc.html']
cam = C(cam.Andor3DetectorCam, 'cam1:')
class BrukerDetector(DetectorBase):
_html_docs = ['BrukerDoc.html']
cam = C(cam.BrukerDetectorCam, 'cam1:')
class DexelaDetector(DetectorBase):
_html_docs = ['DexelaDoc.html']
cam = C(cam.DexelaDetectorCam, 'cam1:')
class EmergentVisionDetector(DetectorBase):
_html_docs = ['EVTDoc.html']
cam = C(cam.EmergentVisionDetectorCam, 'cam1:')
class EigerDetector(DetectorBase):
_html_docs = ['EigerDoc.html']
cam = C(cam.EigerDetectorCam, 'cam1:')
class FirewireLinDetector(DetectorBase):
_html_docs = ['FirewireWinDoc.html']
cam = C(cam.FirewireLinDetectorCam, 'cam1:')
class FirewireWinDetector(DetectorBase):
_html_docs = ['FirewireWinDoc.html']
cam = C(cam.FirewireWinDetectorCam, 'cam1:')
class GreatEyesDetector(DetectorBase):
_html_docs = [] # the documentation is not public
cam = C(cam.GreatEyesDetectorCam, 'cam1:')
class LightFieldDetector(DetectorBase):
_html_docs = ['LightFieldDoc.html']
cam = C(cam.LightFieldDetectorCam, 'cam1:')
class Mar345Detector(DetectorBase):
_html_docs = ['Mar345Doc.html']
cam = C(cam.Mar345DetectorCam, 'cam1:')
class MarCCDDetector(DetectorBase):
_html_docs = ['MarCCDDoc.html']
cam = C(cam.MarCCDDetectorCam, 'cam1:')
class PerkinElmerDetector(DetectorBase):
_html_docs = ['PerkinElmerDoc.html']
cam = C(cam.PerkinElmerDetectorCam, 'cam1:')
class PSLDetector(DetectorBase):
_html_docs = ['PSLDoc.html']
cam = C(cam.PSLDetectorCam, 'cam1:')
class PICamDetector(DetectorBase):
_html_docs = ['PICamDoc.html']
cam = C(cam.PICamDetectorCam, 'cam1:')
class PilatusDetector(DetectorBase):
_html_docs = ['pilatusDoc.html']
cam = C(cam.PilatusDetectorCam, 'cam1:')
class PixiradDetector(DetectorBase):
_html_docs = ['PixiradDoc.html']
cam = C(cam.PixiradDetectorCam, 'cam1:')
class PointGreyDetector(DetectorBase):
_html_docs = ['PointGreyDoc.html']
cam = C(cam.PointGreyDetectorCam, 'cam1:')
class ProsilicaDetector(DetectorBase):
_html_docs = ['prosilicaDoc.html']
cam = C(cam.ProsilicaDetectorCam, 'cam1:')
class PvcamDetector(DetectorBase):
_html_docs = ['pvcamDoc.html']
cam = C(cam.PvcamDetectorCam, 'cam1:')
class RoperDetector(DetectorBase):
_html_docs = ['RoperDoc.html']
cam = C(cam.RoperDetectorCam, 'cam1:')
class URLDetector(DetectorBase):
_html_docs = ['URLDoc.html']
cam = C(cam.URLDetectorCam, 'cam1:')
class UVCDetector(DetectorBase):
_html_docs = ['UVCDoc.html']
cam = C(cam.UVCDetectorCam, 'cam1:')
class Xspress3Detector(DetectorBase):
_html_docs = ['Xspress3Doc.html']
cam = C(cam.Xspress3DetectorCam, 'det1:')
| [
"warnings.warn"
] | [((2736, 2823), 'warnings.warn', 'warnings.warn', (['""".dispatch is deprecated, use .generate_datum instead"""'], {'stacklevel': '(2)'}), "('.dispatch is deprecated, use .generate_datum instead',\n stacklevel=2)\n", (2749, 2823), False, 'import warnings\n')] |
import os
from functools import wraps
from os.path import join as join_path
from dash import Dash
from flask import make_response, render_template_string, redirect
excluded_resources_endpoints = (
'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout',
'/_user', '/auth')
def add_routes(app, authorizer):
"""Adds authentication endpoints to a flask app.
Decorates other endpoints to grant access.
The endpoints are:
* /login
* Method: GET
* /logout
* Method: GET
* Erases cookies
* /auth
* Method: GET
* Validates cookies if present or header authentication
* Header:
'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)'
* Sets cookies on login
* Rejects unauthorized users
Parameters
----------
app: flask.Flask or dash.Dash
The flask or dash application
    authorizer:
        Object exposing the ``validate()`` and ``clean_cookie()`` methods
        used to grant or deny access.  Endpoints listed in the module-level
        ``excluded_resources_endpoints`` tuple are never checked.
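
    Examples
    --------
    A minimal wiring sketch; ``MyAuthorizer`` is a hypothetical class
    exposing the ``validate()``/``clean_cookie()`` interface described
    above::

        from dash import Dash

        app = Dash(__name__)
        add_routes(app, MyAuthorizer())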
"""
def login():
ok, _ = authorizer.validate()
if ok:
return make_response(redirect('/'), 307)
return render_template_string(login_template)
def logout():
_, response = authorizer.clean_cookie()
return response
def auth():
_, response = authorizer.validate()
return response
def authorize_endpoint(function):
@wraps(function)
def authorized_function(*args, **kwargs):
ok, response = authorizer.validate()
if ok:
return function(*args, **kwargs)
return response
return authorized_function
if isinstance(app, Dash):
app = app.server
login_template = load_template('login.html')
app.add_url_rule('/auth', '/auth', auth)
app.add_url_rule('/login', '/login', login)
app.add_url_rule('/logout', '/logout', logout)
for endpoint, function in app.view_functions.items():
if endpoint not in excluded_resources_endpoints:
app.view_functions[endpoint] = authorize_endpoint(function)
def load_template(filename):
"""Loads the login html template."""
pyfile_path = os.path.dirname(os.path.abspath(__file__))
path = join_path(pyfile_path, 'templates', filename)
with open(path, 'r') as f:
return f.read().strip()
| [
"os.path.join",
"functools.wraps",
"flask.redirect",
"flask.render_template_string",
"os.path.abspath"
] | [((2264, 2309), 'os.path.join', 'join_path', (['pyfile_path', '"""templates"""', 'filename'], {}), "(pyfile_path, 'templates', filename)\n", (2273, 2309), True, 'from os.path import join as join_path\n'), ((1175, 1213), 'flask.render_template_string', 'render_template_string', (['login_template'], {}), '(login_template)\n', (1197, 1213), False, 'from flask import make_response, render_template_string, redirect\n'), ((1438, 1453), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (1443, 1453), False, 'from functools import wraps\n'), ((2226, 2251), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2241, 2251), False, 'import os\n'), ((1140, 1153), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (1148, 1153), False, 'from flask import make_response, render_template_string, redirect\n')] |
# Generated by Django 4.0.1 on 2022-04-07 01:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('model_api', '0004_remove_order_created_remove_order_id_and_more'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='dateTimeCreated',
),
migrations.AlterField(
model_name='order',
name='_id',
field=models.AutoField(editable=False, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='orderedproduct',
name='_id',
field=models.AutoField(editable=False, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='orderedproduct',
name='price',
field=models.CharField(blank=True, max_length=20, null=True),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((264, 330), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""order"""', 'name': '"""dateTimeCreated"""'}), "(model_name='order', name='dateTimeCreated')\n", (286, 330), False, 'from django.db import migrations, models\n'), ((472, 539), 'django.db.models.AutoField', 'models.AutoField', ([], {'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(editable=False, primary_key=True, serialize=False)\n', (488, 539), False, 'from django.db import migrations, models\n'), ((666, 733), 'django.db.models.AutoField', 'models.AutoField', ([], {'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(editable=False, primary_key=True, serialize=False)\n', (682, 733), False, 'from django.db import migrations, models\n'), ((862, 916), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)', 'null': '(True)'}), '(blank=True, max_length=20, null=True)\n', (878, 916), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-21 12:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BookCopy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('book_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])),
('remarks', models.TextField(blank=True, default='')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='BookDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1024)),
('author', models.CharField(default='Unknown', max_length=1024)),
('description', models.TextField(blank=True, default='')),
('publisher', models.CharField(blank=True, default='', max_length=512)),
('published_on', models.DateField(blank=True, null=True)),
('pages', models.PositiveIntegerField(blank=True, default=0, null=True)),
('ddc', models.CharField(blank=True, default='', max_length=1024)),
('llcc', models.CharField(blank=True, default='', max_length=1024)),
('isbn', models.CharField(blank=True, default='', max_length=1024)),
('tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=512)),
('slug', models.SlugField(max_length=128, unique=True)),
('description', models.TextField(blank=True, default='')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512)),
('short_code', models.CharField(db_index=True, max_length=8, unique=True)),
('description', models.TextField(blank=True, default='')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Periodical',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1024)),
('description', models.TextField(blank=True, default='')),
('publisher', models.CharField(blank=True, default='', max_length=512)),
('tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PeriodicalIssue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('issue_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])),
('published_on', models.DateField(blank=True, null=True)),
('volume', models.PositiveIntegerField(blank=True, null=True)),
('issue', models.PositiveIntegerField(blank=True, null=True)),
('remarks', models.TextField(blank=True, default='')),
('tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='bookdetail',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'),
),
migrations.AddField(
model_name='bookdetail',
name='created_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='bookdetail',
name='language',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'),
),
migrations.AddField(
model_name='bookdetail',
name='updated_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='bookcopy',
name='book_detail',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'),
),
migrations.AddField(
model_name='bookcopy',
name='created_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='bookcopy',
name='updated_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_copy_updated_by', to=settings.AUTH_USER_MODEL),
),
]
| [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.SlugField",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((309, 366), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (340, 366), False, 'from django.db import migrations, models\n'), ((6611, 6699), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""items.Category"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'items.Category')\n", (6628, 6699), False, 'from django.db import migrations, models\n'), ((6822, 6918), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (6839, 6918), False, 'from django.db import migrations, models\n'), ((7039, 7127), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""items.Language"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'items.Language')\n", (7056, 7127), False, 'from django.db import migrations, models\n'), ((7250, 7385), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""book_detail_updated_by"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='book_detail_updated_by', to=settings.AUTH_USER_MODEL)\n", (7267, 7385), False, 'from django.db import migrations, models\n'), ((7507, 7597), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""items.BookDetail"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'items.BookDetail')\n", (7524, 7597), False, 'from django.db import migrations, models\n'), ((7718, 7814), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (7735, 7814), False, 'from django.db import migrations, models\n'), ((7935, 8068), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""book_copy_updated_by"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='book_copy_updated_by', to=settings.AUTH_USER_MODEL)\n", (7952, 8068), False, 'from django.db import migrations, models\n'), ((499, 592), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (515, 592), False, 'from django.db import migrations, models\n'), ((623, 787), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (\n 4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')]"}), "(choices=[(1, 'Available'), (2, 'In Circulation'), (3,\n 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6,\n 'Damaged')])\n", (642, 787), False, 'from django.db import migrations, models\n'), ((810, 850), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (826, 850), False, 'from django.db import migrations, models\n'), ((884, 923), 'django.db.models.DateTimeField', 
'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (904, 923), False, 'from django.db import migrations, models\n'), ((957, 992), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (977, 992), False, 'from django.db import migrations, models\n'), ((1128, 1221), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1144, 1221), False, 'from django.db import migrations, models\n'), ((1246, 1279), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1024)'}), '(max_length=1024)\n', (1262, 1279), False, 'from django.db import migrations, models\n'), ((1309, 1361), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Unknown"""', 'max_length': '(1024)'}), "(default='Unknown', max_length=1024)\n", (1325, 1361), False, 'from django.db import migrations, models\n'), ((1396, 1436), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (1412, 1436), False, 'from django.db import migrations, models\n'), ((1469, 1525), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(512)'}), "(blank=True, default='', max_length=512)\n", (1485, 1525), False, 'from django.db import migrations, models\n'), ((1561, 1600), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1577, 1600), False, 'from django.db import migrations, models\n'), ((1629, 1690), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'default': '(0)', 'null': '(True)'}), '(blank=True, default=0, null=True)\n', (1656, 1690), False, 'from django.db import migrations, models\n'), ((1717, 1774), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(1024)'}), "(blank=True, default='', max_length=1024)\n", (1733, 1774), False, 'from django.db import migrations, models\n'), ((1802, 1859), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(1024)'}), "(blank=True, default='', max_length=1024)\n", (1818, 1859), False, 'from django.db import migrations, models\n'), ((1887, 1944), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(1024)'}), "(blank=True, default='', max_length=1024)\n", (1903, 1944), False, 'from django.db import migrations, models\n'), ((1972, 2028), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(1024)', 'null': '(True)'}), '(blank=True, max_length=1024, null=True)\n', (1988, 2028), False, 'from django.db import migrations, models\n'), ((2062, 2101), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2082, 2101), False, 'from django.db import migrations, models\n'), ((2135, 2170), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2155, 2170), False, 'from django.db import migrations, models\n'), ((2304, 2397), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 
'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2320, 2397), False, 'from django.db import migrations, models\n'), ((2422, 2454), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)'}), '(max_length=512)\n', (2438, 2454), False, 'from django.db import migrations, models\n'), ((2482, 2527), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(128)', 'unique': '(True)'}), '(max_length=128, unique=True)\n', (2498, 2527), False, 'from django.db import migrations, models\n'), ((2562, 2602), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (2578, 2602), False, 'from django.db import migrations, models\n'), ((2636, 2675), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2656, 2675), False, 'from django.db import migrations, models\n'), ((2709, 2744), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2729, 2744), False, 'from django.db import migrations, models\n'), ((2778, 2874), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (2795, 2874), False, 'from django.db import migrations, models\n'), ((2903, 3035), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""category_updated_by"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='category_updated_by', to=settings.AUTH_USER_MODEL)\n", (2920, 3035), False, 'from django.db import migrations, models\n'), ((3164, 3257), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3180, 3257), False, 'from django.db import migrations, models\n'), ((3281, 3313), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)'}), '(max_length=512)\n', (3297, 3313), False, 'from django.db import migrations, models\n'), ((3347, 3405), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(8)', 'unique': '(True)'}), '(db_index=True, max_length=8, unique=True)\n', (3363, 3405), False, 'from django.db import migrations, models\n'), ((3440, 3480), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (3456, 3480), False, 'from django.db import migrations, models\n'), ((3514, 3553), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3534, 3553), False, 'from django.db import migrations, models\n'), ((3587, 3622), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (3607, 3622), False, 'from django.db import migrations, models\n'), ((3656, 3752), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (3673, 3752), False, 'from django.db import migrations, 
models\n'), ((3781, 3913), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""language_updated_by"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='language_updated_by', to=settings.AUTH_USER_MODEL)\n", (3798, 3913), False, 'from django.db import migrations, models\n'), ((4044, 4137), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (4060, 4137), False, 'from django.db import migrations, models\n'), ((4162, 4195), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1024)'}), '(max_length=1024)\n', (4178, 4195), False, 'from django.db import migrations, models\n'), ((4230, 4270), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (4246, 4270), False, 'from django.db import migrations, models\n'), ((4303, 4359), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(512)'}), "(blank=True, default='', max_length=512)\n", (4319, 4359), False, 'from django.db import migrations, models\n'), ((4387, 4443), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(1024)', 'null': '(True)'}), '(blank=True, max_length=1024, null=True)\n', (4403, 4443), False, 'from django.db import migrations, models\n'), ((4477, 4516), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4497, 4516), False, 'from django.db import migrations, models\n'), ((4550, 4585), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (4570, 4585), False, 'from django.db import migrations, models\n'), ((4617, 4705), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""items.Category"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'items.Category')\n", (4634, 4705), False, 'from django.db import migrations, models\n'), ((4734, 4830), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (4751, 4830), False, 'from django.db import migrations, models\n'), ((4857, 4945), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""items.Language"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'items.Language')\n", (4874, 4945), False, 'from django.db import migrations, models\n'), ((4974, 5108), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""periodical_updated_by"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='periodical_updated_by', to=settings.AUTH_USER_MODEL)\n", (4991, 5108), False, 'from django.db import migrations, models\n'), ((5244, 5337), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n 
verbose_name='ID')\n", (5260, 5337), False, 'from django.db import migrations, models\n'), ((5369, 5533), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (\n 4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')]"}), "(choices=[(1, 'Available'), (2, 'In Circulation'), (3,\n 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6,\n 'Damaged')])\n", (5388, 5533), False, 'from django.db import migrations, models\n'), ((5561, 5600), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5577, 5600), False, 'from django.db import migrations, models\n'), ((5630, 5680), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5657, 5680), False, 'from django.db import migrations, models\n'), ((5709, 5759), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5736, 5759), False, 'from django.db import migrations, models\n'), ((5790, 5830), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (5806, 5830), False, 'from django.db import migrations, models\n'), ((5858, 5914), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(1024)', 'null': '(True)'}), '(blank=True, max_length=1024, null=True)\n', (5874, 5914), False, 'from django.db import migrations, models\n'), ((5948, 5987), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (5968, 5987), False, 'from django.db import migrations, models\n'), ((6021, 6056), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (6041, 6056), False, 'from django.db import migrations, models\n'), ((6090, 6186), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (6107, 6186), False, 'from django.db import migrations, models\n'), ((6215, 6305), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""items.Periodical"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'items.Periodical')\n", (6232, 6305), False, 'from django.db import migrations, models\n'), ((6334, 6474), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""periodical_issue_updated_by"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)\n", (6351, 6474), False, 'from django.db import migrations, models\n')] |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import redirect
from indico.modules.events.abstracts.models.abstracts import Abstract
from indico.web.flask.util import url_for
from indico.web.rh import RHSimple
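# Compatibility shim: old-style abstract URLs are redirected to the current
# ``abstracts.*`` endpoints.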
@RHSimple.wrap_function
def compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False):
abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404()
return redirect(url_for('abstracts.' + endpoint, abstract, management=management))
| [
"indico.web.flask.util.url_for",
"indico.modules.events.abstracts.models.abstracts.Abstract.find"
] | [((606, 671), 'indico.web.flask.util.url_for', 'url_for', (["('abstracts.' + endpoint)", 'abstract'], {'management': 'management'}), "('abstracts.' + endpoint, abstract, management=management)\n", (613, 671), False, 'from indico.web.flask.util import url_for\n'), ((515, 570), 'indico.modules.events.abstracts.models.abstracts.Abstract.find', 'Abstract.find', ([], {'event_id': 'confId', 'friendly_id': 'friendly_id'}), '(event_id=confId, friendly_id=friendly_id)\n', (528, 570), False, 'from indico.modules.events.abstracts.models.abstracts import Abstract\n')] |
from parsel import Selector
import requests, json, re
params = {
"q": "<NAME>",
"tbm": "bks",
"gl": "us",
"hl": "en"
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.87 Safari/537.36",
}
html = requests.get("https://www.google.com/search", params=params, headers=headers, timeout=30)
selector = Selector(text=html.text)
books_results = []
# https://regex101.com/r/mapBs4/1
book_thumbnails = re.findall(r"s=\\'data:image/jpg;base64,(.*?)\\'", str(selector.css("script").getall()), re.DOTALL)
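# The thumbnails are base64-encoded images embedded in inline <script> tags;
# the matches come back double-escaped, which is why each one is passed
# through "unicode-escape" twice before being stored below.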
for book_thumbnail, book_result in zip(book_thumbnails, selector.css(".Yr5TG")):
title = book_result.css(".DKV0Md::text").get()
link = book_result.css(".bHexk a::attr(href)").get()
displayed_link = book_result.css(".tjvcx::text").get()
snippet = book_result.css(".cmlJmd span::text").get()
author = book_result.css(".fl span::text").get()
author_link = f'https://www.google.com/search{book_result.css(".N96wpd .fl::attr(href)").get()}'
date_published = book_result.css(".fl+ span::text").get()
preview_link = book_result.css(".R1n8Q a.yKioRe:nth-child(1)::attr(href)").get()
more_editions_link = book_result.css(".R1n8Q a.yKioRe:nth-child(2)::attr(href)").get()
books_results.append({
"title": title,
"link": link,
"displayed_link": displayed_link,
"snippet": snippet,
"author": author,
"author_link": author_link,
"date_published": date_published,
"preview_link": preview_link,
"more_editions_link": f"https://www.google.com{more_editions_link}" if more_editions_link is not None else None,
"thumbnail": bytes(bytes(book_thumbnail, "ascii").decode("unicode-escape"), "ascii").decode("unicode-escape")
})
| [
"requests.get",
"parsel.Selector"
] | [((295, 389), 'requests.get', 'requests.get', (['"""https://www.google.com/search"""'], {'params': 'params', 'headers': 'headers', 'timeout': '(30)'}), "('https://www.google.com/search', params=params, headers=\n headers, timeout=30)\n", (307, 389), False, 'import requests, json, re\n'), ((396, 420), 'parsel.Selector', 'Selector', ([], {'text': 'html.text'}), '(text=html.text)\n', (404, 420), False, 'from parsel import Selector\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import io
import json
import os
import random
import re
import string
import time
from functools import wraps
from hashlib import sha1
import six
try:
from secrets import choice
except ImportError:
from random import choice
string_types = (six.string_types, six.text_type, six.binary_type)
re_type = type(re.compile("regex_test"))
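# WeChat callback verification: the token, timestamp, nonce (and any extra
# arguments) are sorted lexicographically, concatenated and SHA1-hashed; a
# request is trusted only if the digest matches the signature it carries.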
def get_signature(token, timestamp, nonce, *args):
sign = [token, timestamp, nonce] + list(args)
sign.sort()
sign = to_binary(''.join(sign))
return sha1(sign).hexdigest()
def check_signature(token, timestamp, nonce, signature):
if not (token and timestamp and nonce and signature):
return False
sign = get_signature(token, timestamp, nonce)
return sign == signature
def check_token(token):
return re.match('^[A-Za-z0-9]{3,32}$', token)
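# Memoize a method's result on the instance (under ``_<method name>``) and
# expose it as a read-only property.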
def cached_property(method):
prop_name = '_{}'.format(method.__name__)
@wraps(method)
def wrapped_func(self, *args, **kwargs):
if not hasattr(self, prop_name):
setattr(self, prop_name, method(self, *args, **kwargs))
return getattr(self, prop_name)
return property(wrapped_func)
def to_text(value, encoding="utf-8"):
if isinstance(value, six.text_type):
return value
if isinstance(value, six.binary_type):
return value.decode(encoding)
return six.text_type(value)
def to_binary(value, encoding="utf-8"):
if isinstance(value, six.binary_type):
return value
if isinstance(value, six.text_type):
return value.encode(encoding)
return six.binary_type(value)
def is_string(value):
return isinstance(value, string_types)
def byte2int(s, index=0):
"""Get the ASCII int value of a character in a string.
:param s: a string
:param index: the position of desired character
:return: ASCII int value
"""
if six.PY2:
return ord(s[index])
return s[index]
def generate_token(length=''):
if not length:
length = random.randint(3, 32)
length = int(length)
assert 3 <= length <= 32
letters = string.ascii_letters + string.digits
return ''.join(choice(letters) for _ in range(length))
def json_loads(s):
s = to_text(s)
return json.loads(s)
def json_dumps(d):
return json.dumps(d)
def pay_sign_dict(
appid,
pay_sign_key,
add_noncestr=True,
add_timestamp=True,
add_appid=True,
**kwargs
):
"""
    Sign payment parameters and return ``(params, sign, sign_type)``, where
    ``sign`` is the SHA1 hex digest of the sorted, concatenated parameters.
"""
assert pay_sign_key, "PAY SIGN KEY IS EMPTY"
if add_appid:
kwargs.update({'appid': appid})
if add_noncestr:
kwargs.update({'noncestr': generate_token()})
if add_timestamp:
kwargs.update({'timestamp': int(time.time())})
params = kwargs.items()
_params = [
(k.lower(), v) for k, v in kwargs.items() if k.lower() != "appid"
]
_params += [('appid', appid), ('appkey', pay_sign_key)]
_params.sort()
sign = '&'.join(["%s=%s" % (str(p[0]), str(p[1]))
for p in _params]).encode("utf-8")
sign = sha1(sign).hexdigest()
sign_type = 'SHA1'
return dict(params), sign, sign_type
def make_error_page(url):
with io.open(
os.path.join(os.path.dirname(__file__), 'contrib/error.html'),
'r',
encoding='utf-8'
) as error_page:
return error_page.read().replace('{url}', url)
def is_regex(value):
return isinstance(value, re_type)
| [
"json.loads",
"random.choice",
"re.compile",
"json.dumps",
"six.binary_type",
"re.match",
"functools.wraps",
"os.path.dirname",
"six.text_type",
"hashlib.sha1",
"time.time",
"random.randint"
] | [((399, 423), 're.compile', 're.compile', (['"""regex_test"""'], {}), "('regex_test')\n", (409, 423), False, 'import re\n'), ((868, 906), 're.match', 're.match', (['"""^[A-Za-z0-9]{3,32}$"""', 'token'], {}), "('^[A-Za-z0-9]{3,32}$', token)\n", (876, 906), False, 'import re\n'), ((990, 1003), 'functools.wraps', 'wraps', (['method'], {}), '(method)\n', (995, 1003), False, 'from functools import wraps\n'), ((1427, 1447), 'six.text_type', 'six.text_type', (['value'], {}), '(value)\n', (1440, 1447), False, 'import six\n'), ((1644, 1666), 'six.binary_type', 'six.binary_type', (['value'], {}), '(value)\n', (1659, 1666), False, 'import six\n'), ((2306, 2319), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2316, 2319), False, 'import json\n'), ((2352, 2365), 'json.dumps', 'json.dumps', (['d'], {}), '(d)\n', (2362, 2365), False, 'import json\n'), ((2069, 2090), 'random.randint', 'random.randint', (['(3)', '(32)'], {}), '(3, 32)\n', (2083, 2090), False, 'import random\n'), ((591, 601), 'hashlib.sha1', 'sha1', (['sign'], {}), '(sign)\n', (595, 601), False, 'from hashlib import sha1\n'), ((2215, 2230), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (2221, 2230), False, 'from random import choice\n'), ((3115, 3125), 'hashlib.sha1', 'sha1', (['sign'], {}), '(sign)\n', (3119, 3125), False, 'from hashlib import sha1\n'), ((3270, 3295), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3285, 3295), False, 'import os\n'), ((2773, 2784), 'time.time', 'time.time', ([], {}), '()\n', (2782, 2784), False, 'import time\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform as _platform
import sys as _sys
from tensorflow.python import autograph
from tensorflow.python.training.experimental import loss_scaling_gradient_tape
# pylint: disable=g-bad-import-order
# Imports the following modules so that @RegisterGradient get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import cudnn_rnn_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import random_grad
from tensorflow.python.ops import rnn_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.eager import wrap_function
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.batch_ops import *
from tensorflow.python.ops.critical_section_ops import *
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.logging_ops import timestamp
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.manip_ops import *
from tensorflow.python.ops.math_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.proto_ops import *
from tensorflow.python.ops.ragged import ragged_dispatch as _ragged_dispatch
from tensorflow.python.ops.ragged import ragged_operators as _ragged_operators
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sort_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_min
from tensorflow.python.ops.state_ops import scatter_max
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.stateless_random_ops import *
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.variables import *
from tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map
# pylint: disable=g-import-not-at-top
if _platform.system() == "Windows":
from tensorflow.python.compiler.tensorrt import trt_convert_windows as trt
else:
from tensorflow.python.compiler.tensorrt import trt_convert as trt
# pylint: enable=g-import-not-at-top
# pylint: enable=wildcard-import
# pylint: enable=g-bad-import-order
# These modules were imported to set up RaggedTensor operators and dispatchers:
del _ragged_dispatch, _ragged_operators
| [
"platform.system"
] | [((5500, 5518), 'platform.system', '_platform.system', ([], {}), '()\n', (5516, 5518), True, 'import platform as _platform\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test Tango device server for use with scaling tests."""
import sys
import time
import argparse
import tango
from tango.server import run
from TestDevice import TestDevice
def init_callback():
"""Report server start up times.
This callback is executed post server initialisation.
"""
# pylint: disable=global-statement
global START_TIME
db = tango.Database()
elapsed = time.time() - START_TIME
list_devices()
exported_devices = list(db.get_device_exported('test/*'))
num_devices = len(exported_devices)
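    # register() starts the results.txt CSV row (device count plus the
    # registration timings, written without a newline); append the start-up
    # timings here to complete that line.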
    with open('results.txt', 'a') as file:
        file.write(',{},{}\n'.format(elapsed, elapsed / num_devices))
print('>> Time taken to start devices: {:.4f} s ({:.4f} s/dev)'
.format(elapsed, elapsed / num_devices))
def delete_server():
"""Delete the TestDeviceServer from the tango db."""
db = tango.Database()
db.set_timeout_millis(50000)
server = 'TestDeviceServer/1'
server_list = list(db.get_server_list(server))
if server in server_list:
start_time = time.time()
db.delete_server('TestDeviceServer/1')
print('- Delete server: {:.4f} s'.format(time.time() - start_time))
def register(num_devices):
"""Register devices in the tango db."""
db = tango.Database()
device_info = tango.DbDevInfo()
device_info.server = 'TestDeviceServer/1'
# pylint: disable=protected-access
device_info._class = 'TestDevice'
start_time = time.time()
for device_id in range(num_devices):
device_info.name = 'test/test_device/{:05d}'.format(device_id)
db.add_device(device_info)
elapsed = time.time() - start_time
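    # Write the first part of the results.txt CSV row (no trailing newline);
    # init_callback() appends the server start-up timings to complete it.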
    with open('results.txt', 'a') as file:
        file.write('{},{},{}'.format(num_devices, elapsed, elapsed / num_devices))
print('- Register devices: {:.4f} s ({:.4f} s/device)'
.format(elapsed, elapsed / num_devices))
def list_devices():
"""List tango devices associated with the TestDeviceServer."""
db = tango.Database()
server_instance = 'TestDeviceServer/1'
device_class = 'TestDevice'
devices = list(db.get_device_name(server_instance, device_class))
print('- No. registered devices: {}'.format(len(devices)))
exported_devices = list(db.get_device_exported('test/*'))
print('- No. running devices: {}'.format(len(exported_devices)))
def main(args=None, **kwargs):
"""Run (start) the device server."""
run([TestDevice], verbose=True, msg_stream=sys.stdout,
post_init_callback=init_callback, raises=False,
args=args, **kwargs)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Device registration time.')
PARSER.add_argument('num_devices', metavar='N', type=int,
default=1, nargs='?',
help='Number of devices to start.')
ARGS = PARSER.parse_args()
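    # Example invocation (script name assumed for illustration):
    #   python test_device_server.py 100
    # which registers 100 TestDevice instances before starting the server.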
delete_server()
time.sleep(0.5)
list_devices()
print('* Registering {} devices'.format(ARGS.num_devices))
register(ARGS.num_devices)
list_devices()
print('* Starting server ...')
sys.argv = ['TestDeviceServer', '1', '-v4']
START_TIME = time.time()
main()
| [
"tango.server.run",
"argparse.ArgumentParser",
"tango.Database",
"time.sleep",
"time.time",
"tango.DbDevInfo"
] | [((421, 437), 'tango.Database', 'tango.Database', ([], {}), '()\n', (435, 437), False, 'import tango\n'), ((908, 924), 'tango.Database', 'tango.Database', ([], {}), '()\n', (922, 924), False, 'import tango\n'), ((1313, 1329), 'tango.Database', 'tango.Database', ([], {}), '()\n', (1327, 1329), False, 'import tango\n'), ((1348, 1365), 'tango.DbDevInfo', 'tango.DbDevInfo', ([], {}), '()\n', (1363, 1365), False, 'import tango\n'), ((1508, 1519), 'time.time', 'time.time', ([], {}), '()\n', (1517, 1519), False, 'import time\n'), ((2027, 2043), 'tango.Database', 'tango.Database', ([], {}), '()\n', (2041, 2043), False, 'import tango\n'), ((2462, 2590), 'tango.server.run', 'run', (['[TestDevice]'], {'verbose': '(True)', 'msg_stream': 'sys.stdout', 'post_init_callback': 'init_callback', 'raises': '(False)', 'args': 'args'}), '([TestDevice], verbose=True, msg_stream=sys.stdout, post_init_callback=\n init_callback, raises=False, args=args, **kwargs)\n', (2465, 2590), False, 'from tango.server import run\n'), ((2645, 2709), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Device registration time."""'}), "(description='Device registration time.')\n", (2668, 2709), False, 'import argparse\n'), ((2933, 2948), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2943, 2948), False, 'import time\n'), ((3185, 3196), 'time.time', 'time.time', ([], {}), '()\n', (3194, 3196), False, 'import time\n'), ((452, 463), 'time.time', 'time.time', ([], {}), '()\n', (461, 463), False, 'import time\n'), ((1096, 1107), 'time.time', 'time.time', ([], {}), '()\n', (1105, 1107), False, 'import time\n'), ((1681, 1692), 'time.time', 'time.time', ([], {}), '()\n', (1690, 1692), False, 'import time\n'), ((1204, 1215), 'time.time', 'time.time', ([], {}), '()\n', (1213, 1215), False, 'import time\n')] |
from pathlib import Path
import pytest
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, \
DocumentSearchPipeline, RootNode
from haystack.retriever.dense import DensePassageRetriever
from haystack.retriever.sparse import ElasticsearchRetriever
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_load_yaml(document_store_with_docs):
# test correct load of indexing pipeline from yaml
pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"),
pipeline_name="indexing_pipeline")
pipeline.run(file_path=Path("samples/pdf/sample_pdf_1.pdf"), top_k_retriever=10, top_k_reader=3)
# test correct load of query pipeline from yaml
pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="query_pipeline")
prediction = pipeline.run(query="Who made the PDF specification?", top_k_retriever=10, top_k_reader=3)
assert prediction["query"] == "Who made the PDF specification?"
assert prediction["answers"][0]["answer"] == "Adobe Systems"
# test invalid pipeline name
with pytest.raises(Exception):
Pipeline.load_from_yaml(path=Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="invalid")
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever_with_docs, document_store_with_docs", [("elasticsearch", "elasticsearch")], indirect=True
)
def test_graph_creation(reader, retriever_with_docs, document_store_with_docs):
pipeline = Pipeline()
pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["Query"])
with pytest.raises(AssertionError):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.output_2"])
with pytest.raises(AssertionError):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.wrong_edge_label"])
with pytest.raises(Exception):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["InvalidNode"])
with pytest.raises(Exception):
pipeline = Pipeline()
pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["InvalidNode"])
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=3)
assert prediction is not None
assert prediction["query"] == "Who lives in Berlin?"
assert prediction["answers"][0]["answer"] == "Carla"
assert prediction["answers"][0]["probability"] <= 1
assert prediction["answers"][0]["probability"] >= 0
assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
assert len(prediction["answers"]) == 3
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_offsets(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=5)
assert prediction["answers"][0]["offset_start"] == 11
assert prediction["answers"][0]["offset_end"] == 16
start = prediction["answers"][0]["offset_start"]
end = prediction["answers"][0]["offset_end"]
assert prediction["answers"][0]["context"][start:end] == prediction["answers"][0]["answer"]
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_single_result(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
query = "testing finder"
prediction = pipeline.run(query=query, top_k_retriever=1, top_k_reader=1)
assert prediction is not None
assert len(prediction["answers"]) == 1
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever,document_store",
[("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
indirect=True,
)
def test_faq_pipeline(retriever, document_store):
documents = [
{"text": "How to test module-1?", 'meta': {"source": "wiki1", "answer": "Using tests for module-1"}},
{"text": "How to test module-2?", 'meta': {"source": "wiki2", "answer": "Using tests for module-2"}},
{"text": "How to test module-3?", 'meta': {"source": "wiki3", "answer": "Using tests for module-3"}},
{"text": "How to test module-4?", 'meta': {"source": "wiki4", "answer": "Using tests for module-4"}},
{"text": "How to test module-5?", 'meta': {"source": "wiki5", "answer": "Using tests for module-5"}},
]
document_store.write_documents(documents)
document_store.update_embeddings(retriever)
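    # The FAQ pipeline is expected to match the query against the stored
    # questions and surface each hit's "answer" meta field, which is what the
    # assertions below rely on.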
pipeline = FAQPipeline(retriever=retriever)
output = pipeline.run(query="How to test this?", top_k_retriever=3)
assert len(output["answers"]) == 3
assert output["answers"][0]["query"].startswith("How to")
assert output["answers"][0]["answer"].startswith("Using tests")
if isinstance(document_store, ElasticsearchDocumentStore):
output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
assert len(output["answers"]) == 1
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever,document_store",
[("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
indirect=True,
)
def test_document_search_pipeline(retriever, document_store):
documents = [
{"text": "Sample text for document-1", 'meta': {"source": "wiki1"}},
{"text": "Sample text for document-2", 'meta': {"source": "wiki2"}},
{"text": "Sample text for document-3", 'meta': {"source": "wiki3"}},
{"text": "Sample text for document-4", 'meta': {"source": "wiki4"}},
{"text": "Sample text for document-5", 'meta': {"source": "wiki5"}},
]
document_store.write_documents(documents)
document_store.update_embeddings(retriever)
pipeline = DocumentSearchPipeline(retriever=retriever)
output = pipeline.run(query="How to test this?", top_k_retriever=4)
assert len(output.get('documents', [])) == 4
if isinstance(document_store, ElasticsearchDocumentStore):
output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
assert len(output["documents"]) == 1
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator):
base_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
pipeline = TranslationWrapperPipeline(
input_translator=de_to_en_translator,
output_translator=en_to_de_translator,
pipeline=base_pipeline
)
prediction = pipeline.run(query="Wer lebt in Berlin?", top_k_retriever=10, top_k_reader=3)
assert prediction is not None
assert prediction["query"] == "Wer lebt in Berlin?"
assert "Carla" in prediction["answers"][0]["answer"]
assert prediction["answers"][0]["probability"] <= 1
assert prediction["answers"][0]["probability"] >= 0
assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
def test_join_document_pipeline(document_store_with_docs, reader):
es = ElasticsearchRetriever(document_store=document_store_with_docs)
dpr = DensePassageRetriever(
document_store=document_store_with_docs,
query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
use_gpu=False,
)
document_store_with_docs.update_embeddings(dpr)
query = "Where does Carla lives?"
# test merge without weights
join_node = JoinDocuments(join_mode="merge")
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert len(results["documents"]) == 3
# test merge with weights
join_node = JoinDocuments(join_mode="merge", weights=[1000, 1], top_k_join=2)
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert results["documents"][0].score > 1000
assert len(results["documents"]) == 2
# test concatenate
join_node = JoinDocuments(join_mode="concatenate")
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert len(results["documents"]) == 3
# test join_node with reader
join_node = JoinDocuments()
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
p.add_node(component=reader, name="Reader", inputs=["Join"])
results = p.run(query=query)
assert results["answers"][0]["answer"] == "Berlin"
def test_parallel_paths_in_pipeline_graph():
class A(RootNode):
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_1"
class B(RootNode):
def run(self, **kwargs):
kwargs["output"] += "B"
return kwargs, "output_1"
class C(RootNode):
def run(self, **kwargs):
kwargs["output"] += "C"
return kwargs, "output_1"
class D(RootNode):
def run(self, **kwargs):
kwargs["output"] += "D"
return kwargs, "output_1"
class E(RootNode):
def run(self, **kwargs):
kwargs["output"] += "E"
return kwargs, "output_1"
class JoinNode(RootNode):
def run(self, **kwargs):
kwargs["output"] = kwargs["inputs"][0]["output"] + kwargs["inputs"][1]["output"]
return kwargs, "output_1"
pipeline = Pipeline()
pipeline.add_node(name="A", component=A(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A"])
pipeline.add_node(name="C", component=C(), inputs=["B"])
pipeline.add_node(name="E", component=E(), inputs=["C"])
pipeline.add_node(name="D", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E"])
output = pipeline.run(query="test")
assert output["output"] == "ABDABCE"
pipeline = Pipeline()
pipeline.add_node(name="A", component=A(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A"])
pipeline.add_node(name="C", component=C(), inputs=["B"])
pipeline.add_node(name="D", component=D(), inputs=["B"])
pipeline.add_node(name="E", component=JoinNode(), inputs=["C", "D"])
output = pipeline.run(query="test")
assert output["output"] == "ABCABD"
def test_parallel_paths_in_pipeline_graph_with_branching():
class AWithOutput1(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_1"
class AWithOutput2(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_2"
class AWithOutputAll(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_all"
class B(RootNode):
def run(self, **kwargs):
kwargs["output"] += "B"
return kwargs, "output_1"
class C(RootNode):
def run(self, **kwargs):
kwargs["output"] += "C"
return kwargs, "output_1"
class D(RootNode):
def run(self, **kwargs):
kwargs["output"] += "D"
return kwargs, "output_1"
class E(RootNode):
def run(self, **kwargs):
kwargs["output"] += "E"
return kwargs, "output_1"
class JoinNode(RootNode):
def run(self, **kwargs):
if kwargs.get("inputs"):
kwargs["output"] = ""
for input_dict in kwargs["inputs"]:
kwargs["output"] += (input_dict["output"])
return kwargs, "output_1"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutput1(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "ABEABD"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutput2(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "AC"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutputAll(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "ACABEABD"
| [
"haystack.pipeline.ExtractiveQAPipeline",
"haystack.pipeline.FAQPipeline",
"pathlib.Path",
"haystack.retriever.sparse.ElasticsearchRetriever",
"haystack.retriever.dense.DensePassageRetriever",
"pytest.mark.parametrize",
"haystack.pipeline.DocumentSearchPipeline",
"pytest.raises",
"haystack.pipeline.Pipeline",
"haystack.pipeline.TranslationWrapperPipeline",
"haystack.pipeline.JoinDocuments"
] | [((398, 487), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""document_store_with_docs"""', "['elasticsearch']"], {'indirect': '(True)'}), "('document_store_with_docs', ['elasticsearch'],\n indirect=True)\n", (421, 487), False, 'import pytest\n'), ((1477, 1607), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""retriever_with_docs, document_store_with_docs"""', "[('elasticsearch', 'elasticsearch')]"], {'indirect': '(True)'}), "('retriever_with_docs, document_store_with_docs', [(\n 'elasticsearch', 'elasticsearch')], indirect=True)\n", (1500, 1607), False, 'import pytest\n'), ((2417, 2489), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""retriever_with_docs"""', "['tfidf']"], {'indirect': '(True)'}), "('retriever_with_docs', ['tfidf'], indirect=True)\n", (2440, 2489), False, 'import pytest\n'), ((3222, 3294), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""retriever_with_docs"""', "['tfidf']"], {'indirect': '(True)'}), "('retriever_with_docs', ['tfidf'], indirect=True)\n", (3245, 3294), False, 'import pytest\n'), ((3895, 3967), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""retriever_with_docs"""', "['tfidf']"], {'indirect': '(True)'}), "('retriever_with_docs', ['tfidf'], indirect=True)\n", (3918, 3967), False, 'import pytest\n'), ((4339, 4522), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""retriever,document_store"""', "[('embedding', 'memory'), ('embedding', 'faiss'), ('embedding', 'milvus'),\n ('embedding', 'elasticsearch')]"], {'indirect': '(True)'}), "('retriever,document_store', [('embedding', 'memory'\n ), ('embedding', 'faiss'), ('embedding', 'milvus'), ('embedding',\n 'elasticsearch')], indirect=True)\n", (4362, 4522), False, 'import pytest\n'), ((5783, 5966), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""retriever,document_store"""', "[('embedding', 'memory'), ('embedding', 'faiss'), ('embedding', 'milvus'),\n ('embedding', 'elasticsearch')]"], {'indirect': '(True)'}), "('retriever,document_store', [('embedding', 'memory'\n ), ('embedding', 'faiss'), ('embedding', 'milvus'), ('embedding',\n 'elasticsearch')], indirect=True)\n", (5806, 5966), False, 'import pytest\n'), ((6984, 7056), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""retriever_with_docs"""', "['tfidf']"], {'indirect': '(True)'}), "('retriever_with_docs', ['tfidf'], indirect=True)\n", (7007, 7056), False, 'import pytest\n'), ((7953, 8042), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""document_store_with_docs"""', "['elasticsearch']"], {'indirect': '(True)'}), "('document_store_with_docs', ['elasticsearch'],\n indirect=True)\n", (7976, 8042), False, 'import pytest\n'), ((8040, 8098), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""reader"""', "['farm']"], {'indirect': '(True)'}), "('reader', ['farm'], indirect=True)\n", (8063, 8098), False, 'import pytest\n'), ((1704, 1714), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (1712, 1714), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((2566, 2632), 'haystack.pipeline.ExtractiveQAPipeline', 'ExtractiveQAPipeline', ([], {'reader': 'reader', 'retriever': 'retriever_with_docs'}), '(reader=reader, retriever=retriever_with_docs)\n', (2586, 2632), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((3371, 3437), 
'haystack.pipeline.ExtractiveQAPipeline', 'ExtractiveQAPipeline', ([], {'reader': 'reader', 'retriever': 'retriever_with_docs'}), '(reader=reader, retriever=retriever_with_docs)\n', (3391, 3437), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((4058, 4124), 'haystack.pipeline.ExtractiveQAPipeline', 'ExtractiveQAPipeline', ([], {'reader': 'reader', 'retriever': 'retriever_with_docs'}), '(reader=reader, retriever=retriever_with_docs)\n', (4078, 4124), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((5264, 5296), 'haystack.pipeline.FAQPipeline', 'FAQPipeline', ([], {'retriever': 'retriever'}), '(retriever=retriever)\n', (5275, 5296), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((6555, 6598), 'haystack.pipeline.DocumentSearchPipeline', 'DocumentSearchPipeline', ([], {'retriever': 'retriever'}), '(retriever=retriever)\n', (6577, 6598), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((7196, 7262), 'haystack.pipeline.ExtractiveQAPipeline', 'ExtractiveQAPipeline', ([], {'reader': 'reader', 'retriever': 'retriever_with_docs'}), '(reader=reader, retriever=retriever_with_docs)\n', (7216, 7262), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((7278, 7409), 'haystack.pipeline.TranslationWrapperPipeline', 'TranslationWrapperPipeline', ([], {'input_translator': 'de_to_en_translator', 'output_translator': 'en_to_de_translator', 'pipeline': 'base_pipeline'}), '(input_translator=de_to_en_translator,\n output_translator=en_to_de_translator, pipeline=base_pipeline)\n', (7304, 7409), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((8175, 8238), 'haystack.retriever.sparse.ElasticsearchRetriever', 'ElasticsearchRetriever', ([], {'document_store': 'document_store_with_docs'}), '(document_store=document_store_with_docs)\n', (8197, 8238), False, 'from haystack.retriever.sparse import ElasticsearchRetriever\n'), ((8249, 8475), 'haystack.retriever.dense.DensePassageRetriever', 'DensePassageRetriever', ([], {'document_store': 'document_store_with_docs', 'query_embedding_model': '"""facebook/dpr-question_encoder-single-nq-base"""', 'passage_embedding_model': '"""facebook/dpr-ctx_encoder-single-nq-base"""', 'use_gpu': '(False)'}), "(document_store=document_store_with_docs,\n query_embedding_model='facebook/dpr-question_encoder-single-nq-base',\n passage_embedding_model='facebook/dpr-ctx_encoder-single-nq-base',\n use_gpu=False)\n", (8270, 8475), False, 'from haystack.retriever.dense import DensePassageRetriever\n'), ((8644, 8676), 'haystack.pipeline.JoinDocuments', 'JoinDocuments', ([], {'join_mode': '"""merge"""'}), "(join_mode='merge')\n", (8657, 8676), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((8685, 8695), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (8693, 8695), False, 'from 
haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((9005, 9070), 'haystack.pipeline.JoinDocuments', 'JoinDocuments', ([], {'join_mode': '"""merge"""', 'weights': '[1000, 1]', 'top_k_join': '(2)'}), "(join_mode='merge', weights=[1000, 1], top_k_join=2)\n", (9018, 9070), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((9079, 9089), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (9087, 9089), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((9440, 9478), 'haystack.pipeline.JoinDocuments', 'JoinDocuments', ([], {'join_mode': '"""concatenate"""'}), "(join_mode='concatenate')\n", (9453, 9478), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((9487, 9497), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (9495, 9497), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((9810, 9825), 'haystack.pipeline.JoinDocuments', 'JoinDocuments', ([], {}), '()\n', (9823, 9825), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((9834, 9844), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (9842, 9844), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((11096, 11106), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (11104, 11106), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((11586, 11596), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (11594, 11596), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((13399, 13409), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (13407, 13409), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((13922, 13932), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (13930, 13932), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((14441, 14451), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (14449, 14451), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((624, 667), 'pathlib.Path', 'Path', (['"""samples/pipeline/test_pipeline.yaml"""'], {}), "('samples/pipeline/test_pipeline.yaml')\n", (628, 667), False, 'from pathlib import Path\n'), ((936, 979), 'pathlib.Path', 'Path', (['"""samples/pipeline/test_pipeline.yaml"""'], {}), "('samples/pipeline/test_pipeline.yaml')\n", (940, 979), False, 'from pathlib import Path\n'), ((1296, 1320), 
'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1309, 1320), False, 'import pytest\n'), ((1807, 1836), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1820, 1836), False, 'import pytest\n'), ((1944, 1973), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1957, 1973), False, 'import pytest\n'), ((2089, 2113), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2102, 2113), False, 'import pytest\n'), ((2221, 2245), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2234, 2245), False, 'import pytest\n'), ((2266, 2276), 'haystack.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (2274, 2276), False, 'from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, DocumentSearchPipeline, RootNode\n'), ((770, 806), 'pathlib.Path', 'Path', (['"""samples/pdf/sample_pdf_1.pdf"""'], {}), "('samples/pdf/sample_pdf_1.pdf')\n", (774, 806), False, 'from pathlib import Path\n'), ((1359, 1402), 'pathlib.Path', 'Path', (['"""samples/pipeline/test_pipeline.yaml"""'], {}), "('samples/pipeline/test_pipeline.yaml')\n", (1363, 1402), False, 'from pathlib import Path\n')] |
# pylint: disable=protected-access
"""
Test the wrappers for the C API.
"""
import os
from contextlib import contextmanager
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
import xarray as xr
from packaging.version import Version
from pygmt import Figure, clib
from pygmt.clib.conversion import dataarray_to_matrix
from pygmt.clib.session import FAMILIES, VIAS
from pygmt.exceptions import (
GMTCLibError,
GMTCLibNoSessionError,
GMTInvalidInput,
GMTVersionError,
)
from pygmt.helpers import GMTTempFile
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
with clib.Session() as _lib:
gmt_version = Version(_lib.info["version"])
@contextmanager
def mock(session, func, returns=None, mock_func=None):
"""
Mock a GMT C API function to make it always return a given value.
Used to test that exceptions are raised when API functions fail by
producing a NULL pointer as output or non-zero status codes.
Needed because it's not easy to get some API functions to fail without
inducing a Segmentation Fault (which is a good thing because libgmt usually
only fails with errors).
"""
if mock_func is None:
def mock_api_function(*args): # pylint: disable=unused-argument
"""
A mock GMT API function that always returns a given value.
"""
return returns
mock_func = mock_api_function
get_libgmt_func = session.get_libgmt_func
def mock_get_libgmt_func(name, argtypes=None, restype=None):
"""
Return our mock function.
"""
if name == func:
return mock_func
return get_libgmt_func(name, argtypes, restype)
setattr(session, "get_libgmt_func", mock_get_libgmt_func)
yield
setattr(session, "get_libgmt_func", get_libgmt_func)
def test_getitem():
"""
Test that I can get correct constants from the C lib.
"""
ses = clib.Session()
assert ses["GMT_SESSION_EXTERNAL"] != -99999
assert ses["GMT_MODULE_CMD"] != -99999
assert ses["GMT_PAD_DEFAULT"] != -99999
assert ses["GMT_DOUBLE"] != -99999
with pytest.raises(GMTCLibError):
ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement
def test_create_destroy_session():
"""
Test that create and destroy session are called without errors.
"""
    # Create two sessions and make sure they are not pointing to the same memory
session1 = clib.Session()
session1.create(name="test_session1")
assert session1.session_pointer is not None
session2 = clib.Session()
session2.create(name="test_session2")
assert session2.session_pointer is not None
assert session2.session_pointer != session1.session_pointer
session1.destroy()
session2.destroy()
# Create and destroy a session twice
ses = clib.Session()
for __ in range(2):
with pytest.raises(GMTCLibNoSessionError):
ses.session_pointer # pylint: disable=pointless-statement
ses.create("session1")
assert ses.session_pointer is not None
ses.destroy()
with pytest.raises(GMTCLibNoSessionError):
ses.session_pointer # pylint: disable=pointless-statement
def test_create_session_fails():
"""
Check that an exception is raised when failing to create a session.
"""
ses = clib.Session()
with mock(ses, "GMT_Create_Session", returns=None):
with pytest.raises(GMTCLibError):
ses.create("test-session-name")
# Should fail if trying to create a session before destroying the old one.
ses.create("test1")
with pytest.raises(GMTCLibError):
ses.create("test2")
def test_destroy_session_fails():
"""
Fail to destroy session when given bad input.
"""
ses = clib.Session()
with pytest.raises(GMTCLibNoSessionError):
ses.destroy()
ses.create("test-session")
with mock(ses, "GMT_Destroy_Session", returns=1):
with pytest.raises(GMTCLibError):
ses.destroy()
ses.destroy()
def test_call_module():
"""
Run a command to see if call_module works.
"""
data_fname = os.path.join(TEST_DATA_DIR, "points.txt")
out_fname = "test_call_module.txt"
with clib.Session() as lib:
with GMTTempFile() as out_fname:
lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name))
assert os.path.exists(out_fname.name)
output = out_fname.read().strip()
assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338"
def test_call_module_invalid_arguments():
"""
Fails for invalid module arguments.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.call_module("info", "bogus-data.bla")
def test_call_module_invalid_name():
"""
Fails when given bad input.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.call_module("meh", "")
def test_call_module_error_message():
"""
Check is the GMT error message was captured.
"""
with clib.Session() as lib:
try:
lib.call_module("info", "bogus-data.bla")
except GMTCLibError as error:
assert "Module 'info' failed with status code" in str(error)
assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error)
def test_method_no_session():
"""
Fails when not in a session.
"""
# Create an instance of Session without "with" so no session is created.
lib = clib.Session()
with pytest.raises(GMTCLibNoSessionError):
lib.call_module("gmtdefaults", "")
with pytest.raises(GMTCLibNoSessionError):
lib.session_pointer # pylint: disable=pointless-statement
def test_parse_constant_single():
"""
Parsing a single family argument correctly.
"""
lib = clib.Session()
for family in FAMILIES:
parsed = lib._parse_constant(family, valid=FAMILIES)
assert parsed == lib[family]
def test_parse_constant_composite():
"""
Parsing a composite constant argument (separated by |) correctly.
"""
lib = clib.Session()
test_cases = ((family, via) for family in FAMILIES for via in VIAS)
for family, via in test_cases:
composite = "|".join([family, via])
expected = lib[family] + lib[via]
parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS)
assert parsed == expected
def test_parse_constant_fails():
"""
Check if the function fails when given bad input.
"""
lib = clib.Session()
test_cases = [
"SOME_random_STRING",
"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR",
"GMT_IS_DATASET|NOT_A_PROPER_VIA",
"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX",
"NOT_A_PROPER_FAMILY|ALSO_INVALID",
]
for test_case in test_cases:
with pytest.raises(GMTInvalidInput):
lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS)
# Should also fail if not given valid modifiers but is using them anyway.
# This should work...
lib._parse_constant(
"GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS
)
# But this shouldn't.
with pytest.raises(GMTInvalidInput):
lib._parse_constant(
"GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None
)
def test_create_data_dataset():
"""
Run the function to make sure it doesn't fail badly.
"""
with clib.Session() as lib:
# Dataset from vectors
data_vector = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_VECTOR",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0], # columns, rows, layers, dtype
)
# Dataset from matrices
data_matrix = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_MATRIX",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0],
)
assert data_vector != data_matrix
def test_create_data_grid_dim():
"""
Create a grid ignoring range and inc.
"""
with clib.Session() as lib:
# Grids from matrices using dim
lib.create_data(
family="GMT_IS_GRID|GMT_VIA_MATRIX",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0],
)
def test_create_data_grid_range():
"""
Create a grid specifying range and inc instead of dim.
"""
with clib.Session() as lib:
# Grids from matrices using range and int
lib.create_data(
family="GMT_IS_GRID|GMT_VIA_MATRIX",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
def test_create_data_fails():
"""
Check that create_data raises exceptions for invalid input and output.
"""
# Passing in invalid mode
with pytest.raises(GMTInvalidInput):
with clib.Session() as lib:
lib.create_data(
family="GMT_IS_DATASET",
geometry="GMT_IS_SURFACE",
mode="Not_a_valid_mode",
dim=[0, 0, 1, 0],
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
# Passing in invalid geometry
with pytest.raises(GMTInvalidInput):
with clib.Session() as lib:
lib.create_data(
family="GMT_IS_GRID",
geometry="Not_a_valid_geometry",
mode="GMT_CONTAINER_ONLY",
dim=[0, 0, 1, 0],
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
# If the data pointer returned is None (NULL pointer)
with pytest.raises(GMTCLibError):
with clib.Session() as lib:
with mock(lib, "GMT_Create_Data", returns=None):
lib.create_data(
family="GMT_IS_DATASET",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
dim=[11, 10, 2, 0],
)
def test_virtual_file():
"""
Test passing in data via a virtual file with a Dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (5, 3)
for dtype in dtypes:
with clib.Session() as lib:
family = "GMT_IS_DATASET|GMT_VIA_MATRIX"
geometry = "GMT_IS_POINT"
dataset = lib.create_data(
family=family,
geometry=geometry,
mode="GMT_CONTAINER_ONLY",
dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype
)
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
lib.put_matrix(dataset, matrix=data)
# Add the dataset to a virtual file and pass it along to gmt info
vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset)
with lib.open_virtual_file(*vfargs) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
assert output == expected
def test_virtual_file_fails():
"""
Check that opening and closing virtual files raises an exception for non-
zero return codes.
"""
vfargs = (
"GMT_IS_DATASET|GMT_VIA_MATRIX",
"GMT_IS_POINT",
"GMT_IN|GMT_IS_REFERENCE",
None,
)
# Mock Open_VirtualFile to test the status check when entering the context.
# If the exception is raised, the code won't get to the closing of the
# virtual file.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1):
with pytest.raises(GMTCLibError):
with lib.open_virtual_file(*vfargs):
print("Should not get to this code")
# Test the status check when closing the virtual file
# Mock the opening to return 0 (success) so that we don't open a file that
# we won't close later.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock(
lib, "GMT_Close_VirtualFile", returns=1
):
with pytest.raises(GMTCLibError):
with lib.open_virtual_file(*vfargs):
pass
print("Shouldn't get to this code either")
def test_virtual_file_bad_direction():
"""
Test passing an invalid direction argument.
"""
with clib.Session() as lib:
vfargs = (
"GMT_IS_DATASET|GMT_VIA_MATRIX",
"GMT_IS_POINT",
"GMT_IS_GRID", # The invalid direction argument
0,
)
with pytest.raises(GMTInvalidInput):
with lib.open_virtual_file(*vfargs):
print("This should have failed")
def test_virtualfile_from_vectors():
"""
Test the automation for transforming vectors to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 10
for dtype in dtypes:
x = np.arange(size, dtype=dtype)
y = np.arange(size, size * 2, 1, dtype=dtype)
z = np.arange(size * 2, size * 3, 1, dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_one_string_or_object_column(dtype):
"""
Test passing in one column with string or object dtype into virtual file
dataset.
"""
size = 5
x = np.arange(size, dtype=np.int32)
y = np.arange(size, size * 2, 1, dtype=np.int32)
strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, strings) as vfile:
with GMTTempFile() as outfile:
lib.call_module("convert", f"{vfile} ->{outfile.name}")
output = outfile.read(keep_tabs=True)
expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings))
assert output == expected
@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_two_string_or_object_columns(dtype):
"""
Test passing in two columns of string or object dtype into virtual file
dataset.
"""
size = 5
x = np.arange(size, dtype=np.int32)
y = np.arange(size, size * 2, 1, dtype=np.int32)
strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype)
strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile:
with GMTTempFile() as outfile:
lib.call_module("convert", f"{vfile} ->{outfile.name}")
output = outfile.read(keep_tabs=True)
expected = "".join(
f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2)
)
assert output == expected
def test_virtualfile_from_vectors_transpose():
"""
Test transforming matrix columns to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(*data.T) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T]
)
expected = "{}\n".format(bounds)
assert output == expected
def test_virtualfile_from_vectors_diff_size():
"""
Test the function fails for arrays of different sizes.
"""
x = np.arange(5)
y = np.arange(6)
with clib.Session() as lib:
with pytest.raises(GMTInvalidInput):
with lib.virtualfile_from_vectors(x, y):
print("This should have failed")
def test_virtualfile_from_matrix():
"""
Test transforming a matrix to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
with clib.Session() as lib:
with lib.virtualfile_from_matrix(data) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
assert output == expected
def test_virtualfile_from_matrix_slice():
"""
Test transforming a slice of a larger array to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (10, 6)
for dtype in dtypes:
full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
rows = 5
cols = 3
data = full_data[:rows, :cols]
with clib.Session() as lib:
with lib.virtualfile_from_matrix(data) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(rows, bounds)
assert output == expected
def test_virtualfile_from_vectors_pandas():
"""
Pass vectors to a dataset using pandas Series.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 13
for dtype in dtypes:
data = pd.DataFrame(
data=dict(
x=np.arange(size, dtype=dtype),
y=np.arange(size, size * 2, 1, dtype=dtype),
z=np.arange(size * 2, size * 3, 1, dtype=dtype),
)
)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
[
"<{:.0f}/{:.0f}>".format(i.min(), i.max())
for i in (data.x, data.y, data.z)
]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
def test_virtualfile_from_vectors_arraylike():
"""
Pass array-like vectors to a dataset.
"""
size = 13
x = list(range(0, size, 1))
y = tuple(range(size, size * 2, 1))
z = range(size * 2, size * 3, 1)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(min(i), max(i)) for i in (x, y, z)]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
def test_extract_region_fails():
"""
Check that extract region fails if nothing has been plotted.
"""
Figure()
with pytest.raises(GMTCLibError):
with clib.Session() as lib:
lib.extract_region()
def test_extract_region_two_figures():
"""
Extract region should handle multiple figures existing at the same time.
"""
# Make two figures before calling extract_region to make sure that it's
# getting from the current figure, not the last figure.
fig1 = Figure()
region1 = np.array([0, 10, -20, -10])
fig1.coast(region=region1, projection="M6i", frame=True, land="black")
fig2 = Figure()
fig2.basemap(region="US.HI+r5", projection="M6i", frame=True)
# Activate the first figure and extract the region from it
# Use in a different session to avoid any memory problems.
with clib.Session() as lib:
lib.call_module("figure", "{} -".format(fig1._name))
with clib.Session() as lib:
wesn1 = lib.extract_region()
npt.assert_allclose(wesn1, region1)
# Now try it with the second one
with clib.Session() as lib:
lib.call_module("figure", "{} -".format(fig2._name))
with clib.Session() as lib:
wesn2 = lib.extract_region()
npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0]))
def test_write_data_fails():
"""
Check that write data raises an exception for non-zero return codes.
"""
# It's hard to make the C API function fail without causing a Segmentation
    # Fault. Can't test this by giving a bad file name because if
# output=='', GMT will just write to stdout and spaces are valid file
# names. Use a mock instead just to exercise this part of the code.
with clib.Session() as lib:
with mock(lib, "GMT_Write_Data", returns=1):
with pytest.raises(GMTCLibError):
lib.write_data(
"GMT_IS_VECTOR",
"GMT_IS_POINT",
"GMT_WRITE_SET",
[1] * 6,
"some-file-name",
None,
)
def test_dataarray_to_matrix_works():
"""
Check that dataarray_to_matrix returns correct output.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=0, stop=4, num=3)
y = np.linspace(start=5, stop=9, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
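    # The assertions below expect the returned matrix to be flipped along the
    # y axis (first row at maximum y), hence the comparison with np.flipud(data).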
npt.assert_allclose(actual=matrix, desired=np.flipud(data))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1] - y[0]])
def test_dataarray_to_matrix_negative_x_increment():
"""
Check if dataarray_to_matrix returns correct output with flipped x.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=4, stop=0, num=3)
y = np.linspace(start=5, stop=9, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0, 1)))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_negative_y_increment():
"""
Check that dataarray_to_matrix returns correct output with flipped y.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=0, stop=4, num=3)
y = np.linspace(start=9, stop=5, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=data)
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_negative_x_and_y_increment():
"""
Check that dataarray_to_matrix returns correct output with flipped x/y.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=4, stop=0, num=3)
y = np.linspace(start=9, stop=5, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.fliplr(data))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_dims_fails():
"""
Check that it fails for > 2 dims.
"""
# Make a 3D regular grid
data = np.ones((10, 12, 11), dtype="float32")
x = np.arange(11)
y = np.arange(12)
z = np.arange(10)
grid = xr.DataArray(data, coords=[("z", z), ("y", y), ("x", x)])
with pytest.raises(GMTInvalidInput):
dataarray_to_matrix(grid)
def test_dataarray_to_matrix_inc_fails():
"""
Check that it fails for variable increments.
"""
data = np.ones((4, 5), dtype="float64")
x = np.linspace(0, 1, 5)
y = np.logspace(2, 3, 4)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
with pytest.raises(GMTInvalidInput):
dataarray_to_matrix(grid)
def test_get_default():
"""
Make sure get_default works without crashing and gives reasonable results.
"""
with clib.Session() as lib:
assert lib.get_default("API_GRID_LAYOUT") in ["rows", "columns"]
assert int(lib.get_default("API_CORES")) >= 1
assert Version(lib.get_default("API_VERSION")) >= Version("6.2.0")
def test_get_default_fails():
"""
Make sure get_default raises an exception for invalid names.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.get_default("NOT_A_VALID_NAME")
def test_info_dict():
"""
Make sure the clib.Session.info dict is working.
"""
# Check if there are no errors or segfaults from getting all of the
# properties.
with clib.Session() as lib:
assert lib.info
# Mock GMT_Get_Default to return always the same string
def mock_defaults(api, name, value): # pylint: disable=unused-argument
"""
Put 'bla' in the value buffer.
"""
value.value = b"bla"
return 0
ses = clib.Session()
ses.create("test-session")
with mock(ses, "GMT_Get_Default", mock_func=mock_defaults):
# Check for an empty dictionary
assert ses.info
for key in ses.info:
assert ses.info[key] == "bla"
ses.destroy()
def test_fails_for_wrong_version():
"""
Make sure the clib.Session raises an exception if GMT is too old.
"""
# Mock GMT_Get_Default to return an old version
def mock_defaults(api, name, value): # pylint: disable=unused-argument
"""
Return an old version.
"""
if name == b"API_VERSION":
value.value = b"5.4.3"
else:
value.value = b"bla"
return 0
lib = clib.Session()
with mock(lib, "GMT_Get_Default", mock_func=mock_defaults):
with pytest.raises(GMTVersionError):
with lib:
assert lib.info["version"] != "5.4.3"
# Make sure the session is closed when the exception is raised.
with pytest.raises(GMTCLibNoSessionError):
assert lib.session_pointer
| [
"pygmt.clib.Session",
"numpy.array",
"pygmt.Figure",
"numpy.arange",
"os.path.exists",
"numpy.flip",
"pygmt.helpers.GMTTempFile",
"numpy.testing.assert_allclose",
"pygmt.clib.conversion.dataarray_to_matrix",
"numpy.linspace",
"numpy.logspace",
"numpy.ones",
"numpy.flipud",
"numpy.fliplr",
"os.path.dirname",
"pytest.raises",
"os.path.join",
"pytest.mark.parametrize",
"packaging.version.Version",
"xarray.DataArray"
] | [((14171, 14218), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[str, object]'], {}), "('dtype', [str, object])\n", (14194, 14218), False, 'import pytest\n'), ((14965, 15012), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[str, object]'], {}), "('dtype', [str, object])\n", (14988, 15012), False, 'import pytest\n'), ((583, 608), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (598, 608), False, 'import os\n'), ((624, 638), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (636, 638), False, 'from pygmt import Figure, clib\n'), ((666, 695), 'packaging.version.Version', 'Version', (["_lib.info['version']"], {}), "(_lib.info['version'])\n", (673, 695), False, 'from packaging.version import Version\n'), ((1965, 1979), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (1977, 1979), False, 'from pygmt import Figure, clib\n'), ((2483, 2497), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (2495, 2497), False, 'from pygmt import Figure, clib\n'), ((2603, 2617), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (2615, 2617), False, 'from pygmt import Figure, clib\n'), ((2869, 2883), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (2881, 2883), False, 'from pygmt import Figure, clib\n'), ((3385, 3399), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (3397, 3399), False, 'from pygmt import Figure, clib\n'), ((3823, 3837), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (3835, 3837), False, 'from pygmt import Figure, clib\n'), ((4184, 4225), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', '"""points.txt"""'], {}), "(TEST_DATA_DIR, 'points.txt')\n", (4196, 4225), False, 'import os\n'), ((5590, 5604), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (5602, 5604), False, 'from pygmt import Figure, clib\n'), ((5919, 5933), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (5931, 5933), False, 'from pygmt import Figure, clib\n'), ((6195, 6209), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (6207, 6209), False, 'from pygmt import Figure, clib\n'), ((6638, 6652), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (6650, 6652), False, 'from pygmt import Figure, clib\n'), ((14416, 14447), 'numpy.arange', 'np.arange', (['size'], {'dtype': 'np.int32'}), '(size, dtype=np.int32)\n', (14425, 14447), True, 'import numpy as np\n'), ((14456, 14500), 'numpy.arange', 'np.arange', (['size', '(size * 2)', '(1)'], {'dtype': 'np.int32'}), '(size, size * 2, 1, dtype=np.int32)\n', (14465, 14500), True, 'import numpy as np\n'), ((14515, 14578), 'numpy.array', 'np.array', (["['a', 'bc', 'defg', 'hijklmn', 'opqrst']"], {'dtype': 'dtype'}), "(['a', 'bc', 'defg', 'hijklmn', 'opqrst'], dtype=dtype)\n", (14523, 14578), True, 'import numpy as np\n'), ((15210, 15241), 'numpy.arange', 'np.arange', (['size'], {'dtype': 'np.int32'}), '(size, dtype=np.int32)\n', (15219, 15241), True, 'import numpy as np\n'), ((15250, 15294), 'numpy.arange', 'np.arange', (['size', '(size * 2)', '(1)'], {'dtype': 'np.int32'}), '(size, size * 2, 1, dtype=np.int32)\n', (15259, 15294), True, 'import numpy as np\n'), ((15310, 15368), 'numpy.array', 'np.array', (["['a', 'bc', 'def', 'ghij', 'klmno']"], {'dtype': 'dtype'}), "(['a', 'bc', 'def', 'ghij', 'klmno'], dtype=dtype)\n", (15318, 15368), True, 'import numpy as np\n'), ((15384, 15442), 'numpy.array', 'np.array', (["['pqrst', 'uvwx', 'yz!', '@#', '$']"], {'dtype': 'dtype'}), "(['pqrst', 'uvwx', 'yz!', '@#', '$'], dtype=dtype)\n", (15392, 15442), 
True, 'import numpy as np\n'), ((16822, 16834), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (16831, 16834), True, 'import numpy as np\n'), ((16843, 16855), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (16852, 16855), True, 'import numpy as np\n'), ((20704, 20712), 'pygmt.Figure', 'Figure', ([], {}), '()\n', (20710, 20712), False, 'from pygmt import Figure, clib\n'), ((21101, 21109), 'pygmt.Figure', 'Figure', ([], {}), '()\n', (21107, 21109), False, 'from pygmt import Figure, clib\n'), ((21124, 21151), 'numpy.array', 'np.array', (['[0, 10, -20, -10]'], {}), '([0, 10, -20, -10])\n', (21132, 21151), True, 'import numpy as np\n'), ((21239, 21247), 'pygmt.Figure', 'Figure', ([], {}), '()\n', (21245, 21247), False, 'from pygmt import Figure, clib\n'), ((22878, 22913), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(4)', 'num': '(3)'}), '(start=0, stop=4, num=3)\n', (22889, 22913), True, 'import numpy as np\n'), ((22922, 22957), 'numpy.linspace', 'np.linspace', ([], {'start': '(5)', 'stop': '(9)', 'num': '(3)'}), '(start=5, stop=9, num=3)\n', (22933, 22957), True, 'import numpy as np\n'), ((22969, 23016), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('y', y), ('x', x)]"}), "(data, coords=[('y', y), ('x', x)])\n", (22981, 23016), True, 'import xarray as xr\n'), ((23044, 23069), 'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (23063, 23069), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((23223, 23290), 'numpy.testing.assert_allclose', 'npt.assert_allclose', ([], {'actual': 'inc', 'desired': '[x[1] - x[0], y[1] - y[0]]'}), '(actual=inc, desired=[x[1] - x[0], y[1] - y[0]])\n', (23242, 23290), True, 'import numpy.testing as npt\n'), ((23477, 23512), 'numpy.linspace', 'np.linspace', ([], {'start': '(4)', 'stop': '(0)', 'num': '(3)'}), '(start=4, stop=0, num=3)\n', (23488, 23512), True, 'import numpy as np\n'), ((23521, 23556), 'numpy.linspace', 'np.linspace', ([], {'start': '(5)', 'stop': '(9)', 'num': '(3)'}), '(start=5, stop=9, num=3)\n', (23532, 23556), True, 'import numpy as np\n'), ((23568, 23615), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('y', y), ('x', x)]"}), "(data, coords=[('y', y), ('x', x)])\n", (23580, 23615), True, 'import xarray as xr\n'), ((23643, 23668), 'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (23662, 23668), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((24099, 24134), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(4)', 'num': '(3)'}), '(start=0, stop=4, num=3)\n', (24110, 24134), True, 'import numpy as np\n'), ((24143, 24178), 'numpy.linspace', 'np.linspace', ([], {'start': '(9)', 'stop': '(5)', 'num': '(3)'}), '(start=9, stop=5, num=3)\n', (24154, 24178), True, 'import numpy as np\n'), ((24190, 24237), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('y', y), ('x', x)]"}), "(data, coords=[('y', y), ('x', x)])\n", (24202, 24237), True, 'import xarray as xr\n'), ((24265, 24290), 'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (24284, 24290), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((24295, 24343), 'numpy.testing.assert_allclose', 'npt.assert_allclose', ([], {'actual': 'matrix', 'desired': 'data'}), '(actual=matrix, desired=data)\n', (24314, 24343), True, 'import numpy.testing as npt\n'), ((24707, 24742), 'numpy.linspace', 'np.linspace', ([], {'start': '(4)', 'stop': 
'(0)', 'num': '(3)'}), '(start=4, stop=0, num=3)\n', (24718, 24742), True, 'import numpy as np\n'), ((24751, 24786), 'numpy.linspace', 'np.linspace', ([], {'start': '(9)', 'stop': '(5)', 'num': '(3)'}), '(start=9, stop=5, num=3)\n', (24762, 24786), True, 'import numpy as np\n'), ((24798, 24845), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('y', y), ('x', x)]"}), "(data, coords=[('y', y), ('x', x)])\n", (24810, 24845), True, 'import xarray as xr\n'), ((24873, 24898), 'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (24892, 24898), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((25269, 25307), 'numpy.ones', 'np.ones', (['(10, 12, 11)'], {'dtype': '"""float32"""'}), "((10, 12, 11), dtype='float32')\n", (25276, 25307), True, 'import numpy as np\n'), ((25316, 25329), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (25325, 25329), True, 'import numpy as np\n'), ((25338, 25351), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (25347, 25351), True, 'import numpy as np\n'), ((25360, 25373), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (25369, 25373), True, 'import numpy as np\n'), ((25385, 25442), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('z', z), ('y', y), ('x', x)]"}), "(data, coords=[('z', z), ('y', y), ('x', x)])\n", (25397, 25442), True, 'import xarray as xr\n'), ((25638, 25670), 'numpy.ones', 'np.ones', (['(4, 5)'], {'dtype': '"""float64"""'}), "((4, 5), dtype='float64')\n", (25645, 25670), True, 'import numpy as np\n'), ((25679, 25699), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (25690, 25699), True, 'import numpy as np\n'), ((25708, 25728), 'numpy.logspace', 'np.logspace', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (25719, 25728), True, 'import numpy as np\n'), ((25740, 25787), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('y', y), ('x', x)]"}), "(data, coords=[('y', y), ('x', x)])\n", (25752, 25787), True, 'import xarray as xr\n'), ((26949, 26963), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (26961, 26963), False, 'from pygmt import Figure, clib\n'), ((27665, 27679), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (27677, 27679), False, 'from pygmt import Figure, clib\n'), ((2164, 2191), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (2177, 2191), False, 'import pytest\n'), ((3654, 3681), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (3667, 3681), False, 'import pytest\n'), ((3847, 3883), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (3860, 3883), False, 'import pytest\n'), ((4274, 4288), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (4286, 4288), False, 'from pygmt import Figure, clib\n'), ((4704, 4718), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (4716, 4718), False, 'from pygmt import Figure, clib\n'), ((4919, 4933), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (4931, 4933), False, 'from pygmt import Figure, clib\n'), ((5137, 5151), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (5149, 5151), False, 'from pygmt import Figure, clib\n'), ((5614, 5650), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (5627, 5650), False, 'import pytest\n'), ((5704, 5740), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (5717, 5740), False, 'import pytest\n'), ((7305, 
7335), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (7318, 7335), False, 'import pytest\n'), ((7574, 7588), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (7586, 7588), False, 'from pygmt import Figure, clib\n'), ((8254, 8268), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (8266, 8268), False, 'from pygmt import Figure, clib\n'), ((8632, 8646), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (8644, 8646), False, 'from pygmt import Figure, clib\n'), ((9105, 9135), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (9118, 9135), False, 'import pytest\n'), ((9502, 9532), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (9515, 9532), False, 'import pytest\n'), ((9929, 9956), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (9942, 9956), False, 'import pytest\n'), ((12110, 12124), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (12122, 12124), False, 'from pygmt import Figure, clib\n'), ((12498, 12512), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (12510, 12512), False, 'from pygmt import Figure, clib\n'), ((12909, 12923), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (12921, 12923), False, 'from pygmt import Figure, clib\n'), ((13498, 13526), 'numpy.arange', 'np.arange', (['size'], {'dtype': 'dtype'}), '(size, dtype=dtype)\n', (13507, 13526), True, 'import numpy as np\n'), ((13539, 13580), 'numpy.arange', 'np.arange', (['size', '(size * 2)', '(1)'], {'dtype': 'dtype'}), '(size, size * 2, 1, dtype=dtype)\n', (13548, 13580), True, 'import numpy as np\n'), ((13593, 13638), 'numpy.arange', 'np.arange', (['(size * 2)', '(size * 3)', '(1)'], {'dtype': 'dtype'}), '(size * 2, size * 3, 1, dtype=dtype)\n', (13602, 13638), True, 'import numpy as np\n'), ((14588, 14602), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (14600, 14602), False, 'from pygmt import Figure, clib\n'), ((15452, 15466), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (15464, 15466), False, 'from pygmt import Figure, clib\n'), ((16865, 16879), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (16877, 16879), False, 'from pygmt import Figure, clib\n'), ((20106, 20120), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (20118, 20120), False, 'from pygmt import Figure, clib\n'), ((20722, 20749), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (20735, 20749), False, 'import pytest\n'), ((21450, 21464), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (21462, 21464), False, 'from pygmt import Figure, clib\n'), ((21543, 21557), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (21555, 21557), False, 'from pygmt import Figure, clib\n'), ((21611, 21646), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['wesn1', 'region1'], {}), '(wesn1, region1)\n', (21630, 21646), True, 'import numpy.testing as npt\n'), ((21694, 21708), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (21706, 21708), False, 'from pygmt import Figure, clib\n'), ((21787, 21801), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (21799, 21801), False, 'from pygmt import Figure, clib\n'), ((22345, 22359), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (22357, 22359), False, 'from pygmt import Figure, clib\n'), ((25452, 25482), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (25465, 25482), False, 'import pytest\n'), ((25492, 25517), 
'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (25511, 25517), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((25797, 25827), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (25810, 25827), False, 'import pytest\n'), ((25837, 25862), 'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (25856, 25862), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((25993, 26007), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (26005, 26007), False, 'from pygmt import Figure, clib\n'), ((26340, 26354), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (26352, 26354), False, 'from pygmt import Figure, clib\n'), ((26645, 26659), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (26657, 26659), False, 'from pygmt import Figure, clib\n'), ((27942, 27978), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (27955, 27978), False, 'import pytest\n'), ((2921, 2957), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (2934, 2957), False, 'import pytest\n'), ((3143, 3179), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (3156, 3179), False, 'import pytest\n'), ((3469, 3496), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (3482, 3496), False, 'import pytest\n'), ((4005, 4032), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (4018, 4032), False, 'import pytest\n'), ((4310, 4323), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (4321, 4323), False, 'from pygmt.helpers import GMTTempFile\n'), ((4442, 4472), 'os.path.exists', 'os.path.exists', (['out_fname.name'], {}), '(out_fname.name)\n', (4456, 4472), False, 'import os\n'), ((4740, 4767), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (4753, 4767), False, 'import pytest\n'), ((4955, 4982), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (4968, 4982), False, 'import pytest\n'), ((6943, 6973), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (6956, 6973), False, 'import pytest\n'), ((9150, 9164), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (9162, 9164), False, 'from pygmt import Figure, clib\n'), ((9547, 9561), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (9559, 9561), False, 'from pygmt import Figure, clib\n'), ((9971, 9985), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (9983, 9985), False, 'from pygmt import Figure, clib\n'), ((10510, 10524), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (10522, 10524), False, 'from pygmt import Figure, clib\n'), ((12192, 12219), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (12205, 12219), False, 'import pytest\n'), ((12641, 12668), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (12654, 12668), False, 'import pytest\n'), ((13123, 13153), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (13136, 13153), False, 'import pytest\n'), ((13652, 13666), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (13664, 13666), False, 'from pygmt import Figure, clib\n'), ((16200, 16214), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (16212, 16214), False, 'from pygmt import Figure, clib\n'), ((16901, 16931), 
'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (16914, 16931), False, 'import pytest\n'), ((17341, 17355), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (17353, 17355), False, 'from pygmt import Figure, clib\n'), ((18268, 18282), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (18280, 18282), False, 'from pygmt import Figure, clib\n'), ((19263, 19277), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (19275, 19277), False, 'from pygmt import Figure, clib\n'), ((20764, 20778), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (20776, 20778), False, 'from pygmt import Figure, clib\n'), ((21882, 21920), 'numpy.array', 'np.array', (['[-165.0, -150.0, 15.0, 25.0]'], {}), '([-165.0, -150.0, 15.0, 25.0])\n', (21890, 21920), True, 'import numpy as np\n'), ((22856, 22868), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (22865, 22868), True, 'import numpy as np\n'), ((23117, 23132), 'numpy.flipud', 'np.flipud', (['data'], {}), '(data)\n', (23126, 23132), True, 'import numpy as np\n'), ((23455, 23467), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (23464, 23467), True, 'import numpy as np\n'), ((23716, 23742), 'numpy.flip', 'np.flip', (['data'], {'axis': '(0, 1)'}), '(data, axis=(0, 1))\n', (23723, 23742), True, 'import numpy as np\n'), ((24077, 24089), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (24086, 24089), True, 'import numpy as np\n'), ((24685, 24697), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (24694, 24697), True, 'import numpy as np\n'), ((24946, 24961), 'numpy.fliplr', 'np.fliplr', (['data'], {}), '(data)\n', (24955, 24961), True, 'import numpy as np\n'), ((26201, 26217), 'packaging.version.Version', 'Version', (['"""6.2.0"""'], {}), "('6.2.0')\n", (26208, 26217), False, 'from packaging.version import Version\n'), ((26376, 26403), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (26389, 26403), False, 'import pytest\n'), ((27757, 27787), 'pytest.raises', 'pytest.raises', (['GMTVersionError'], {}), '(GMTVersionError)\n', (27770, 27787), False, 'import pytest\n'), ((14695, 14708), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (14706, 14708), False, 'from pygmt.helpers import GMTTempFile\n'), ((15570, 15583), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (15581, 15583), False, 'from pygmt.helpers import GMTTempFile\n'), ((16128, 16171), 'numpy.arange', 'np.arange', (['(shape[0] * shape[1])'], {'dtype': 'dtype'}), '(shape[0] * shape[1], dtype=dtype)\n', (16137, 16171), True, 'import numpy as np\n'), ((17269, 17312), 'numpy.arange', 'np.arange', (['(shape[0] * shape[1])'], {'dtype': 'dtype'}), '(shape[0] * shape[1], dtype=dtype)\n', (17278, 17312), True, 'import numpy as np\n'), ((18123, 18166), 'numpy.arange', 'np.arange', (['(shape[0] * shape[1])'], {'dtype': 'dtype'}), '(shape[0] * shape[1], dtype=dtype)\n', (18132, 18166), True, 'import numpy as np\n'), ((20207, 20220), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (20218, 20220), False, 'from pygmt.helpers import GMTTempFile\n'), ((22438, 22465), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (22451, 22465), False, 'import pytest\n'), ((10885, 10928), 'numpy.arange', 'np.arange', (['(shape[0] * shape[1])'], {'dtype': 'dtype'}), '(shape[0] * shape[1], dtype=dtype)\n', (10894, 10928), True, 'import numpy as np\n'), ((11226, 11239), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (11237, 11239), False, 'from pygmt.helpers 
import GMTTempFile\n'), ((13761, 13774), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (13772, 13774), False, 'from pygmt.helpers import GMTTempFile\n'), ((16309, 16322), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (16320, 16322), False, 'from pygmt.helpers import GMTTempFile\n'), ((17446, 17459), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (17457, 17459), False, 'from pygmt.helpers import GMTTempFile\n'), ((18373, 18386), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (18384, 18386), False, 'from pygmt.helpers import GMTTempFile\n'), ((19387, 19400), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (19398, 19400), False, 'from pygmt.helpers import GMTTempFile\n'), ((19070, 19098), 'numpy.arange', 'np.arange', (['size'], {'dtype': 'dtype'}), '(size, dtype=dtype)\n', (19079, 19098), True, 'import numpy as np\n'), ((19118, 19159), 'numpy.arange', 'np.arange', (['size', '(size * 2)', '(1)'], {'dtype': 'dtype'}), '(size, size * 2, 1, dtype=dtype)\n', (19127, 19159), True, 'import numpy as np\n'), ((19179, 19224), 'numpy.arange', 'np.arange', (['(size * 2)', '(size * 3)', '(1)'], {'dtype': 'dtype'}), '(size * 2, size * 3, 1, dtype=dtype)\n', (19188, 19224), True, 'import numpy as np\n')] |
from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
return start.copy(name=name)
elif isinstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
if not len(self._range):
return header
first_val_str = str(self._range[0])
last_val_str = str(self._range[-1])
max_length = max(len(first_val_str), len(last_val_str))
return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
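        # Worked sketch with hypothetical inputs: for a RangeIndex over
        # range(0, 10, 2) and target [4, 5, 8], locs = [4, 5, 8],
        # valid = [True, False, True], and the returned indexer is
        # [2, -1, 4] (positions of 4 and 8; 5 is not present).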
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
if not is_signed_integer_dtype(target):
# checks/conversions/roundings are delegated to general method
return super()._get_indexer(target, method=method, tolerance=tolerance)
target_array = np.asarray(target)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
# --------------------------------------------------------------------
def repeat(self, repeats, axis=None) -> Int64Index:
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
return self._int64index.take(
indices,
axis=axis,
allow_fill=allow_fill,
fill_value=fill_value,
**kwargs,
)
def tolist(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
@doc(Int64Index.copy)
def copy(
self,
name: Hashable = None,
deep: bool = False,
dtype: Dtype | None = None,
names=None,
):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._rename(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
np.ndarray[np.intp]
See Also
--------
numpy.ndarray.argsort
"""
ascending = kwargs.pop("ascending", True) # EA compat
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
result = np.arange(len(self), dtype=np.intp)
else:
result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)
if not ascending:
result = result[::-1]
return result
def factorize(
self, sort: bool = False, na_sentinel: int | None = -1
) -> tuple[np.ndarray, RangeIndex]:
codes = np.arange(len(self), dtype=np.intp)
uniques = self
if sort and self.step < 0:
codes = codes[::-1]
uniques = uniques[::-1]
return codes, uniques
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
# --------------------------------------------------------------------
# Set Operations
def _intersection(self, other: Index, sort=False):
if not isinstance(other, RangeIndex):
# Int64Index
return super()._intersection(other, sort=sort)
if not len(self) or not len(other):
return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, _ = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
"""
        Extended Euclidean algorithm to solve Bezout's identity:
           a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
def _union(self, other: Index, sort):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (len(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (len(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
def _difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not isinstance(other, RangeIndex):
return super()._difference(other, sort=sort)
res_name = ops.get_op_result_name(self, other)
first = self._range[::-1] if self.step < 0 else self._range
overlap = self.intersection(other)
if overlap.step < 0:
overlap = overlap[::-1]
if len(overlap) == 0:
return self.rename(name=res_name)
if len(overlap) == len(self):
return self[:0].rename(res_name)
if not isinstance(overlap, RangeIndex):
# We won't end up with RangeIndex, so fall back
return super()._difference(other, sort=sort)
if overlap.step != first.step:
# In some cases we might be able to get a RangeIndex back,
# but not worth the effort.
return super()._difference(other, sort=sort)
if overlap[0] == first.start:
# The difference is everything after the intersection
new_rng = range(overlap[-1] + first.step, first.stop, first.step)
elif overlap[-1] == first[-1]:
# The difference is everything before the intersection
new_rng = range(first.start, overlap[0], first.step)
else:
# The difference is not range-like
return super()._difference(other, sort=sort)
new_index = type(self)._simple_new(new_rng, name=res_name)
if first is not self._range:
new_index = new_index[::-1]
return new_index
def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
if not isinstance(other, RangeIndex) or sort is not None:
return super().symmetric_difference(other, result_name, sort)
left = self.difference(other)
right = other.difference(self)
result = left.union(right)
if result_name is not None:
result = result.rename(result_name)
return result
# --------------------------------------------------------------------
def _concat(self, indexes: list[Index], name: Hashable) -> Index:
"""
Overriding parent method for the case of all RangeIndex instances.
When all members of "indexes" are of type RangeIndex: result will be
RangeIndex if possible, Int64Index otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
if not all(isinstance(x, RangeIndex) for x in indexes):
return super()._concat(indexes, name)
elif len(indexes) == 1:
return indexes[0]
rng_indexes = cast(List[RangeIndex], indexes)
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in rng_indexes if len(obj)]
for obj in non_empty_indexes:
rng = obj._range
if start is None:
# This is set by the first non-empty index
start = rng.start
if step is None and len(rng) > 1:
step = rng.step
elif step is None:
# First non-empty index had only one element
if rng.start == start:
values = np.concatenate([x._values for x in rng_indexes])
result = Int64Index(values)
return result.rename(name)
step = rng.start - start
non_consecutive = (step != rng.step and len(rng) > 1) or (
next_ is not None and rng.start != next_
)
if non_consecutive:
result = Int64Index(np.concatenate([x._values for x in rng_indexes]))
return result.rename(name)
if step is not None:
next_ = rng[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step).rename(name)
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0).rename(name)
def __len__(self) -> int:
"""
        Return the length of the RangeIndex.
"""
return len(self._range)
@property
def size(self) -> int:
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
if isinstance(key, slice):
new_range = self._range[key]
return self._simple_new(new_range, name=self._name)
elif is_integer(key):
new_key = int(key)
try:
return self._range[new_key]
except IndexError as err:
raise IndexError(
f"index {key} is out of bounds for axis 0 with size {len(self)}"
) from err
elif is_scalar(key):
raise IndexError(
"only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices"
)
# fall back to Int64Index
return super().__getitem__(key)
def _getitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
res = self._range[slobj]
return type(self)._simple_new(res, name=self._name)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
if is_integer(other) and other != 0:
if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
start = self.start // other
step = self.step // other
stop = start + len(self) * step
new_range = range(start, stop, step or 1)
return self._simple_new(new_range, name=self.name)
if len(self) == 1:
start = self.start // other
new_range = range(start, start + 1, 1)
return self._simple_new(new_range, name=self.name)
return self._int64index // other
# --------------------------------------------------------------------
# Reductions
def all(self, *args, **kwargs) -> bool:
return 0 not in self._range
def any(self, *args, **kwargs) -> bool:
return any(self._range)
# --------------------------------------------------------------------
def _cmp_method(self, other, op):
if isinstance(other, RangeIndex) and self._range == other._range:
# Both are immutable so if ._range attr. are equal, shortcut is possible
return super()._cmp_method(self, op)
return super()._cmp_method(other, op)
def _arith_method(self, other, op):
"""
Parameters
----------
other : Any
op : callable that accepts 2 params
perform the binary op
"""
if isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
if op in [
operator.pow,
ops.rpow,
operator.mod,
ops.rmod,
ops.rfloordiv,
divmod,
ops.rdivmod,
]:
return op(self._int64index, other)
step: Callable | None = None
if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
step = op
# TODO: if other is a RangeIndex we may have more efficient options
other = extract_array(other, extract_numpy=True, extract_range=True)
attrs = self._get_attributes_dict()
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all="ignore"):
rstep = step(left.step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left.step
with np.errstate(all="ignore"):
rstart = op(left.start, right)
rstop = op(left.stop, right)
result = type(self)(rstart, rstop, rstep, **attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in [rstart, rstop, rstep]):
result = result.astype("float64")
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
| [
"pandas.core.construction.extract_array",
"pandas.compat.numpy.function.validate_argsort",
"pandas.core.dtypes.common.is_scalar",
"numpy.arange",
"pandas.compat.numpy.function.validate_min",
"pandas.core.indexes.numeric.Int64Index",
"pandas.core.common.any_not_none",
"sys.getsizeof",
"numpy.asarray",
"pandas.core.indexes.base.maybe_extract_name",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.compat.numpy.function.validate_max",
"numpy.concatenate",
"warnings.warn",
"numpy.dtype",
"pandas.core.indexes.base.default_pprint",
"pandas.core.dtypes.common.is_signed_integer_dtype",
"pandas.core.common.all_none",
"pandas.compat.numpy.function.validate_minmax_axis",
"pandas.core.dtypes.common.is_float",
"pandas.util._decorators.doc",
"pandas.core.dtypes.common.ensure_python_int",
"typing.cast",
"pandas.core.ops.get_op_result_name",
"pandas.core.ops.common.unpack_zerodim_and_defer",
"pandas.core.indexes.numeric.Float64Index",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"numpy.errstate",
"pandas.core.indexes.numeric.Int64Index._simple_new"
] | [((10265, 10288), 'pandas.util._decorators.doc', 'doc', (['Int64Index.get_loc'], {}), '(Int64Index.get_loc)\n', (10268, 10288), False, 'from pandas.util._decorators import cache_readonly, doc\n'), ((12844, 12868), 'pandas.util._decorators.doc', 'doc', (['Int64Index.__iter__'], {}), '(Int64Index.__iter__)\n', (12847, 12868), False, 'from pandas.util._decorators import cache_readonly, doc\n'), ((12930, 12959), 'pandas.util._decorators.doc', 'doc', (['Int64Index._shallow_copy'], {}), '(Int64Index._shallow_copy)\n', (12933, 12959), False, 'from pandas.util._decorators import cache_readonly, doc\n'), ((13411, 13431), 'pandas.util._decorators.doc', 'doc', (['Int64Index.copy'], {}), '(Int64Index.copy)\n', (13414, 13431), False, 'from pandas.util._decorators import cache_readonly, doc\n'), ((27681, 27721), 'pandas.core.ops.common.unpack_zerodim_and_defer', 'unpack_zerodim_and_defer', (['"""__floordiv__"""'], {}), "('__floordiv__')\n", (27705, 27721), False, 'from pandas.core.ops.common import unpack_zerodim_and_defer\n'), ((2834, 2870), 'pandas.core.indexes.base.maybe_extract_name', 'maybe_extract_name', (['name', 'start', 'cls'], {}), '(name, start, cls)\n', (2852, 2870), False, 'from pandas.core.indexes.base import maybe_extract_name\n'), ((3113, 3144), 'pandas.core.common.all_none', 'com.all_none', (['start', 'stop', 'step'], {}), '(start, stop, step)\n', (3125, 3144), True, 'import pandas.core.common as com\n'), ((4958, 5017), 'numpy.arange', 'np.arange', (['self.start', 'self.stop', 'self.step'], {'dtype': 'np.int64'}), '(self.start, self.stop, self.step, dtype=np.int64)\n', (4967, 5017), True, 'import numpy as np\n'), ((5102, 5152), 'pandas.core.indexes.numeric.Int64Index._simple_new', 'Int64Index._simple_new', (['self._data'], {'name': 'self.name'}), '(self._data, name=self.name)\n', (5124, 5152), False, 'from pandas.core.indexes.numeric import Float64Index, Int64Index, NumericIndex\n'), ((9495, 9513), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (9503, 9513), True, 'import numpy as np\n'), ((10990, 11032), 'pandas.core.common.any_not_none', 'com.any_not_none', (['method', 'tolerance', 'limit'], {}), '(method, tolerance, limit)\n', (11006, 11032), True, 'import pandas.core.common as com\n'), ((11675, 11693), 'numpy.asarray', 'np.asarray', (['target'], {}), '(target)\n', (11685, 11693), True, 'import numpy as np\n'), ((12035, 12060), 'pandas.core.dtypes.common.ensure_platform_int', 'ensure_platform_int', (['locs'], {}), '(locs)\n', (12054, 12060), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((13187, 13228), 'pandas.core.indexes.numeric.Int64Index._simple_new', 'Int64Index._simple_new', (['values'], {'name': 'name'}), '(values, name=name)\n', (13209, 13228), False, 'from pandas.core.indexes.numeric import Float64Index, Int64Index, NumericIndex\n'), ((14454, 14483), 'pandas.compat.numpy.function.validate_minmax_axis', 'nv.validate_minmax_axis', (['axis'], {}), '(axis)\n', (14477, 14483), True, 'from pandas.compat.numpy import function as nv\n'), ((14492, 14521), 'pandas.compat.numpy.function.validate_min', 'nv.validate_min', (['args', 'kwargs'], {}), '(args, kwargs)\n', (14507, 14521), True, 'from pandas.compat.numpy import function as nv\n'), ((14691, 14720), 'pandas.compat.numpy.function.validate_minmax_axis', 'nv.validate_minmax_axis', (['axis'], {}), '(axis)\n', (14714, 14720), True, 'from pandas.compat.numpy import function as nv\n'), ((14729, 
14758), 'pandas.compat.numpy.function.validate_max', 'nv.validate_max', (['args', 'kwargs'], {}), '(args, kwargs)\n', (14744, 14758), True, 'from pandas.compat.numpy import function as nv\n'), ((15157, 15190), 'pandas.compat.numpy.function.validate_argsort', 'nv.validate_argsort', (['args', 'kwargs'], {}), '(args, kwargs)\n', (15176, 15190), True, 'from pandas.compat.numpy import function as nv\n'), ((22152, 22187), 'pandas.core.ops.get_op_result_name', 'ops.get_op_result_name', (['self', 'other'], {}), '(self, other)\n', (22174, 22187), False, 'from pandas.core import ops\n'), ((24717, 24748), 'typing.cast', 'cast', (['List[RangeIndex]', 'indexes'], {}), '(List[RangeIndex], indexes)\n', (24721, 24748), False, 'from typing import TYPE_CHECKING, Any, Callable, Hashable, List, cast\n'), ((30176, 30236), 'pandas.core.construction.extract_array', 'extract_array', (['other'], {'extract_numpy': '(True)', 'extract_range': '(True)'}), '(other, extract_numpy=True, extract_range=True)\n', (30189, 30236), False, 'from pandas.core.construction import extract_array\n'), ((3239, 3263), 'pandas.core.dtypes.common.ensure_python_int', 'ensure_python_int', (['start'], {}), '(start)\n', (3256, 3263), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((3386, 3409), 'pandas.core.dtypes.common.ensure_python_int', 'ensure_python_int', (['stop'], {}), '(stop)\n', (3403, 3409), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((3426, 3449), 'pandas.core.dtypes.common.ensure_python_int', 'ensure_python_int', (['step'], {}), '(step)\n', (3443, 3449), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((8712, 8726), 'sys.getsizeof', 'getsizeof', (['rng'], {}), '(rng)\n', (8721, 8726), False, 'from sys import getsizeof\n'), ((9976, 9998), 'pandas.core.dtypes.common.ensure_python_int', 'ensure_python_int', (['key'], {}), '(key)\n', (9993, 9998), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((11459, 11490), 'pandas.core.dtypes.common.is_signed_integer_dtype', 'is_signed_integer_dtype', (['target'], {}), '(target)\n', (11482, 11490), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((13140, 13171), 'pandas.core.indexes.numeric.Float64Index', 'Float64Index', (['values'], {'name': 'name'}), '(values, name=name)\n', (13152, 13171), False, 'from pandas.core.indexes.numeric import Float64Index, Int64Index, NumericIndex\n'), ((13731, 13888), 'warnings.warn', 'warnings.warn', (['"""parameter dtype is deprecated and will be removed in a future version. Use the astype method instead."""', 'FutureWarning'], {'stacklevel': '(2)'}), "(\n 'parameter dtype is deprecated and will be removed in a future version. 
Use the astype method instead.'\n , FutureWarning, stacklevel=2)\n", (13744, 13888), False, 'import warnings\n'), ((26791, 26806), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['key'], {}), '(key)\n', (26801, 26806), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((27769, 27786), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['other'], {}), '(other)\n', (27779, 27786), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((10410, 10425), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['key'], {}), '(key)\n', (10420, 10425), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((27097, 27111), 'pandas.core.dtypes.common.is_scalar', 'is_scalar', (['key'], {}), '(key)\n', (27106, 27111), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((29580, 29607), 'pandas.core.dtypes.common.is_timedelta64_dtype', 'is_timedelta64_dtype', (['other'], {}), '(other)\n', (29600, 29607), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((30749, 30774), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (30760, 30774), True, 'import numpy as np\n'), ((6075, 6106), 'pandas.core.indexes.base.default_pprint', 'ibase.default_pprint', (['self.name'], {}), '(self.name)\n', (6095, 6106), True, 'import pandas.core.indexes.base as ibase\n'), ((10430, 10443), 'pandas.core.dtypes.common.is_float', 'is_float', (['key'], {}), '(key)\n', (10438, 10443), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((25726, 25774), 'numpy.concatenate', 'np.concatenate', (['[x._values for x in rng_indexes]'], {}), '([x._values for x in rng_indexes])\n', (25740, 25774), True, 'import numpy as np\n'), ((30415, 30440), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (30426, 30440), True, 'import numpy as np\n'), ((25329, 25377), 'numpy.concatenate', 'np.concatenate', (['[x._values for x in rng_indexes]'], {}), '([x._values for x in rng_indexes])\n', (25343, 25377), True, 'import numpy as np\n'), ((25407, 25425), 'pandas.core.indexes.numeric.Int64Index', 'Int64Index', (['values'], {}), '(values)\n', (25417, 25425), False, 'from pandas.core.indexes.numeric import Float64Index, Int64Index, NumericIndex\n'), ((30609, 30626), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['rstep'], {}), '(rstep)\n', (30619, 30626), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((31134, 31147), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['x'], {}), '(x)\n', (31144, 31147), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n')] |
import torch
import torch.nn as nn
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, hidden_size)
self.l3 = nn.Linear(hidden_size, hidden_size)
self.l4 = nn.Linear(hidden_size, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3(out)
out = self.relu(out)
out = self.l4(out)
# no activation and no softmax at the end
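        # (losses such as nn.CrossEntropyLoss expect raw logits and apply
        # log-softmax internally, so leaving the softmax out here is intentional)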
return out | [
"torch.nn.ReLU",
"torch.nn.Linear"
] | [((187, 221), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (196, 221), True, 'import torch.nn as nn\n'), ((241, 276), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (250, 276), True, 'import torch.nn as nn\n'), ((296, 331), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (305, 331), True, 'import torch.nn as nn\n'), ((350, 385), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (359, 385), True, 'import torch.nn as nn\n'), ((406, 415), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (413, 415), True, 'import torch.nn as nn\n')] |
""" Logging functions for the ``jwql`` automation platform.
This module provides decorators to log the execution of modules. Log
files are written to the ``logs/`` directory in the ``jwql`` central
storage area, named by module name and timestamp, e.g.
``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``
Authors
-------
    - <NAME>, 2018
- <NAME>, 2013 (WFC3 QL Version)
Use
---
To log the execution of a module, use:
::
import os
import logging
from jwql.logging.logging_functions import configure_logging
from jwql.logging.logging_functions import log_info
from jwql.logging.logging_functions import log_fail
@log_info
@log_fail
def my_main_function():
pass
if __name__ == '__main__':
module = os.path.basename(__file__).replace('.py', '')
configure_logging(module)
my_main_function()
Dependencies
------------
The user must have a configuration file named ``config.json``
placed in the ``utils`` directory.
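    The keys this module reads from it are ``admin_account``, ``log_dir``
    and ``setup_file`` (see ``make_log_file`` and ``log_info``).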
References
----------
This code is adopted and updated from python routine
``logging_functions.py`` written by Alex Viana, 2013 for the WFC3
Quicklook automation platform.
"""
import datetime
import getpass
import importlib
import logging
import os
import pwd
import socket
import sys
import time
import traceback
from functools import wraps
from jwql.utils.permissions import set_permissions
from jwql.utils.utils import get_config, ensure_dir_exists
LOG_FILE_LOC = ''
PRODUCTION_BOOL = ''
def configure_logging(module, production_mode=True, path='./'):
"""Configure the log file with a standard logging format.
Parameters
----------
module : str
The name of the module being logged.
production_mode : bool
Whether or not the output should be written to the production
        environment.
path : str
        Where to write the log if a path is supplied by the user; defaults to the working directory.
"""
# Determine log file location
if production_mode:
log_file = make_log_file(module)
else:
log_file = make_log_file(module, production_mode=False, path=path)
global LOG_FILE_LOC
global PRODUCTION_BOOL
LOG_FILE_LOC = log_file
PRODUCTION_BOOL = production_mode
# Create the log file and set the permissions
logging.basicConfig(filename=log_file,
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S %p',
level=logging.INFO)
set_permissions(log_file)
def make_log_file(module, production_mode=True, path='./'):
"""Create the log file name based on the module name.
The name of the ``log_file`` is a combination of the name of the
module being logged and the current datetime.
Parameters
----------
module : str
The name of the module being logged.
production_mode : bool
Whether or not the output should be written to the production
environment.
path : str
        Where to write the log if a path is supplied by the user; defaults to
        the working directory.
Returns
-------
log_file : str
The full path to where the log file will be written to.
"""
timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
filename = '{0}_{1}.log'.format(module, timestamp)
user = pwd.getpwuid(os.getuid()).pw_name
settings = get_config()
admin_account = settings['admin_account']
log_path = settings['log_dir']
exempt_modules = []
if user != admin_account and module not in exempt_modules and production_mode:
module = os.path.join('dev', module)
if production_mode:
log_file = os.path.join(log_path, module, filename)
else:
log_file = os.path.join(path, filename)
ensure_dir_exists(os.path.dirname(log_file))
return log_file
def log_info(func):
"""Decorator to log useful system information.
This function can be used as a decorator to log user environment
and system information. Future packages we want to track can be
added or removed as necessary.
Parameters
----------
func : func
The function to decorate.
Returns
-------
wrapped : func
The wrapped function.
"""
@wraps(func)
def wrapped(*a, **kw):
# Log environment information
logging.info('User: ' + getpass.getuser())
logging.info('System: ' + socket.gethostname())
logging.info('Python Version: ' + sys.version.replace('\n', ''))
logging.info('Python Executable Path: ' + sys.executable)
# Read in setup.py file to build list of required modules
settings = get_config()
setup_file_name = settings['setup_file']
with open(setup_file_name) as setup:
for line in setup:
if line[0:8] == "REQUIRES":
module_required = line[12:-2]
module_list = module_required.split(',')
# Clean up the module list
module_list = [module.replace('"', '').replace("'", '').replace(' ', '') for module in module_list]
module_list = [module.split('=')[0] for module in module_list]
# Log common module version information
for module in module_list:
try:
mod = importlib.import_module(module)
logging.info(module + ' Version: ' + mod.__version__)
logging.info(module + ' Path: ' + mod.__path__[0])
except ImportError as err:
logging.warning(err)
# Call the function and time it
t1_cpu = time.clock()
t1_time = time.time()
func(*a, **kw)
t2_cpu = time.clock()
t2_time = time.time()
# Log execution time
hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)
minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)
hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)
minutes_time, seconds_time = divmod(remainder_time, 60)
logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time))
logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu))
return wrapped
def log_fail(func):
"""Decorator to log crashes in the decorated code.
Parameters
----------
func : func
The function to decorate.
Returns
-------
wrapped : func
The wrapped function.
"""
@wraps(func)
def wrapped(*a, **kw):
try:
# Run the function
func(*a, **kw)
logging.info('Completed Successfully')
except Exception:
logging.critical(traceback.format_exc())
logging.critical('CRASHED')
return wrapped
| [
"logging.basicConfig",
"traceback.format_exc",
"importlib.import_module",
"time.clock",
"os.getuid",
"os.path.join",
"logging.warning",
"functools.wraps",
"sys.version.replace",
"os.path.dirname",
"datetime.datetime.now",
"logging.critical",
"time.time",
"getpass.getuser",
"socket.gethostname",
"jwql.utils.utils.get_config",
"logging.info",
"jwql.utils.permissions.set_permissions"
] | [((2388, 2537), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_file', 'format': '"""%(asctime)s %(levelname)s: %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S %p"""', 'level': 'logging.INFO'}), "(filename=log_file, format=\n '%(asctime)s %(levelname)s: %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S %p', level=logging.INFO)\n", (2407, 2537), False, 'import logging\n'), ((2604, 2629), 'jwql.utils.permissions.set_permissions', 'set_permissions', (['log_file'], {}), '(log_file)\n', (2619, 2629), False, 'from jwql.utils.permissions import set_permissions\n'), ((3482, 3494), 'jwql.utils.utils.get_config', 'get_config', ([], {}), '()\n', (3492, 3494), False, 'from jwql.utils.utils import get_config, ensure_dir_exists\n'), ((4358, 4369), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (4363, 4369), False, 'from functools import wraps\n'), ((6609, 6620), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (6614, 6620), False, 'from functools import wraps\n'), ((3701, 3728), 'os.path.join', 'os.path.join', (['"""dev"""', 'module'], {}), "('dev', module)\n", (3713, 3728), False, 'import os\n'), ((3773, 3813), 'os.path.join', 'os.path.join', (['log_path', 'module', 'filename'], {}), '(log_path, module, filename)\n', (3785, 3813), False, 'import os\n'), ((3843, 3871), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (3855, 3871), False, 'import os\n'), ((3895, 3920), 'os.path.dirname', 'os.path.dirname', (['log_file'], {}), '(log_file)\n', (3910, 3920), False, 'import os\n'), ((4624, 4681), 'logging.info', 'logging.info', (["('Python Executable Path: ' + sys.executable)"], {}), "('Python Executable Path: ' + sys.executable)\n", (4636, 4681), False, 'import logging\n'), ((4768, 4780), 'jwql.utils.utils.get_config', 'get_config', ([], {}), '()\n', (4778, 4780), False, 'from jwql.utils.utils import get_config, ensure_dir_exists\n'), ((5702, 5714), 'time.clock', 'time.clock', ([], {}), '()\n', (5712, 5714), False, 'import time\n'), ((5733, 5744), 'time.time', 'time.time', ([], {}), '()\n', (5742, 5744), False, 'import time\n'), ((5785, 5797), 'time.clock', 'time.clock', ([], {}), '()\n', (5795, 5797), False, 'import time\n'), ((5816, 5827), 'time.time', 'time.time', ([], {}), '()\n', (5825, 5827), False, 'import time\n'), ((3315, 3338), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3336, 3338), False, 'import datetime\n'), ((3445, 3456), 'os.getuid', 'os.getuid', ([], {}), '()\n', (3454, 3456), False, 'import os\n'), ((6733, 6771), 'logging.info', 'logging.info', (['"""Completed Successfully"""'], {}), "('Completed Successfully')\n", (6745, 6771), False, 'import logging\n'), ((4468, 4485), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (4483, 4485), False, 'import getpass\n'), ((4521, 4541), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4539, 4541), False, 'import socket\n'), ((4585, 4614), 'sys.version.replace', 'sys.version.replace', (['"""\n"""', '""""""'], {}), "('\\n', '')\n", (4604, 4614), False, 'import sys\n'), ((5399, 5430), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (5422, 5430), False, 'import importlib\n'), ((5447, 5500), 'logging.info', 'logging.info', (["(module + ' Version: ' + mod.__version__)"], {}), "(module + ' Version: ' + mod.__version__)\n", (5459, 5500), False, 'import logging\n'), ((5517, 5567), 'logging.info', 'logging.info', (["(module + ' Path: ' + mod.__path__[0])"], {}), "(module + ' Path: ' + mod.__path__[0])\n", 
(5529, 5567), False, 'import logging\n'), ((6864, 6891), 'logging.critical', 'logging.critical', (['"""CRASHED"""'], {}), "('CRASHED')\n", (6880, 6891), False, 'import logging\n'), ((5623, 5643), 'logging.warning', 'logging.warning', (['err'], {}), '(err)\n', (5638, 5643), False, 'import logging\n'), ((6828, 6850), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6848, 6850), False, 'import traceback\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by <NAME>. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: <NAME>
# created: 2020-09-19
# modified: 2020-09-19
#
import sys, colorsys
import ioexpander as io
from colorama import init, Fore, Style
init()
from lib.logger import Logger
# ..............................................................................
class Potentiometer(object):
'''
Configures an IO Expander Potentiometer breakout, returning an analog
value scaled to a specified range. For a center-zero pot simply
specify the minimum value as (-1.0 * out_max).
'''
def __init__(self, config, level):
super().__init__()
self._log = Logger('ioe', level)
if config is None:
raise ValueError('no configuration provided.')
_config = config['ros'].get('ioe_potentiometer')
# 0x18 for IO Expander, 0x0E for the potentiometer breakout
# self._i2c_addr = 0x0E
self._i2c_addr = _config.get('i2c_address')
self._pin_red = _config.get('pin_red')
self._pin_green = _config.get('pin_green')
self._pin_blue = _config.get('pin_blue')
self._log.info("pins: red: {}; green: {}; blue: {}".format(self._pin_red, self._pin_green, self._pin_blue))
self._pot_enc_a = 12
self._pot_enc_b = 3
self._pot_enc_c = 11
self._max_value = 3.3 # maximum voltage (3.3v supply)
self._brightness = _config.get('brightness') # effectively max fraction of period LED will be on
self._period = int(255 / self._brightness) # add a period large enough to get 0-255 steps at the desired brightness
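        # e.g. brightness = 0.5 gives period = 510, so duty values span 0-255 while
        # the LED is on for at most half of each PWM period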
_in_min = _config.get('in_min') # minimum analog value from IO Expander
_in_max = _config.get('in_max') # maximum analog value from IO Expander
self.set_input_limits(_in_min, _in_max)
_out_min = _config.get('out_min') # minimum scaled output value
_out_max = _config.get('out_max') # maximum scaled output value
self.set_output_limits(_out_min, _out_max)
# now configure IO Expander
self._ioe = io.IOE(i2c_addr=self._i2c_addr)
self._ioe.set_mode(self._pot_enc_a, io.PIN_MODE_PP)
self._ioe.set_mode(self._pot_enc_b, io.PIN_MODE_PP)
self._ioe.set_mode(self._pot_enc_c, io.ADC)
self._ioe.output(self._pot_enc_a, 1)
self._ioe.output(self._pot_enc_b, 0)
self._ioe.set_pwm_period(self._period)
self._ioe.set_pwm_control(divider=2) # PWM as fast as we can to avoid LED flicker
self._ioe.set_mode(self._pin_red, io.PWM, invert=True)
self._ioe.set_mode(self._pin_green, io.PWM, invert=True)
self._ioe.set_mode(self._pin_blue, io.PWM, invert=True)
self._log.info("running LED with {} brightness steps.".format(int(self._period * self._brightness)))
self._log.info("ready.")
# ..........................................................................
def set_input_limits(self, in_min, in_max):
self._in_min = in_min
self._in_max = in_max
self._log.info('input range:\t{:>5.2f}-{:<5.2f}'.format(self._in_min, self._in_max))
# ..........................................................................
def set_output_limits(self, out_min, out_max):
self._out_min = out_min
self._out_max = out_max
self._log.info('output range:\t{:>5.2f}-{:<5.2f}'.format(self._out_min, self._out_max))
# ..........................................................................
def get_value(self):
value = self._max_value - self._ioe.input(self._pot_enc_c)
self._log.debug(Fore.BLACK + 'value: {:<5.2f}'.format(value))
return value
# ..........................................................................
def set_rgb(self, value):
h = value / self._max_value # time.time() / 10.0
r, g, b = [int(c * self._period * self._brightness) for c in colorsys.hsv_to_rgb(h, 1.0, 1.0)]
self._ioe.output(self._pin_red, r)
self._ioe.output(self._pin_green, g)
self._ioe.output(self._pin_blue, b)
self._log.debug('value: {:<5.2f}; rgb: {},{},{}'.format(value, r, g, b))
# ..........................................................................
def get_scaled_value(self, update_led=True):
'''
Return a scaled value while also updating the RGB LED if the
argument is True (the default).
'''
_value = self.get_value()
if update_led:
self.set_rgb(_value)
return self.scale_value(_value) # as float
# # ..........................................................................
# def x_get_scaled_value(self):
# '''
# (out_max - out_min)(value - in_min)
# f(x) = ----------------------------------- + out_min
# in_max - in_min
# where: a = 0.0, b = 1.0, min = 0, max = 330.
# '''
# return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
# ..........................................................................
def scale_value(self, value):
'''
(out_max - out_min)(value - in_min)
f(x) = ----------------------------------- + out_min
in_max - in_min
        where e.g.:  out_min = 0.0, out_max = 1.0, in_min = 0, in_max = 330.
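        With those values, a raw reading of 165 scales to (1.0 * 165 / 330) = 0.5.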
'''
return (( self._out_max - self._out_min ) * ( value - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
# return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
#EOF
| [
"colorsys.hsv_to_rgb",
"lib.logger.Logger",
"ioexpander.IOE",
"colorama.init"
] | [((412, 418), 'colorama.init', 'init', ([], {}), '()\n', (416, 418), False, 'from colorama import init, Fore, Style\n'), ((856, 876), 'lib.logger.Logger', 'Logger', (['"""ioe"""', 'level'], {}), "('ioe', level)\n", (862, 876), False, 'from lib.logger import Logger\n'), ((2329, 2360), 'ioexpander.IOE', 'io.IOE', ([], {'i2c_addr': 'self._i2c_addr'}), '(i2c_addr=self._i2c_addr)\n', (2335, 2360), True, 'import ioexpander as io\n'), ((4186, 4218), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['h', '(1.0)', '(1.0)'], {}), '(h, 1.0, 1.0)\n', (4205, 4218), False, 'import sys, colorsys\n')] |
from django import template
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import loader
@login_required(login_url="/login/")
def index(request):
context = {}
context["segment"] = "index"
html_template = loader.get_template("index.html")
return HttpResponse(html_template.render(context, request))
@login_required(login_url="/login/")
def pages(request):
context = {}
# All resource paths end in .html.
    # Pick out the HTML file name from the URL and load that template.
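    # e.g. a request path of "/tables.html" loads the "tables.html" template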
try:
load_template = request.path.split("/")[-1]
context["segment"] = load_template
html_template = loader.get_template(load_template)
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template("page-404.html")
return HttpResponse(html_template.render(context, request))
except: # noqa: E722
html_template = loader.get_template("page-500.html")
return HttpResponse(html_template.render(context, request))
| [
"django.template.loader.get_template",
"django.contrib.auth.decorators.login_required"
] | [((161, 196), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (175, 196), False, 'from django.contrib.auth.decorators import login_required\n'), ((389, 424), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (403, 424), False, 'from django.contrib.auth.decorators import login_required\n'), ((288, 321), 'django.template.loader.get_template', 'loader.get_template', (['"""index.html"""'], {}), "('index.html')\n", (307, 321), False, 'from django.template import loader\n'), ((701, 735), 'django.template.loader.get_template', 'loader.get_template', (['load_template'], {}), '(load_template)\n', (720, 735), False, 'from django.template import loader\n'), ((870, 906), 'django.template.loader.get_template', 'loader.get_template', (['"""page-404.html"""'], {}), "('page-404.html')\n", (889, 906), False, 'from django.template import loader\n'), ((1025, 1061), 'django.template.loader.get_template', 'loader.get_template', (['"""page-500.html"""'], {}), "('page-500.html')\n", (1044, 1061), False, 'from django.template import loader\n')] |
import random
import string
import os
from IPython.display import display, HTML
from .utils import html_loader
from .utils import get_content
from jinja2 import Template
class JupyterSlides:
def __init__(
self,
content_path='./content.yaml',
table_contents=False
):
self.set_base_dirs()
self.set_source_dirs()
self.content = get_content(content_path)
self.render_init_templates()
if table_contents:
self.render_table_contents()
def set_base_dirs(self):
self.module_path = os.path.dirname(os.path.realpath(__file__))
self.base_template_dir = f'{self.module_path}/src/templates'
self.base_css_dir = f'{self.module_path}/src/assets/css'
self.base_js_dir = f'{self.module_path}/src/js'
def set_source_dirs(self):
self.called_from_path = os.getcwd()
folders = self.called_from_path.split('/')
self.source_path = '/'.join(folders[:folders.index('talks')])
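        # e.g. running from /home/user/project/talks/intro (hypothetical path),
        # source_path becomes '/home/user/project', i.e. everything before 'talks'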
self.template_dir = f'{self.source_path}/src/templates'
self.css_dir = f'{self.source_path}/src/css'
self.js_dir = f'{self.source_path}/src/js'
def render_init_templates(self):
self.render(
template='init',
data={'dir': self.module_path},
template_dir=self.base_template_dir
)
if os.path.isfile(f'{self.template_dir}/init.html'):
self.render(
template=f'init',
data=self.content.get('init_vars', {})
)
id = JupyterSlides.randomUUID()
self.render(
template='eye',
data={'eye_id': id},
template_dir=self.base_template_dir
)
def render_table_contents(self):
if os.path.isfile(f'{self.template_dir}/table-contents.html'):
contents_template_dir = self.template_dir
else:
contents_template_dir = self.base_template_dir
self.render(
template='table-contents',
data=self.generate_table_contents(),
template_dir=contents_template_dir,
render_type='slide'
)
def parse_template(self, template=None, data={}, template_dir=None, render_type=None):
if not template_dir:
if os.path.isfile(f'{self.template_dir}/{template}.html'):
html = html_loader(f'file:{self.template_dir}/{template}.html')
else:
template = 'basic-slide'
html = html_loader(f'file:{self.base_template_dir}/{template}.html')
else:
if not os.path.isfile(f'{template_dir}/{template}.html'):
template = 'basic-slide'
template_dir = self.base_template_dir
html = html_loader(
f'file:{template_dir}/{template}.html')
if render_type == 'slide':
html = '<div id="{{ data["slide_id"] }}" class="slide-container">' + \
html + '</div>'
tm = Template(html)
return tm.render(data=data)
def render(self, template=None, data={}, navigation=False, template_dir=None, render_type=None):
html = self.parse_template(
template=template,
data=data,
template_dir=template_dir,
render_type=render_type
)
if navigation:
navigation_template = self.parse_template(
template='navigation',
template_dir=template_dir
)
html += navigation_template
display(HTML(html))
def render_content(self, key):
data = self.content.get(key)
id = JupyterSlides.randomUUID()
self.render(
template='eye',
data={'eye_id': id},
template_dir=self.base_template_dir
)
if data.get('slides'):
for el in data.get('slides'):
template = el.get('template')
self.render(template=template, data=el, render_type='slide')
@staticmethod
def randomUUID(stringLength=20):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def generate_table_contents(self):
table = {}
items = []
for _, item in self.content.items():
for sub_item in item['slides']:
sub_item['slide_id'] = \
str(item['indice']) + '.' + str(sub_item['indice']) +\
sub_item['content_title']
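                # e.g. indice 1, sub-indice 2 and content_title 'Intro' yield slide_id '1.2Intro'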
item['slide_id'] = item['slides'][0]['slide_id']
items.append(item)
table['title'] = 'Table of Contents'
table['eyebrow'] = 'Table of Contents'
table['items'] = items
return table
| [
"random.choice",
"jinja2.Template",
"os.getcwd",
"os.path.isfile",
"os.path.realpath",
"IPython.display.HTML"
] | [((870, 881), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (879, 881), False, 'import os\n'), ((1373, 1421), 'os.path.isfile', 'os.path.isfile', (['f"""{self.template_dir}/init.html"""'], {}), "(f'{self.template_dir}/init.html')\n", (1387, 1421), False, 'import os\n'), ((1782, 1840), 'os.path.isfile', 'os.path.isfile', (['f"""{self.template_dir}/table-contents.html"""'], {}), "(f'{self.template_dir}/table-contents.html')\n", (1796, 1840), False, 'import os\n'), ((3018, 3032), 'jinja2.Template', 'Template', (['html'], {}), '(html)\n', (3026, 3032), False, 'from jinja2 import Template\n'), ((588, 614), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (604, 614), False, 'import os\n'), ((2305, 2359), 'os.path.isfile', 'os.path.isfile', (['f"""{self.template_dir}/{template}.html"""'], {}), "(f'{self.template_dir}/{template}.html')\n", (2319, 2359), False, 'import os\n'), ((3576, 3586), 'IPython.display.HTML', 'HTML', (['html'], {}), '(html)\n', (3580, 3586), False, 'from IPython.display import display, HTML\n'), ((2618, 2667), 'os.path.isfile', 'os.path.isfile', (['f"""{template_dir}/{template}.html"""'], {}), "(f'{template_dir}/{template}.html')\n", (2632, 2667), False, 'import os\n'), ((4213, 4235), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (4226, 4235), False, 'import random\n')] |
from decimal import Decimal
from fixtures import * # noqa: F401,F403
from fixtures import TEST_NETWORK
from flaky import flaky # noqa: F401
from pyln.client import RpcError, Millisatoshi
from utils import (
only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT,
VALGRIND
)
import os
import pytest
import subprocess
import time
import unittest
@unittest.skipIf(TEST_NETWORK != 'regtest', "Test relies on a number of example addresses valid only in regtest")
def test_withdraw(node_factory, bitcoind):
amount = 1000000
# Don't get any funds from previous runs.
l1 = node_factory.get_node(random_hsm=True)
l2 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
# Add some funds to withdraw later
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
# Reach around into the db to check that outputs were added
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 10
waddr = l1.bitcoin.rpc.getnewaddress()
# Now attempt to withdraw some (making sure we collect multiple inputs)
with pytest.raises(RpcError):
l1.rpc.withdraw('not an address', amount)
with pytest.raises(RpcError):
l1.rpc.withdraw(waddr, 'not an amount')
with pytest.raises(RpcError):
l1.rpc.withdraw(waddr, -amount)
with pytest.raises(RpcError, match=r'Cannot afford transaction'):
l1.rpc.withdraw(waddr, amount * 100)
out = l1.rpc.withdraw(waddr, 2 * amount)
# Make sure bitcoind received the withdrawal
unspent = l1.bitcoin.rpc.listunspent(0)
withdrawal = [u for u in unspent if u['txid'] == out['txid']]
assert(withdrawal[0]['amount'] == Decimal('0.02'))
l1.bitcoin.generate_block(1)
sync_blockheight(l1.bitcoin, [l1])
# Check that there are no unconfirmed outputs (change should be confirmed)
for o in l1.rpc.listfunds()['outputs']:
assert o['status'] == 'confirmed'
# Now make sure two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 2
# Now send some money to l2.
# lightningd uses P2SH-P2WPKH
waddr = l2.rpc.newaddr('bech32')['bech32']
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Make sure l2 received the withdrawal.
wait_for(lambda: len(l2.rpc.listfunds()['outputs']) == 1)
outputs = l2.db_query('SELECT value FROM outputs WHERE status=0;')
assert only_one(outputs)['value'] == 2 * amount
# Now make sure an additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 4
# Simple test for withdrawal to P2WPKH
# Address from: https://bc-2.jp/tools/bech32demo/index.html
waddr = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
with pytest.raises(RpcError):
l1.rpc.withdraw('xx1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kdl9fad', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxxxxxx', 2 * amount)
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Now make sure additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 6
# Simple test for withdrawal to P2WSH
# Address from: https://bc-2.jp/tools/bech32demo/index.html
waddr = 'bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry'
with pytest.raises(RpcError):
l1.rpc.withdraw('xx1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1prp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qsm03tq', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qxxxxxx', 2 * amount)
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Now make sure additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 8
# failure testing for invalid SegWit addresses, from BIP173
# HRP character out of range
with pytest.raises(RpcError):
l1.rpc.withdraw(' 1nwldj5', 2 * amount)
# overall max length exceeded
with pytest.raises(RpcError):
l1.rpc.withdraw('an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx', 2 * amount)
# No separator character
with pytest.raises(RpcError):
l1.rpc.withdraw('pzry9x0s0muk', 2 * amount)
# Empty HRP
with pytest.raises(RpcError):
l1.rpc.withdraw('1pzry9x0s0muk', 2 * amount)
# Invalid witness version
with pytest.raises(RpcError):
l1.rpc.withdraw('BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2', 2 * amount)
# Invalid program length for witness version 0 (per BIP141)
with pytest.raises(RpcError):
l1.rpc.withdraw('BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P', 2 * amount)
# Mixed case
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7', 2 * amount)
# Non-zero padding in 8-to-5 conversion
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv', 2 * amount)
# Should have 6 outputs available.
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 6
# Test withdrawal to self.
l1.rpc.withdraw(l1.rpc.newaddr('bech32')['bech32'], 'all', minconf=0)
bitcoind.generate_block(1)
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 1
l1.rpc.withdraw(waddr, 'all', minconf=0)
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 0
# This should fail, can't even afford fee.
with pytest.raises(RpcError, match=r'Cannot afford transaction'):
l1.rpc.withdraw(waddr, 'all')
# Add some funds to withdraw
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
# Try passing in a utxo set
utxos = [utxo["txid"] + ":" + str(utxo["output"]) for utxo in l1.rpc.listfunds()["outputs"]][:4]
withdrawal = l1.rpc.withdraw(waddr, 2 * amount, utxos=utxos)
decode = bitcoind.rpc.decoderawtransaction(withdrawal['tx'])
assert decode['txid'] == withdrawal['txid']
# Check that correct utxos are included
assert len(decode['vin']) == 4
vins = ["{}:{}".format(v['txid'], v['vout']) for v in decode['vin']]
for utxo in utxos:
assert utxo in vins
def test_minconf_withdraw(node_factory, bitcoind):
"""Issue 2518: ensure that ridiculous confirmation levels don't overflow
The number of confirmations is used to compute a maximum height that is to
be accepted. If the current height is smaller than the number of
    confirmations, we wrap around and just select everything. The fix is to
clamp the maxheight parameter to a positive small number.
"""
amount = 1000000
# Don't get any funds from previous runs.
l1 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
# Add some funds to withdraw later
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
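    # A minconf far larger than the current block height must be rejected
    # cleanly rather than wrapping the computed maxheight around.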
with pytest.raises(RpcError):
l1.rpc.withdraw(destination=addr, satoshi=10000, feerate='normal', minconf=9999999)
def test_addfunds_from_block(node_factory, bitcoind):
"""Send funds to the daemon without telling it explicitly
"""
# Previous runs with same bitcoind can leave funds!
l1 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 0.1)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)
outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
assert only_one(outputs)['value'] == 10000000
# The address we detect must match what was paid to.
output = only_one(l1.rpc.listfunds()['outputs'])
assert output['address'] == addr
# Send all our money to a P2WPKH address this time.
addr = l1.rpc.newaddr("bech32")['bech32']
l1.rpc.withdraw(addr, "all")
bitcoind.generate_block(1)
time.sleep(1)
# The address we detect must match what was paid to.
output = only_one(l1.rpc.listfunds()['outputs'])
assert output['address'] == addr
@unittest.skipIf(not COMPAT, "needs COMPAT=1")
def test_deprecated_txprepare(node_factory, bitcoind):
"""Test the deprecated old-style:
txprepare {destination} {satoshi} {feerate} {minconf}
"""
amount = 10**4
l1 = node_factory.get_node(options={'allow-deprecated-apis': True})
addr = l1.rpc.newaddr()['bech32']
for i in range(7):
l1.fundwallet(10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 7)
# Array type
with pytest.raises(RpcError, match=r'.* should be an amount in satoshis or all, not .*'):
l1.rpc.call('txprepare', [addr, 'slow'])
with pytest.raises(RpcError, match=r'Need set \'satoshi\' field.'):
l1.rpc.call('txprepare', [addr])
with pytest.raises(RpcError, match=r'Could not parse destination address.*'):
l1.rpc.call('txprepare', [Millisatoshi(amount * 100), 'slow', 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), 'slow', 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), 'normal'])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), None, 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100)])
# Object type
with pytest.raises(RpcError, match=r'Need set \'outputs\' field.'):
l1.rpc.call('txprepare', {'destination': addr, 'feerate': 'slow'})
with pytest.raises(RpcError, match=r'Need set \'outputs\' field.'):
l1.rpc.call('txprepare', {'satoshi': Millisatoshi(amount * 100), 'feerate': '10perkw', 'minconf': 2})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100), 'feerate': '2000perkw', 'minconf': 1})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100), 'feerate': '2000perkw'})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100)})
def test_txprepare_multi(node_factory, bitcoind):
amount = 10000000
l1 = node_factory.get_node(random_hsm=True)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)
outputs = []
for i in range(9):
outputs.append({l1.rpc.newaddr()['bech32']: Millisatoshi(amount * 100)})
prep = l1.rpc.txprepare(outputs=outputs)
l1.rpc.txdiscard(prep['txid'])
def test_txprepare(node_factory, bitcoind, chainparams):
amount = 1000000
l1 = node_factory.get_node(random_hsm=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
prep = l1.rpc.txprepare(outputs=[{addr: Millisatoshi(amount * 3 * 1000)}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# 4 inputs, 2 outputs (3 if we have a fee output).
assert len(decode['vin']) == 4
assert len(decode['vout']) == 2 if not chainparams['feeoutput'] else 3
# One output will be correct.
outnum = [i for i, o in enumerate(decode['vout']) if o['value'] == Decimal(amount * 3) / 10**8][0]
for i, o in enumerate(decode['vout']):
if i == outnum:
assert o['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert o['scriptPubKey']['addresses'] == [addr]
else:
assert o['scriptPubKey']['type'] in ['witness_v0_keyhash', 'fee']
# Now prepare one with no change.
prep2 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep2['unsigned_tx'])
assert decode['txid'] == prep2['txid']
# 6 inputs, 1 outputs.
assert len(decode['vin']) == 6
assert len(decode['vout']) == 1 if not chainparams['feeoutput'] else 2
# Some fees will be paid.
assert decode['vout'][0]['value'] < Decimal(amount * 6) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 6) / 10**8 - Decimal(0.0002)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
# If I cancel the first one, I can get those first 4 outputs.
discard = l1.rpc.txdiscard(prep['txid'])
assert discard['txid'] == prep['txid']
assert discard['unsigned_tx'] == prep['unsigned_tx']
prep3 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep3['unsigned_tx'])
assert decode['txid'] == prep3['txid']
# 4 inputs, 1 outputs.
assert len(decode['vin']) == 4
assert len(decode['vout']) == 1 if not chainparams['feeoutput'] else 2
# Some fees will be taken
assert decode['vout'][0]['value'] < Decimal(amount * 4) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 4) / 10**8 - Decimal(0.0002)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
# Cannot discard twice.
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
# Discard everything, we should now spend all inputs.
l1.rpc.txdiscard(prep2['txid'])
l1.rpc.txdiscard(prep3['txid'])
prep4 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep4['unsigned_tx'])
assert decode['txid'] == prep4['txid']
# 10 inputs, 1 outputs.
assert len(decode['vin']) == 10
assert len(decode['vout']) == 1 if not chainparams['feeoutput'] else 2
# Some fees will be taken
assert decode['vout'][0]['value'] < Decimal(amount * 10) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 10) / 10**8 - Decimal(0.0003)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
l1.rpc.txdiscard(prep4['txid'])
# Try passing in a utxo set
utxos = [utxo["txid"] + ":" + str(utxo["output"]) for utxo in l1.rpc.listfunds()["outputs"]][:4]
prep5 = l1.rpc.txprepare([{addr:
Millisatoshi(amount * 3.5 * 1000)}], utxos=utxos)
decode = bitcoind.rpc.decoderawtransaction(prep5['unsigned_tx'])
assert decode['txid'] == prep5['txid']
# Check that correct utxos are included
assert len(decode['vin']) == 4
vins = ["{}:{}".format(v['txid'], v['vout']) for v in decode['vin']]
for utxo in utxos:
assert utxo in vins
# We should have a change output, so this is exact
assert len(decode['vout']) == 3 if chainparams['feeoutput'] else 2
assert decode['vout'][1]['value'] == Decimal(amount * 3.5) / 10**8
assert decode['vout'][1]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][1]['scriptPubKey']['addresses'] == [addr]
# Discard prep4 and get all funds again
l1.rpc.txdiscard(prep5['txid'])
with pytest.raises(RpcError, match=r'this destination wants all satoshi. The count of outputs can\'t be more than 1'):
prep5 = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 1000)},
{addr: 'all'}])
prep5 = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 500 + 100000)},
{addr: Millisatoshi(amount * 3 * 500 - 100000)}])
decode = bitcoind.rpc.decoderawtransaction(prep5['unsigned_tx'])
assert decode['txid'] == prep5['txid']
# 4 inputs, 3 outputs(include change).
assert len(decode['vin']) == 4
assert len(decode['vout']) == 4 if chainparams['feeoutput'] else 3
# One output will be correct.
for i in range(3 + chainparams['feeoutput']):
if decode['vout'][i - 1]['value'] == Decimal('0.01500100'):
outnum1 = i - 1
elif decode['vout'][i - 1]['value'] == Decimal('0.01499900'):
outnum2 = i - 1
else:
changenum = i - 1
assert decode['vout'][outnum1]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][outnum1]['scriptPubKey']['addresses'] == [addr]
assert decode['vout'][outnum2]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][outnum2]['scriptPubKey']['addresses'] == [addr]
assert decode['vout'][changenum]['scriptPubKey']['type'] == 'witness_v0_keyhash'
def test_txsend(node_factory, bitcoind, chainparams):
amount = 1000000
l1 = node_factory.get_node(random_hsm=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
prep = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 1000)}])
out = l1.rpc.txsend(prep['txid'])
# Cannot discard after send!
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
wait_for(lambda: prep['txid'] in bitcoind.rpc.getrawmempool())
# Signed tx should have same txid
decode = bitcoind.rpc.decoderawtransaction(out['tx'])
assert decode['txid'] == prep['txid']
bitcoind.generate_block(1)
# Change output should appear.
if decode['vout'][0]['value'] == Decimal(amount * 3) / 10**8:
changenum = 1
elif decode['vout'][1]['value'] == Decimal(amount * 3) / 10**8:
changenum = 0
else:
assert False
# Those spent outputs are gone, but change output has arrived.
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10 - len(decode['vin']) + 1)
# Change address should appear in listfunds()
assert decode['vout'][changenum]['scriptPubKey']['addresses'][0] in [f['address'] for f in l1.rpc.listfunds()['outputs']]
def test_txprepare_restart(node_factory, bitcoind, chainparams):
amount = 1000000
l1 = node_factory.get_node(may_fail=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
# L1 will forget all about it.
l1.restart()
# It goes backwards in blockchain just in case there was a reorg. Wait.
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
# This will also work if we simply kill it.
l1.restart(clean=False)
# It goes backwards in blockchain just in case there was a reorg. Wait.
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
# It should have logged this for each output.
for i in decode['vin']:
assert l1.daemon.is_in_log('wallet: reserved output {}/{} reset to available'.format(i['txid'], i['vout']))
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
@unittest.skipIf(TEST_NETWORK != 'regtest', "Fee outputs throw off our output matching logic")
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Tests annotations which are compiled only with experimental features")
def test_transaction_annotations(node_factory, bitcoind):
l1, l2, l3 = node_factory.get_nodes(3)
l1.fundwallet(10**6)
# We should now have a transaction that gave us the funds in the
# transactions table...
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'confirmed')
out = outputs[0]
idx = out['output']
assert(idx in [0, 1] and out['value'] == 10**6)
# ... and it should have an annotation on the output reading 'deposit'
txs = l1.rpc.listtransactions()['transactions']
assert(len(txs) == 1)
tx = txs[0]
output = tx['outputs'][idx]
assert(output['type'] == 'deposit' and output['satoshis'] == '1000000000msat')
# ... and all other output should be change, and have no annotations
types = []
for i, o in enumerate(tx['outputs']):
if i == idx:
continue
if 'type' in o:
types.append(o['type'])
else:
types.append(None)
assert(set([None]) == set(types))
##########################################################################
# Let's now open a channel. The opener should get the funding transaction
# annotated as channel open and deposit.
l1.connect(l2)
fundingtx = l1.rpc.fundchannel(l2.info['id'], 10**5)
# We should have one output available, and it should be unconfirmed
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'unconfirmed')
# It should also match the funding txid:
assert(outputs[0]['txid'] == fundingtx['txid'])
# Confirm the channel and check that the output changed to confirmed
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l1, l2])
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'confirmed')
# We should have 2 transactions, the second one should be the funding tx
# (we are ordering by blockheight and txindex, so that order should be ok)
txs = l1.rpc.listtransactions()['transactions']
assert(len(txs) == 2 and txs[1]['hash'] == fundingtx['txid'])
# Check the annotated types
types = [o['type'] for o in txs[1]['outputs']]
changeidx = 0 if types[0] == 'deposit' else 1
fundidx = 1 - changeidx
assert(types[changeidx] == 'deposit' and types[fundidx] == 'channel_funding')
# And check the channel annotation on the funding output
peers = l1.rpc.listpeers()['peers']
assert(len(peers) == 1 and len(peers[0]['channels']) == 1)
scid = peers[0]['channels'][0]['short_channel_id']
assert(txs[1]['outputs'][fundidx]['channel'] == scid)
@unittest.skipIf(VALGRIND, "It does not play well with prompt and key derivation.")
def test_hsm_secret_encryption(node_factory):
l1 = node_factory.get_node(may_fail=True) # May fail when started without key
password = "<PASSWORD>"
# We need to simulate a terminal to use termios in `lightningd`.
master_fd, slave_fd = os.openpty()
# Test we can encrypt an already-existing and not encrypted hsm_secret
l1.stop()
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
id = l1.rpc.getinfo()["id"]
l1.stop()
# Test we cannot start the same wallet without specifying --encrypted-hsm
l1.daemon.opts.pop("encrypted-hsm")
with pytest.raises(subprocess.CalledProcessError, match=r'returned non-zero exit status 1'):
subprocess.check_call(l1.daemon.cmd_line)
# Test we cannot restore the same wallet with another password
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, stderr=subprocess.STDOUT,
wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password[2:].encode("utf-8"))
assert(l1.daemon.proc.wait() == 1)
assert(l1.daemon.is_in_log("Wrong password for encrypted hsm_secret."))
# Test we can restore the same wallet with the same password
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
assert id == l1.rpc.getinfo()["id"]
@unittest.skipIf(VALGRIND, "It does not play well with prompt and key derivation.")
def test_hsmtool_secret_decryption(node_factory):
l1 = node_factory.get_node()
password = "<PASSWORD>"
hsm_path = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "hsm_secret")
# We need to simulate a terminal to use termios in `lightningd`.
master_fd, slave_fd = os.openpty()
# Encrypt the master seed
l1.stop()
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
node_id = l1.rpc.getinfo()["id"]
l1.stop()
# We can't use a wrong password !
cmd_line = ["tools/hsmtool", "decrypt", hsm_path, "A wrong pass"]
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(cmd_line)
# Decrypt it with hsmtool
cmd_line[3] = password[:-1]
subprocess.check_call(cmd_line)
# Then test we can now start it without password
l1.daemon.opts.pop("encrypted-hsm")
l1.daemon.start(stdin=slave_fd, wait_for_initialized=True)
assert node_id == l1.rpc.getinfo()["id"]
l1.stop()
# Test we can encrypt it offline
cmd_line[1] = "encrypt"
subprocess.check_call(cmd_line)
# Now we need to pass the encrypted-hsm startup option
l1.stop()
with pytest.raises(subprocess.CalledProcessError, match=r'returned non-zero exit status 1'):
subprocess.check_call(l1.daemon.cmd_line)
l1.daemon.opts.update({"encrypted-hsm": None})
master_fd, slave_fd = os.openpty()
l1.daemon.start(stdin=slave_fd, stderr=subprocess.STDOUT,
wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
assert node_id == l1.rpc.getinfo()["id"]
l1.stop()
# And finally test that we can also decrypt if encrypted with hsmtool
cmd_line[1] = "decrypt"
subprocess.check_call(cmd_line)
l1.daemon.opts.pop("encrypted-hsm")
l1.daemon.start(stdin=slave_fd, wait_for_initialized=True)
assert node_id == l1.rpc.getinfo()["id"]
# this test does a 'listtransactions' on a yet unconfirmed channel
def test_fundchannel_listtransaction(node_factory, bitcoind):
l1, l2 = node_factory.get_nodes(2)
l1.fundwallet(10**6)
l1.connect(l2)
txid = l1.rpc.fundchannel(l2.info['id'], 10**5)['txid']
    # The next call used to warn about SQL accessing a null column
    # and crashed the daemon by accessing random or null memory
txs = l1.rpc.listtransactions()['transactions']
tx = [t for t in txs if t['hash'] == txid][0]
assert tx['blockheight'] == 0
def test_withdraw_nlocktime(node_factory):
"""
Test that we don't set the nLockTime to 0 for withdrawal transactions.
"""
l1 = node_factory.get_node(1)
l1.fundwallet(10**4)
addr = l1.rpc.newaddr()["bech32"]
tx = l1.rpc.withdraw(addr, 10**3)["tx"]
nlocktime = node_factory.bitcoind.rpc.decoderawtransaction(tx)["locktime"]
tip = node_factory.bitcoind.rpc.getblockcount()
assert nlocktime > 0 and nlocktime <= tip
@flaky
@unittest.skipIf(VALGRIND, "A big loop is used to check fuzz.")
def test_withdraw_nlocktime_fuzz(node_factory, bitcoind):
"""
Test that we eventually fuzz nLockTime for withdrawal transactions.
Marked flaky "just in case" as we fuzz from 0 to 100 with a 10%
probability.
"""
l1 = node_factory.get_node(1)
l1.fundwallet(10**8)
for i in range(100):
addr = l1.rpc.newaddr()["bech32"]
withdraw = l1.rpc.withdraw(addr, 10**3)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.
format(withdraw["txid"]))
decoded = bitcoind.rpc.decoderawtransaction(withdraw["tx"])
tip = node_factory.bitcoind.rpc.getblockcount()
assert decoded["locktime"] > 0
if decoded["locktime"] < tip:
return
else:
raise Exception("No transaction with fuzzed nLockTime !")
| [
"utils.only_one",
"subprocess.check_call",
"os.openpty",
"unittest.skipIf",
"os.path.join",
"time.sleep",
"pyln.client.Millisatoshi",
"pytest.raises",
"decimal.Decimal",
"utils.sync_blockheight"
] | [((371, 487), 'unittest.skipIf', 'unittest.skipIf', (["(TEST_NETWORK != 'regtest')", '"""Test relies on a number of example addresses valid only in regtest"""'], {}), "(TEST_NETWORK != 'regtest',\n 'Test relies on a number of example addresses valid only in regtest')\n", (386, 487), False, 'import unittest\n'), ((8919, 8964), 'unittest.skipIf', 'unittest.skipIf', (['(not COMPAT)', '"""needs COMPAT=1"""'], {}), "(not COMPAT, 'needs COMPAT=1')\n", (8934, 8964), False, 'import unittest\n'), ((21245, 21342), 'unittest.skipIf', 'unittest.skipIf', (["(TEST_NETWORK != 'regtest')", '"""Fee outputs throw off our output matching logic"""'], {}), "(TEST_NETWORK != 'regtest',\n 'Fee outputs throw off our output matching logic')\n", (21260, 21342), False, 'import unittest\n'), ((21340, 21458), 'unittest.skipIf', 'unittest.skipIf', (['(not EXPERIMENTAL_FEATURES)', '"""Tests annotations which are compiled only with experimental features"""'], {}), "(not EXPERIMENTAL_FEATURES,\n 'Tests annotations which are compiled only with experimental features')\n", (21355, 21458), False, 'import unittest\n'), ((24118, 24204), 'unittest.skipIf', 'unittest.skipIf', (['VALGRIND', '"""It does not play well with prompt and key derivation."""'], {}), "(VALGRIND,\n 'It does not play well with prompt and key derivation.')\n", (24133, 24204), False, 'import unittest\n'), ((25954, 26040), 'unittest.skipIf', 'unittest.skipIf', (['VALGRIND', '"""It does not play well with prompt and key derivation."""'], {}), "(VALGRIND,\n 'It does not play well with prompt and key derivation.')\n", (25969, 26040), False, 'import unittest\n'), ((29278, 29340), 'unittest.skipIf', 'unittest.skipIf', (['VALGRIND', '"""A big loop is used to check fuzz."""'], {}), "(VALGRIND, 'A big loop is used to check fuzz.')\n", (29293, 29340), False, 'import unittest\n'), ((1882, 1916), 'utils.sync_blockheight', 'sync_blockheight', (['l1.bitcoin', '[l1]'], {}), '(l1.bitcoin, [l1])\n', (1898, 1916), False, 'from utils import only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT, VALGRIND\n'), ((8754, 8767), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8764, 8767), False, 'import time\n'), ((9345, 9377), 'utils.sync_blockheight', 'sync_blockheight', (['bitcoind', '[l1]'], {}), '(bitcoind, [l1])\n', (9361, 9377), False, 'from utils import only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT, VALGRIND\n'), ((23167, 23203), 'utils.sync_blockheight', 'sync_blockheight', (['bitcoind', '[l1, l2]'], {}), '(bitcoind, [l1, l2])\n', (23183, 23203), False, 'from utils import only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT, VALGRIND\n'), ((24453, 24465), 'os.openpty', 'os.openpty', ([], {}), '()\n', (24463, 24465), False, 'import os\n'), ((26163, 26228), 'os.path.join', 'os.path.join', (['l1.daemon.lightning_dir', 'TEST_NETWORK', '"""hsm_secret"""'], {}), "(l1.daemon.lightning_dir, TEST_NETWORK, 'hsm_secret')\n", (26175, 26228), False, 'import os\n'), ((26324, 26336), 'os.openpty', 'os.openpty', ([], {}), '()\n', (26334, 26336), False, 'import os\n'), ((26989, 27020), 'subprocess.check_call', 'subprocess.check_call', (['cmd_line'], {}), '(cmd_line)\n', (27010, 27020), False, 'import subprocess\n'), ((27306, 27337), 'subprocess.check_call', 'subprocess.check_call', (['cmd_line'], {}), '(cmd_line)\n', (27327, 27337), False, 'import subprocess\n'), ((27637, 27649), 'os.openpty', 'os.openpty', ([], {}), '()\n', (27647, 27649), False, 'import os\n'), ((28097, 28128), 'subprocess.check_call', 'subprocess.check_call', 
(['cmd_line'], {}), '(cmd_line)\n', (28118, 28128), False, 'import subprocess\n'), ((1236, 1259), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (1249, 1259), False, 'import pytest\n'), ((1320, 1343), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (1333, 1343), False, 'import pytest\n'), ((1402, 1425), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (1415, 1425), False, 'import pytest\n'), ((1476, 1534), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '"""Cannot afford transaction"""'}), "(RpcError, match='Cannot afford transaction')\n", (1489, 1534), False, 'import pytest\n'), ((1827, 1842), 'decimal.Decimal', 'Decimal', (['"""0.02"""'], {}), "('0.02')\n", (1834, 1842), False, 'from decimal import Decimal\n'), ((2972, 2995), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (2985, 2995), False, 'import pytest\n'), ((3088, 3111), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (3101, 3111), False, 'import pytest\n'), ((3204, 3227), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (3217, 3227), False, 'import pytest\n'), ((3728, 3751), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (3741, 3751), False, 'import pytest\n'), ((3864, 3887), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (3877, 3887), False, 'import pytest\n'), ((4000, 4023), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (4013, 4023), False, 'import pytest\n'), ((4456, 4479), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (4469, 4479), False, 'import pytest\n'), ((4572, 4595), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (4585, 4595), False, 'import pytest\n'), ((4766, 4789), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (4779, 4789), False, 'import pytest\n'), ((4868, 4891), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (4881, 4891), False, 'import pytest\n'), ((4985, 5008), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (4998, 5008), False, 'import pytest\n'), ((5165, 5188), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (5178, 5188), False, 'import pytest\n'), ((5292, 5315), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (5305, 5315), False, 'import pytest\n'), ((5472, 5495), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (5485, 5495), False, 'import pytest\n'), ((6143, 6201), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '"""Cannot afford transaction"""'}), "(RpcError, match='Cannot afford transaction')\n", (6156, 6201), False, 'import pytest\n'), ((7791, 7814), 'pytest.raises', 'pytest.raises', (['RpcError'], {}), '(RpcError)\n', (7804, 7814), False, 'import pytest\n'), ((9468, 9555), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '""".* should be an amount in satoshis or all, not .*"""'}), "(RpcError, match=\n '.* should be an amount in satoshis or all, not .*')\n", (9481, 9555), False, 'import pytest\n'), ((9612, 9674), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '"""Need set \\\\\'satoshi\\\\\' field."""'}), '(RpcError, match="Need set \\\\\'satoshi\\\\\' field.")\n', (9625, 9674), False, 'import pytest\n'), ((9726, 9796), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '"""Could not parse destination address.*"""'}), "(RpcError, match='Could not parse destination 
address.*')\n", (9739, 9796), False, 'import pytest\n'), ((10192, 10254), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '"""Need set \\\\\'outputs\\\\\' field."""'}), '(RpcError, match="Need set \\\\\'outputs\\\\\' field.")\n', (10205, 10254), False, 'import pytest\n'), ((10340, 10402), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '"""Need set \\\\\'outputs\\\\\' field."""'}), '(RpcError, match="Need set \\\\\'outputs\\\\\' field.")\n', (10353, 10402), False, 'import pytest\n'), ((14257, 14312), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '"""not an unreleased txid"""'}), "(RpcError, match='not an unreleased txid')\n", (14270, 14312), False, 'import pytest\n'), ((16151, 16273), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '"""this destination wants all satoshi. The count of outputs can\\\\\'t be more than 1"""'}), '(RpcError, match=\n "this destination wants all satoshi. The count of outputs can\\\\\'t be more than 1"\n )\n', (16164, 16273), False, 'import pytest\n'), ((18277, 18332), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '"""not an unreleased txid"""'}), "(RpcError, match='not an unreleased txid')\n", (18290, 18332), False, 'import pytest\n'), ((20273, 20328), 'pytest.raises', 'pytest.raises', (['RpcError'], {'match': '"""not an unreleased txid"""'}), "(RpcError, match='not an unreleased txid')\n", (20286, 20328), False, 'import pytest\n'), ((25016, 25106), 'pytest.raises', 'pytest.raises', (['subprocess.CalledProcessError'], {'match': '"""returned non-zero exit status 1"""'}), "(subprocess.CalledProcessError, match=\n 'returned non-zero exit status 1')\n", (25029, 25106), False, 'import pytest\n'), ((25112, 25153), 'subprocess.check_call', 'subprocess.check_call', (['l1.daemon.cmd_line'], {}), '(l1.daemon.cmd_line)\n', (25133, 25153), False, 'import subprocess\n'), ((26836, 26880), 'pytest.raises', 'pytest.raises', (['subprocess.CalledProcessError'], {}), '(subprocess.CalledProcessError)\n', (26849, 26880), False, 'import pytest\n'), ((26890, 26921), 'subprocess.check_call', 'subprocess.check_call', (['cmd_line'], {}), '(cmd_line)\n', (26911, 26921), False, 'import subprocess\n'), ((27421, 27511), 'pytest.raises', 'pytest.raises', (['subprocess.CalledProcessError'], {'match': '"""returned non-zero exit status 1"""'}), "(subprocess.CalledProcessError, match=\n 'returned non-zero exit status 1')\n", (27434, 27511), False, 'import pytest\n'), ((27517, 27558), 'subprocess.check_call', 'subprocess.check_call', (['l1.daemon.cmd_line'], {}), '(l1.daemon.cmd_line)\n', (27538, 27558), False, 'import subprocess\n'), ((2599, 2616), 'utils.only_one', 'only_one', (['outputs'], {}), '(outputs)\n', (2607, 2616), False, 'from utils import only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT, VALGRIND\n'), ((8396, 8413), 'utils.only_one', 'only_one', (['outputs'], {}), '(outputs)\n', (8404, 8413), False, 'from utils import only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT, VALGRIND\n'), ((9910, 9936), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 100)'], {}), '(amount * 100)\n', (9922, 9936), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((9986, 10012), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 100)'], {}), '(amount * 100)\n', (9998, 10012), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((10061, 10087), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 100)'], {}), '(amount * 100)\n', (10073, 10087), False, 'from pyln.client import 
RpcError, Millisatoshi\n'), ((10135, 10161), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 100)'], {}), '(amount * 100)\n', (10147, 10161), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((10576, 10602), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 100)'], {}), '(amount * 100)\n', (10588, 10602), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((10705, 10731), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 100)'], {}), '(amount * 100)\n', (10717, 10731), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((10820, 10846), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 100)'], {}), '(amount * 100)\n', (10832, 10846), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((13122, 13141), 'decimal.Decimal', 'Decimal', (['(amount * 6)'], {}), '(amount * 6)\n', (13129, 13141), False, 'from decimal import Decimal\n'), ((13220, 13235), 'decimal.Decimal', 'Decimal', (['(0.0002)'], {}), '(0.0002)\n', (13227, 13235), False, 'from decimal import Decimal\n'), ((13960, 13979), 'decimal.Decimal', 'Decimal', (['(amount * 4)'], {}), '(amount * 4)\n', (13967, 13979), False, 'from decimal import Decimal\n'), ((14058, 14073), 'decimal.Decimal', 'Decimal', (['(0.0002)'], {}), '(0.0002)\n', (14065, 14073), False, 'from decimal import Decimal\n'), ((14853, 14873), 'decimal.Decimal', 'Decimal', (['(amount * 10)'], {}), '(amount * 10)\n', (14860, 14873), False, 'from decimal import Decimal\n'), ((14953, 14968), 'decimal.Decimal', 'Decimal', (['(0.0003)'], {}), '(0.0003)\n', (14960, 14968), False, 'from decimal import Decimal\n'), ((15886, 15907), 'decimal.Decimal', 'Decimal', (['(amount * 3.5)'], {}), '(amount * 3.5)\n', (15893, 15907), False, 'from decimal import Decimal\n'), ((16940, 16961), 'decimal.Decimal', 'Decimal', (['"""0.01500100"""'], {}), "('0.01500100')\n", (16947, 16961), False, 'from decimal import Decimal\n'), ((18686, 18705), 'decimal.Decimal', 'Decimal', (['(amount * 3)'], {}), '(amount * 3)\n', (18693, 18705), False, 'from decimal import Decimal\n'), ((9833, 9859), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 100)'], {}), '(amount * 100)\n', (9845, 9859), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((10448, 10474), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 100)'], {}), '(amount * 100)\n', (10460, 10474), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((11233, 11259), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 100)'], {}), '(amount * 100)\n', (11245, 11259), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((13190, 13209), 'decimal.Decimal', 'Decimal', (['(amount * 6)'], {}), '(amount * 6)\n', (13197, 13209), False, 'from decimal import Decimal\n'), ((14028, 14047), 'decimal.Decimal', 'Decimal', (['(amount * 4)'], {}), '(amount * 4)\n', (14035, 14047), False, 'from decimal import Decimal\n'), ((14922, 14942), 'decimal.Decimal', 'Decimal', (['(amount * 10)'], {}), '(amount * 10)\n', (14929, 14942), False, 'from decimal import Decimal\n'), ((15351, 15384), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 3.5 * 1000)'], {}), '(amount * 3.5 * 1000)\n', (15363, 15384), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((16427, 16466), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 3 * 500 + 100000)'], {}), '(amount * 3 * 500 + 100000)\n', (16439, 16466), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((16506, 16545), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 3 * 500 - 100000)'], {}), 
'(amount * 3 * 500 - 100000)\n', (16518, 16545), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((17038, 17059), 'decimal.Decimal', 'Decimal', (['"""0.01499900"""'], {}), "('0.01499900')\n", (17045, 17059), False, 'from decimal import Decimal\n'), ((18161, 18192), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 3 * 1000)'], {}), '(amount * 3 * 1000)\n', (18173, 18192), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((18776, 18795), 'decimal.Decimal', 'Decimal', (['(amount * 3)'], {}), '(amount * 3)\n', (18783, 18795), False, 'from decimal import Decimal\n'), ((11980, 12011), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 3 * 1000)'], {}), '(amount * 3 * 1000)\n', (11992, 12011), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((16306, 16337), 'pyln.client.Millisatoshi', 'Millisatoshi', (['(amount * 3 * 1000)'], {}), '(amount * 3 * 1000)\n', (16318, 16337), False, 'from pyln.client import RpcError, Millisatoshi\n'), ((12396, 12415), 'decimal.Decimal', 'Decimal', (['(amount * 3)'], {}), '(amount * 3)\n', (12403, 12415), False, 'from decimal import Decimal\n')] |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
import collections
import os
import torch
import math
from fairseq import bleu, data, options, utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
from fairseq.multiprocessing_trainer import MultiprocessingTrainer
from fairseq.progress_bar import progress_bar
from fairseq.sequence_generator import SequenceGenerator
def main():
parser = options.get_parser('Trainer')
dataset_args = options.add_dataset_args(parser)
dataset_args.add_argument('--max-tokens', default=0, type=int, metavar='N',
help='maximum number of tokens in a batch')
dataset_args.add_argument('--batch-size', default=32, type=int, metavar='N',
help='batch size')
dataset_args.add_argument('--test-batch-size', default=32, type=int, metavar='N',
help='batch size for test set')
dataset_args.add_argument('--valid-batch-size', default=32, type=int, metavar='N',
help='batch size for validation set')
dataset_args.add_argument('--train-subset', default='train', metavar='SPLIT',
choices=['train', 'valid', 'test'],
help='data subset to use for training (train, valid, test)')
dataset_args.add_argument('--valid-subset', default='valid', metavar='SPLIT',
                              help='comma separated list of data subsets '
                                   'to use for validation (train, valid, valid1, test, test1)')
dataset_args.add_argument('--test-subset', default='test', metavar='SPLIT',
                              help='comma separated list of data subsets '
                                   'to use for testing (train, valid, test)')
dataset_args.add_argument('--valid-script', nargs='+', metavar='PATH', help='path to external validation script (optional).')
options.add_optimization_args(parser)
options.add_checkpoint_args(parser)
options.add_model_args(parser)
args = utils.parse_args_and_arch(parser)
print(args)
if args.no_progress_bar:
progress_bar.enabled = False
progress_bar.print_interval = args.log_interval
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
torch.manual_seed(args.seed)
    # Setting args.max_tokens to 0 means "no limit" (same as setting it to None)
if args.max_tokens == 0:
args.max_tokens = None
# Load dataset
dataset = data.load_with_check(args.data, args.source_lang, args.target_lang)
if args.source_lang is None or args.target_lang is None:
# record inferred languages in args, so that it's saved in checkpoints
args.source_lang, args.target_lang = dataset.src, dataset.dst
print('| [{}] dictionary: {} types'.format(dataset.src, len(dataset.src_dict)))
print('| [{}] dictionary: {} types'.format(dataset.dst, len(dataset.dst_dict)))
for split in dataset.splits:
print('| {} {} {} examples'.format(args.data, split, len(dataset.splits[split])))
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
num_gpus = torch.cuda.device_count()
print('| using {} GPUs (with max tokens per GPU = {})'.format(num_gpus, args.max_tokens))
# Build model
print('| model {}'.format(args.arch))
model = utils.build_model(args, dataset)
criterion = utils.build_criterion(args, dataset)
# Start multiprocessing
trainer = MultiprocessingTrainer(args, model)
# Load the latest checkpoint if one is available
epoch, batch_offset = trainer.load_checkpoint(os.path.join(args.save_dir, args.restore_file))
# Train until the learning rate gets too small
val_loss = None
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch <= max_epoch:
# train for one epoch
train(args, epoch, batch_offset, trainer, criterion, dataset, num_gpus)
# evaluate on validate set
for k, subset in enumerate(args.valid_subset.split(',')):
val_loss = validate(args, epoch, trainer, criterion, dataset, subset, num_gpus)
if k == 0:
if not args.no_save:
# save checkpoint
trainer.save_checkpoint(args, epoch, 0, val_loss, validation_script=args.valid_script)
# only use first validation loss to update the learning schedule
lr = trainer.lr_step(val_loss, epoch)
epoch += 1
batch_offset = 0
train_meter.stop()
print('| done training in {:.1f} seconds'.format(train_meter.sum))
# Generate on test set and compute BLEU score
for beam in [1, 5, 10, 20]:
for subset in args.test_subset.split(','):
scorer = score_test(args, trainer.get_model(), dataset, subset, beam,
cuda_device=(0 if num_gpus > 0 else None))
print('| Test on {} with beam={}: {}'.format(subset, beam, scorer.result_string()))
# Stop multiprocessing
trainer.stop()
def train(args, epoch, batch_offset, trainer, criterion, dataset, num_gpus):
"""Train the model for one epoch."""
itr = dataset.dataloader(args.train_subset, batch_size=args.batch_size,
test_batch_size=args.test_batch_size,
valid_batch_size=args.valid_batch_size,
num_workers=args.workers,
max_tokens=args.max_tokens, seed=args.seed, epoch=epoch,
max_positions=args.max_positions,
sample_without_replacement=args.sample_without_replacement)
loss_meter = AverageMeter()
bsz_meter = AverageMeter() # sentences per batch
wpb_meter = AverageMeter() # words per batch
wps_meter = TimeMeter() # words per second
clip_meter = AverageMeter() # % of updates clipped
gnorm_meter = AverageMeter() # gradient norm
desc = '| epoch {:03d}'.format(epoch)
lr = trainer.get_lr()
with progress_bar(itr, desc, leave=False) as t:
for i, sample in data.skip_group_enumerator(t, num_gpus, batch_offset):
loss, grad_norm = trainer.train_step(sample, criterion)
ntokens = sum(s['ntokens'] for s in sample)
src_size = sum(s['src_tokens'].size(0) for s in sample)
loss_meter.update(loss, ntokens)
bsz_meter.update(src_size)
wpb_meter.update(ntokens)
wps_meter.update(ntokens)
clip_meter.update(1 if grad_norm > args.clip_norm else 0)
gnorm_meter.update(grad_norm)
t.set_postfix(collections.OrderedDict([
('loss', '{:.2f} ({:.2f})'.format(loss, loss_meter.avg)),
('wps', '{:5d}'.format(round(wps_meter.avg))),
('wpb', '{:5d}'.format(round(wpb_meter.avg))),
('bsz', '{:5d}'.format(round(bsz_meter.avg))),
('lr', lr),
('clip', '{:3.0f}%'.format(clip_meter.avg * 100)),
('gnorm', '{:.4f}'.format(gnorm_meter.avg)),
]))
if i == 0:
# ignore the first mini-batch in words-per-second calculation
wps_meter.reset()
if args.save_interval > 0 and (i + 1) % args.save_interval == 0:
trainer.save_checkpoint(args, epoch, i + 1)
fmt = desc + ' | train loss {:2.2f} | train ppl {:3.2f}'
fmt += ' | s/checkpoint {:7d} | words/s {:6d} | words/batch {:6d}'
fmt += ' | bsz {:5d} | lr {:0.6f} | clip {:3.0f}% | gnorm {:.4f}'
t.write(fmt.format(loss_meter.avg, math.pow(2, loss_meter.avg),
round(wps_meter.elapsed_time),
round(wps_meter.avg),
round(wpb_meter.avg),
round(bsz_meter.avg),
lr, clip_meter.avg * 100,
gnorm_meter.avg))
def validate(args, epoch, trainer, criterion, dataset, subset, ngpus):
"""Evaluate the model on the validation set and return the average loss."""
itr = dataset.dataloader(subset, batch_size=None,
max_tokens=args.max_tokens,
max_positions=args.max_positions)
loss_meter = AverageMeter()
desc = '| epoch {:03d} | valid on \'{}\' subset'.format(epoch, subset)
with progress_bar(itr, desc, leave=False) as t:
for _, sample in data.skip_group_enumerator(t, ngpus):
ntokens = sum(s['ntokens'] for s in sample)
loss = trainer.valid_step(sample, criterion)
loss_meter.update(loss, ntokens)
t.set_postfix(loss='{:.2f}'.format(loss_meter.avg))
val_loss = loss_meter.avg
t.write(desc + ' | valid loss {:2.2f} | valid ppl {:3.2f}'
.format(val_loss, math.pow(2, val_loss)))
# update and return the learning rate
return val_loss
def score_test(args, model, dataset, subset, beam, cuda_device):
"""Evaluate the model on the test set and return the BLEU scorer."""
translator = SequenceGenerator([model], dataset.dst_dict, beam_size=beam)
if torch.cuda.is_available():
translator.cuda()
scorer = bleu.Scorer(dataset.dst_dict.pad(), dataset.dst_dict.eos(), dataset.dst_dict.unk())
itr = dataset.dataloader(subset, batch_size=4, max_positions=args.max_positions)
for _, _, ref, hypos in translator.generate_batched_itr(itr, cuda_device=cuda_device):
scorer.add(ref.int().cpu(), hypos[0]['tokens'].int().cpu())
return scorer
if __name__ == '__main__':
main()
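# Illustrative invocation (a sketch, not taken from any fairseq documentation). The
# --max-tokens/--batch-size/--train-subset/--valid-subset/--test-subset flags are the
# ones defined in main() above; the data-directory positional and the --arch/--save-dir
# spellings come from fairseq's options module and are assumptions here.
#
#   python train.py data-bin/my-corpus --arch fconv \
#       --max-tokens 4000 --train-subset train --valid-subset valid \
#       --test-subset test --save-dir checkpoints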
| [
"fairseq.progress_bar.progress_bar",
"torch.cuda.device_count",
"torch.cuda.is_available",
"fairseq.data.load_with_check",
"os.path.exists",
"fairseq.options.add_dataset_args",
"fairseq.utils.build_criterion",
"fairseq.options.add_checkpoint_args",
"fairseq.sequence_generator.SequenceGenerator",
"fairseq.options.get_parser",
"fairseq.data.skip_group_enumerator",
"fairseq.utils.parse_args_and_arch",
"torch.manual_seed",
"fairseq.meters.StopwatchMeter",
"fairseq.options.add_optimization_args",
"fairseq.utils.build_model",
"fairseq.multiprocessing_trainer.MultiprocessingTrainer",
"fairseq.meters.AverageMeter",
"os.makedirs",
"math.pow",
"os.path.join",
"fairseq.options.add_model_args",
"fairseq.meters.TimeMeter"
] | [((654, 683), 'fairseq.options.get_parser', 'options.get_parser', (['"""Trainer"""'], {}), "('Trainer')\n", (672, 683), False, 'from fairseq import bleu, data, options, utils\n'), ((703, 735), 'fairseq.options.add_dataset_args', 'options.add_dataset_args', (['parser'], {}), '(parser)\n', (727, 735), False, 'from fairseq import bleu, data, options, utils\n'), ((2180, 2217), 'fairseq.options.add_optimization_args', 'options.add_optimization_args', (['parser'], {}), '(parser)\n', (2209, 2217), False, 'from fairseq import bleu, data, options, utils\n'), ((2222, 2257), 'fairseq.options.add_checkpoint_args', 'options.add_checkpoint_args', (['parser'], {}), '(parser)\n', (2249, 2257), False, 'from fairseq import bleu, data, options, utils\n'), ((2262, 2292), 'fairseq.options.add_model_args', 'options.add_model_args', (['parser'], {}), '(parser)\n', (2284, 2292), False, 'from fairseq import bleu, data, options, utils\n'), ((2305, 2338), 'fairseq.utils.parse_args_and_arch', 'utils.parse_args_and_arch', (['parser'], {}), '(parser)\n', (2330, 2338), False, 'from fairseq import bleu, data, options, utils\n'), ((2560, 2588), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2577, 2588), False, 'import torch\n'), ((2751, 2818), 'fairseq.data.load_with_check', 'data.load_with_check', (['args.data', 'args.source_lang', 'args.target_lang'], {}), '(args.data, args.source_lang, args.target_lang)\n', (2771, 2818), False, 'from fairseq import bleu, data, options, utils\n'), ((3445, 3470), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3468, 3470), False, 'import torch\n'), ((3639, 3671), 'fairseq.utils.build_model', 'utils.build_model', (['args', 'dataset'], {}), '(args, dataset)\n', (3656, 3671), False, 'from fairseq import bleu, data, options, utils\n'), ((3688, 3724), 'fairseq.utils.build_criterion', 'utils.build_criterion', (['args', 'dataset'], {}), '(args, dataset)\n', (3709, 3724), False, 'from fairseq import bleu, data, options, utils\n'), ((3768, 3803), 'fairseq.multiprocessing_trainer.MultiprocessingTrainer', 'MultiprocessingTrainer', (['args', 'model'], {}), '(args, model)\n', (3790, 3803), False, 'from fairseq.multiprocessing_trainer import MultiprocessingTrainer\n'), ((4115, 4131), 'fairseq.meters.StopwatchMeter', 'StopwatchMeter', ([], {}), '()\n', (4129, 4131), False, 'from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\n'), ((6069, 6083), 'fairseq.meters.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6081, 6083), False, 'from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\n'), ((6100, 6114), 'fairseq.meters.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6112, 6114), False, 'from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\n'), ((6156, 6170), 'fairseq.meters.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6168, 6170), False, 'from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\n'), ((6208, 6219), 'fairseq.meters.TimeMeter', 'TimeMeter', ([], {}), '()\n', (6217, 6219), False, 'from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\n'), ((6262, 6276), 'fairseq.meters.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6274, 6276), False, 'from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\n'), ((6320, 6334), 'fairseq.meters.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6332, 6334), False, 'from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\n'), ((8714, 8728), 'fairseq.meters.AverageMeter', 'AverageMeter', ([], {}), '()\n', 
(8726, 8728), False, 'from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\n'), ((9523, 9583), 'fairseq.sequence_generator.SequenceGenerator', 'SequenceGenerator', (['[model]', 'dataset.dst_dict'], {'beam_size': 'beam'}), '([model], dataset.dst_dict, beam_size=beam)\n', (9540, 9583), False, 'from fairseq.sequence_generator import SequenceGenerator\n'), ((9591, 9616), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9614, 9616), False, 'import torch\n'), ((2490, 2519), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (2504, 2519), False, 'import os\n'), ((2529, 2555), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {}), '(args.save_dir)\n', (2540, 2555), False, 'import os\n'), ((3333, 3358), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3356, 3358), False, 'import torch\n'), ((3908, 3954), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.restore_file'], {}), '(args.save_dir, args.restore_file)\n', (3920, 3954), False, 'import os\n'), ((6430, 6466), 'fairseq.progress_bar.progress_bar', 'progress_bar', (['itr', 'desc'], {'leave': '(False)'}), '(itr, desc, leave=False)\n', (6442, 6466), False, 'from fairseq.progress_bar import progress_bar\n'), ((6498, 6551), 'fairseq.data.skip_group_enumerator', 'data.skip_group_enumerator', (['t', 'num_gpus', 'batch_offset'], {}), '(t, num_gpus, batch_offset)\n', (6524, 6551), False, 'from fairseq import bleu, data, options, utils\n'), ((8814, 8850), 'fairseq.progress_bar.progress_bar', 'progress_bar', (['itr', 'desc'], {'leave': '(False)'}), '(itr, desc, leave=False)\n', (8826, 8850), False, 'from fairseq.progress_bar import progress_bar\n'), ((8882, 8918), 'fairseq.data.skip_group_enumerator', 'data.skip_group_enumerator', (['t', 'ngpus'], {}), '(t, ngpus)\n', (8908, 8918), False, 'from fairseq import bleu, data, options, utils\n'), ((8037, 8064), 'math.pow', 'math.pow', (['(2)', 'loss_meter.avg'], {}), '(2, loss_meter.avg)\n', (8045, 8064), False, 'import math\n'), ((9278, 9299), 'math.pow', 'math.pow', (['(2)', 'val_loss'], {}), '(2, val_loss)\n', (9286, 9299), False, 'import math\n')] |
from django.core.management.base import BaseCommand
from django.utils import termcolors
from jsonschema import Draft4Validator
from jsonschema.exceptions import SchemaError
import json
class Command(BaseCommand):
can_import_settings = True
@property
def _jsonschema_exist(self):
from django.conf import settings
if not hasattr(settings, 'SIMPLE_JSONSCHEMA'):
return False
return True
@property
def _jsonschema_errors(self):
from django.conf import settings
errors = []
schemas = settings.SIMPLE_JSONSCHEMA
for url, schema in schemas.items():
try:
Draft4Validator.check_schema(schema)
except SchemaError as e:
errors.append({
'url': url,
'error': e,
'schema': json.dumps(schema, indent=4, sort_keys=True)
})
return errors
def handle(self, *args, **options):
success = termcolors.make_style(fg='green')
error = termcolors.make_style(fg='red')
if not self._jsonschema_exist:
            not_exist = '[' + error('ERROR') + '] SIMPLE_JSONSCHEMA does not exist in settings.'
self.stdout.write(not_exist)
return
errors = self._jsonschema_errors
if len(errors):
for e in errors:
title = '\n[' + error('ERROR') + '] schema of ' + str(e['url']) + ' is invalid.'
self.stdout.write(title)
self.stdout.write('path: ' + str(list(e['error'].path)))
self.stdout.write('message: ' + e['error'].message)
self.stdout.write('schema:\n' + e['schema'] + '\n')
else:
self.stdout.write('[' + success('SUCCESS') + '] All jsonschemas are OK.')
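# Illustrative settings sketch (the URL key and schema body below are made-up examples):
# the command expects settings.SIMPLE_JSONSCHEMA to map an identifier (e.g. a URL) to a
# JSON Schema dict, each of which is passed to Draft4Validator.check_schema() above.
#
#   SIMPLE_JSONSCHEMA = {
#       '/api/things/': {
#           'type': 'object',
#           'properties': {'name': {'type': 'string'}},
#           'required': ['name'],
#       },
#   }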
| [
"jsonschema.Draft4Validator.check_schema",
"django.utils.termcolors.make_style",
"json.dumps"
] | [((1013, 1046), 'django.utils.termcolors.make_style', 'termcolors.make_style', ([], {'fg': '"""green"""'}), "(fg='green')\n", (1034, 1046), False, 'from django.utils import termcolors\n'), ((1063, 1094), 'django.utils.termcolors.make_style', 'termcolors.make_style', ([], {'fg': '"""red"""'}), "(fg='red')\n", (1084, 1094), False, 'from django.utils import termcolors\n'), ((668, 704), 'jsonschema.Draft4Validator.check_schema', 'Draft4Validator.check_schema', (['schema'], {}), '(schema)\n', (696, 704), False, 'from jsonschema import Draft4Validator\n'), ((868, 912), 'json.dumps', 'json.dumps', (['schema'], {'indent': '(4)', 'sort_keys': '(True)'}), '(schema, indent=4, sort_keys=True)\n', (878, 912), False, 'import json\n')] |
import cv2, time
import numpy as np
import Tkinter
from PIL import Image, ImageTk  # needed by imshow() below (provided by Pillow)
"""
Wraps up some interfaces to opencv user interface methods (displaying
image frames, event handling, etc).
If desired, an alternative UI could be built and imported into get_pulse.py
instead. Opencv is used to perform much of the data analysis, but there is no
reason it has to be used to handle the UI as well. It just happens to be very
effective for our purposes.
"""
def resize(*args, **kwargs):
return cv2.resize(*args, **kwargs)
def moveWindow(*args,**kwargs):
return
def imshow(root,args,kwargs):
    # NOTE: assumes `kwargs` carries the BGR frame to display on the Tkinter label.
    image = cv2.cvtColor(kwargs, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(image)
    image = ImageTk.PhotoImage(image)
    return Tkinter.Label(root, image=image).pack()
#return cv2.imshow(*args,**kwargs)
def destroyWindow(*args,**kwargs):
return cv2.destroyWindow(*args,**kwargs)
def waitKey(*args,**kwargs):
return cv2.waitKey(*args,**kwargs)
"""
The rest of this file defines some GUI plotting functionality. There are plenty
of other ways to do simple x-y data plots in python, but this application uses
cv2.imshow to do real-time data plotting and handle user interaction.
This is entirely independent of the data calculation functions, so it can be
replaced in the get_pulse.py application easily.
"""
def combine(left, right):
"""Stack images horizontally.
"""
h = max(left.shape[0], right.shape[0])
w = left.shape[1] + right.shape[1]
hoff = left.shape[0]
shape = list(left.shape)
shape[0] = h
shape[1] = w
comb = np.zeros(tuple(shape),left.dtype)
# left will be on left, aligned top, with right on right
comb[:left.shape[0],:left.shape[1]] = left
comb[:right.shape[0],left.shape[1]:] = right
return comb
def plotXY(data,size = (280,640),margin = 25,name = "data",labels=[], skip = [],
showmax = [], bg = None,label_ndigits = [], showmax_digits=[]):
for x,y in data:
if len(x) < 2 or len(y) < 2:
return
n_plots = len(data)
w = float(size[1])
h = size[0]/float(n_plots)
z = np.zeros((size[0],size[1],3))
if isinstance(bg,np.ndarray):
wd = int(bg.shape[1]/bg.shape[0]*h )
bg = cv2.resize(bg,(wd,int(h)))
if len(bg.shape) == 3:
r = combine(bg[:,:,0],z[:,:,0])
g = combine(bg[:,:,1],z[:,:,1])
b = combine(bg[:,:,2],z[:,:,2])
else:
r = combine(bg,z[:,:,0])
g = combine(bg,z[:,:,1])
b = combine(bg,z[:,:,2])
z = cv2.merge([r,g,b])[:,:-wd,]
i = 0
P = []
for x,y in data:
x = np.array(x)
y = -np.array(y)
xx = (w-2*margin)*(x - x.min()) / (x.max() - x.min())+margin
yy = (h-2*margin)*(y - y.min()) / (y.max() - y.min())+margin + i*h
mx = max(yy)
if labels:
if labels[i]:
for ii in range(len(x)):
if ii%skip[i] == 0:
col = (255,255,255)
ss = '{0:.%sf}' % label_ndigits[i]
ss = ss.format(x[ii])
cv2.putText(z,ss,(int(xx[ii]),int((i+1)*h)),
cv2.FONT_HERSHEY_PLAIN,1,col)
if showmax:
if showmax[i]:
col = (0,255,0)
ii = np.argmax(-y)
ss = '{0:.%sf} %s' % (showmax_digits[i], showmax[i])
ss = ss.format(x[ii])
#"%0.0f %s" % (x[ii], showmax[i])
cv2.putText(z,ss,(int(xx[ii]),int((yy[ii]))),
cv2.FONT_HERSHEY_PLAIN,2,col)
try:
pts = np.array([[x_, y_] for x_, y_ in zip(xx,yy)],np.int32)
i+=1
P.append(pts)
except ValueError:
pass #temporary
"""
#Polylines seems to have some trouble rendering multiple polys for some people
for p in P:
cv2.polylines(z, [p], False, (255,255,255),1)
"""
#hack-y alternative:
for p in P:
for i in range(len(p)-1):
cv2.line(z,tuple(p[i]),tuple(p[i+1]), (255,255,255),1)
return z
#cv2.imshow(name,z)
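if __name__ == "__main__":
    # Minimal sketch (not part of the original application): render two synthetic
    # traces with plotXY() and save the resulting BGR image to disk.
    xs = np.linspace(0, 10, 200)
    demo = plotXY([[xs, np.sin(xs)], [xs, np.cos(xs)]],
                  size=(280, 640),
                  labels=[True, True], skip=[40, 40],
                  showmax=["max sin", "max cos"],
                  label_ndigits=[1, 1], showmax_digits=[2, 2])
    cv2.imwrite("plot_demo.png", demo.astype(np.uint8))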
| [
"Tkinter.Label",
"cv2.merge",
"cv2.destroyWindow",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"cv2.cvtColor",
"cv2.resize",
"cv2.waitKey"
] | [((468, 495), 'cv2.resize', 'cv2.resize', (['*args'], {}), '(*args, **kwargs)\n', (478, 495), False, 'import cv2, time\n'), ((583, 628), 'cv2.cvtColor', 'cv2.cvtColor', (['output_frame', 'cv2.COLOR_BGR2RGB'], {}), '(output_frame, cv2.COLOR_BGR2RGB)\n', (595, 628), False, 'import cv2, time\n'), ((845, 879), 'cv2.destroyWindow', 'cv2.destroyWindow', (['*args'], {}), '(*args, **kwargs)\n', (862, 879), False, 'import cv2, time\n'), ((920, 948), 'cv2.waitKey', 'cv2.waitKey', (['*args'], {}), '(*args, **kwargs)\n', (931, 948), False, 'import cv2, time\n'), ((2127, 2158), 'numpy.zeros', 'np.zeros', (['(size[0], size[1], 3)'], {}), '((size[0], size[1], 3))\n', (2135, 2158), True, 'import numpy as np\n'), ((2672, 2683), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2680, 2683), True, 'import numpy as np\n'), ((713, 746), 'Tkinter.Label', 'Tkinter.Label', (['root'], {'image': 'kwargs'}), '(root, image=kwargs)\n', (726, 746), False, 'import Tkinter\n'), ((2581, 2601), 'cv2.merge', 'cv2.merge', (['[r, g, b]'], {}), '([r, g, b])\n', (2590, 2601), False, 'import cv2, time\n'), ((2697, 2708), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2705, 2708), True, 'import numpy as np\n'), ((3409, 3422), 'numpy.argmax', 'np.argmax', (['(-y)'], {}), '(-y)\n', (3418, 3422), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2018 NLTK Project
# Author: <NAME> <<EMAIL>>
# based on previous (nltk2) version by
# <NAME>, <NAME>, <NAME>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, division
from collections import defaultdict, Counter
from nltk.tag import TaggerI
from nltk.tbl import Feature, Template
from nltk import jsontags
######################################################################
# Brill Templates
######################################################################
@jsontags.register_tag
class Word(Feature):
"""
Feature which examines the text (word) of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Word'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's text."""
return tokens[index][0]
@jsontags.register_tag
class Pos(Feature):
"""
Feature which examines the tags of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Pos'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's tag."""
return tokens[index][1]
def nltkdemo18():
"""
Return 18 templates, from the original nltk demo, in multi-feature syntax
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-3, -2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-1]), Word([1])),
]
def nltkdemo18plus():
"""
Return 18 templates, from the original nltk demo, and additionally a few
multi-feature ones (the motivation is easy comparison with nltkdemo18)
"""
return nltkdemo18() + [
Template(Word([-1]), Pos([1])),
Template(Pos([-1]), Word([1])),
Template(Word([-1]), Word([0]), Pos([1])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-1]), Word([0]), Pos([1])),
]
def fntbl37():
"""
Return 37 templates taken from the postagging task of the
fntbl distribution http://www.cs.jhu.edu/~rflorian/fntbl/
(37 is after excluding a handful which do not condition on Pos[0];
fntbl can do that but the current nltk implementation cannot.)
"""
return [
Template(Word([0]), Word([1]), Word([2])),
Template(Word([-1]), Word([0]), Word([1])),
Template(Word([0]), Word([-1])),
Template(Word([0]), Word([1])),
Template(Word([0]), Word([2])),
Template(Word([0]), Word([-2])),
Template(Word([1, 2])),
Template(Word([-2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-3, -2, -1])),
Template(Word([0]), Pos([2])),
Template(Word([0]), Pos([-2])),
Template(Word([0]), Pos([1])),
Template(Word([0]), Pos([-1])),
Template(Word([0])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([1])),
Template(Word([-1])),
Template(Pos([-1]), Pos([1])),
Template(Pos([1]), Pos([2])),
Template(Pos([-1]), Pos([-2])),
Template(Pos([1])),
Template(Pos([-1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([1, 2, 3])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([-2, -1])),
Template(Pos([1]), Word([0]), Word([1])),
Template(Pos([1]), Word([0]), Word([-1])),
Template(Pos([-1]), Word([-1]), Word([0])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Pos([1]), Pos([2]), Word([1]))
]
def brill24():
"""
Return 24 templates of the seminal TBL paper, Brill (1995)
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-1, 0])),
Template(Word([0, 1])),
Template(Word([0])),
Template(Word([-1]), Pos([-1])),
Template(Word([1]), Pos([1])),
Template(Word([0]), Word([-1]), Pos([-1])),
Template(Word([0]), Word([1]), Pos([1])),
]
def describe_template_sets():
"""
    Print the available template sets in this demo, with a short description.
"""
import inspect
import sys
# a bit of magic to get all functions in this module
templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
for (name, obj) in templatesets:
if name == "describe_template_sets":
continue
print(name, obj.__doc__, "\n")
######################################################################
# The Brill Tagger
######################################################################
@jsontags.register_tag
class BrillTagger(TaggerI):
"""
Brill's transformational rule-based tagger. Brill taggers use an
initial tagger (such as ``tag.DefaultTagger``) to assign an initial
tag sequence to a text; and then apply an ordered list of
transformational rules to correct the tags of individual tokens.
These transformation rules are specified by the ``TagRule``
interface.
Brill taggers can be created directly, from an initial tagger and
a list of transformational rules; but more often, Brill taggers
are created by learning rules from a training corpus, using one
of the TaggerTrainers available.
"""
json_tag = 'nltk.tag.BrillTagger'
def __init__(self, initial_tagger, rules, training_stats=None):
"""
:param initial_tagger: The initial tagger
:type initial_tagger: TaggerI
:param rules: An ordered list of transformation rules that
should be used to correct the initial tagging.
:type rules: list(TagRule)
:param training_stats: A dictionary of statistics collected
during training, for possible later use
:type training_stats: dict
"""
self._initial_tagger = initial_tagger
self._rules = tuple(rules)
self._training_stats = training_stats
def encode_json_obj(self):
return self._initial_tagger, self._rules, self._training_stats
@classmethod
def decode_json_obj(cls, obj):
_initial_tagger, _rules, _training_stats = obj
return cls(_initial_tagger, _rules, _training_stats)
def rules(self):
"""
Return the ordered list of transformation rules that this tagger has learnt
:return: the ordered list of transformation rules that correct the initial tagging
:rtype: list of Rules
"""
return self._rules
def train_stats(self, statistic=None):
"""
Return a named statistic collected during training, or a dictionary of all
available statistics if no name given
:param statistic: name of statistic
:type statistic: str
:return: some statistic collected during training of this tagger
:rtype: any (but usually a number)
"""
if statistic is None:
return self._training_stats
else:
return self._training_stats.get(statistic)
def tag(self, tokens):
# Inherit documentation from TaggerI
# Run the initial tagger.
tagged_tokens = self._initial_tagger.tag(tokens)
# Create a dictionary that maps each tag to a list of the
# indices of tokens that have that tag.
tag_to_positions = defaultdict(set)
for i, (token, tag) in enumerate(tagged_tokens):
tag_to_positions[tag].add(i)
# Apply each rule, in order. Only try to apply rules at
# positions that have the desired original tag.
for rule in self._rules:
# Find the positions where it might apply
positions = tag_to_positions.get(rule.original_tag, [])
# Apply the rule at those positions.
changed = rule.apply(tagged_tokens, positions)
# Update tag_to_positions with the positions of tags that
# were modified.
for i in changed:
tag_to_positions[rule.original_tag].remove(i)
tag_to_positions[rule.replacement_tag].add(i)
return tagged_tokens
def print_template_statistics(self, test_stats=None, printunused=True):
"""
Print a list of all templates, ranked according to efficiency.
If test_stats is available, the templates are ranked according to their
relative contribution (summed for all rules created from a given template,
weighted by score) to the performance on the test set. If no test_stats, then
statistics collected during training are used instead. There is also
an unweighted measure (just counting the rules). This is less informative,
though, as many low-score rules will appear towards end of training.
:param test_stats: dictionary of statistics collected during testing
:type test_stats: dict of str -> any (but usually numbers)
:param printunused: if True, print a list of all unused templates
:type printunused: bool
:return: None
:rtype: None
"""
tids = [r.templateid for r in self._rules]
train_stats = self.train_stats()
trainscores = train_stats['rulescores']
assert len(trainscores) == len(tids), "corrupt statistics: " \
"{0} train scores for {1} rules".format(trainscores, tids)
template_counts = Counter(tids)
weighted_traincounts = Counter()
for (tid, score) in zip(tids, trainscores):
weighted_traincounts[tid] += score
tottrainscores = sum(trainscores)
# det_tplsort() is for deterministic sorting;
# the otherwise convenient Counter.most_common() unfortunately
# does not break ties deterministically
# between python versions and will break cross-version tests
def det_tplsort(tpl_value):
return (tpl_value[1], repr(tpl_value[0]))
def print_train_stats():
print("TEMPLATE STATISTICS (TRAIN) {0} templates, {1} rules)".format(
len(template_counts),
len(tids))
)
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
head = "#ID | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
train_tplscores = sorted(weighted_traincounts.items(), key=det_tplsort, reverse=True)
for (tid, trainscore) in train_tplscores:
s = "{0} | {1:5d} {2:5.3f} |{3:4d} {4:.3f} | {5}".format(
tid,
trainscore,
trainscore/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_testtrain_stats():
testscores = test_stats['rulescores']
print("TEMPLATE STATISTICS (TEST AND TRAIN) ({0} templates, {1} rules)".format(
len(template_counts),
len(tids)),
)
print("TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats))
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
weighted_testcounts = Counter()
for (tid, score) in zip(tids, testscores):
weighted_testcounts[tid] += score
tottestscores = sum(testscores)
head = "#ID | Score (test) | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
test_tplscores = sorted(weighted_testcounts.items(), key=det_tplsort, reverse=True)
for (tid, testscore) in test_tplscores:
s = "{0:s} |{1:5d} {2:6.3f} | {3:4d} {4:.3f} |{5:4d} {6:.3f} | {7:s}".format(
tid,
testscore,
testscore/tottestscores,
weighted_traincounts[tid],
weighted_traincounts[tid]/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_unused_templates():
usedtpls = set(int(tid) for tid in tids)
unused = [(tid, tpl) for (tid, tpl) in enumerate(Template.ALLTEMPLATES) if tid not in usedtpls]
print("UNUSED TEMPLATES ({0})".format(len(unused)))
for (tid, tpl) in unused:
print("{0:03d} {1:s}".format(tid, str(tpl)))
if test_stats is None:
print_train_stats()
else:
print_testtrain_stats()
print()
if printunused:
print_unused_templates()
print()
def batch_tag_incremental(self, sequences, gold):
"""
Tags by applying each rule to the entire corpus (rather than all rules to a
single sequence). The point is to collect statistics on the test set for
individual rules.
NOTE: This is inefficient (does not build any index, so will traverse the entire
corpus N times for N rules) -- usually you would not care about statistics for
individual rules and thus use batch_tag() instead
:param sequences: lists of token sequences (sentences, in some applications) to be tagged
:type sequences: list of list of strings
:param gold: the gold standard
:type gold: list of list of strings
:returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule))
"""
def counterrors(xs):
return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair))
testing_stats = {}
testing_stats['tokencount'] = sum(len(t) for t in sequences)
testing_stats['sequencecount'] = len(sequences)
tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences]
testing_stats['initialerrors'] = counterrors(tagged_tokenses)
testing_stats['initialacc'] = 1 - testing_stats['initialerrors']/testing_stats['tokencount']
# Apply each rule to the entire corpus, in order
errors = [testing_stats['initialerrors']]
for rule in self._rules:
for tagged_tokens in tagged_tokenses:
rule.apply(tagged_tokens)
errors.append(counterrors(tagged_tokenses))
testing_stats['rulescores'] = [err0 - err1 for (err0, err1) in zip(errors, errors[1:])]
testing_stats['finalerrors'] = errors[-1]
testing_stats['finalacc'] = 1 - testing_stats['finalerrors']/testing_stats['tokencount']
return (tagged_tokenses, testing_stats)
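# Illustrative sketch (not part of this module): a BrillTagger is normally produced by
# nltk.tag.brill_trainer.BrillTaggerTrainer rather than constructed by hand; the corpus
# and initial tagger chosen below are assumptions made for the example.
#
#   from nltk.corpus import treebank
#   from nltk.tag import UnigramTagger
#   from nltk.tag.brill_trainer import BrillTaggerTrainer
#
#   train_sents = treebank.tagged_sents()[:3000]
#   trainer = BrillTaggerTrainer(UnigramTagger(train_sents), brill24(), trace=1)
#   tagger = trainer.train(train_sents, max_rules=100)   # -> BrillTagger instance
#   tagger.tag("The cat sat on the mat .".split())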
| [
"collections.Counter",
"inspect.getmembers",
"collections.defaultdict"
] | [((5303, 5364), 'inspect.getmembers', 'inspect.getmembers', (['sys.modules[__name__]', 'inspect.isfunction'], {}), '(sys.modules[__name__], inspect.isfunction)\n', (5321, 5364), False, 'import inspect\n'), ((8385, 8401), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (8396, 8401), False, 'from collections import defaultdict, Counter\n'), ((10430, 10443), 'collections.Counter', 'Counter', (['tids'], {}), '(tids)\n', (10437, 10443), False, 'from collections import defaultdict, Counter\n'), ((10475, 10484), 'collections.Counter', 'Counter', ([], {}), '()\n', (10482, 10484), False, 'from collections import defaultdict, Counter\n'), ((12630, 12639), 'collections.Counter', 'Counter', ([], {}), '()\n', (12637, 12639), False, 'from collections import defaultdict, Counter\n')] |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "WSC"
def get_char_index(text, span_text, span_index):
tokens = text.replace("\n", " ").lower().split(" ")
span_tokens = span_text.replace("\n", " ").lower().split(" ")
# Token exact match
if tokens[span_index : span_index + len(span_tokens)] == span_tokens:
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
ed = st + len(span_text)
return st, ed
if span_index < len(tokens):
# Token fuzzy match with extra chars
char_in_text = " ".join(tokens[span_index : span_index + len(span_tokens)])
char_in_span = " ".join(span_tokens)
if char_in_text.startswith(char_in_span):
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
# ed = st + len(char_in_span)
ed = st + len(char_in_text)
return st, ed
# Token fuzzy match with extra chars
char_in_text = " ".join(tokens[span_index : span_index + len(span_tokens)])
char_in_span = " ".join(span_tokens)
if char_in_span.startswith(char_in_text):
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
ed = st + len(char_in_text)
return st, ed
# Index out of range
if span_index >= len(tokens):
span_index -= 10
# Token fuzzy match with different position
for idx in range(span_index, len(tokens)):
if tokens[idx : idx + len(span_tokens)] == span_tokens:
st = len(" ".join(tokens[:idx])) + 1 if idx != 0 else 0
ed = st + len(span_text)
return st, ed
# Token best fuzzy match with different position
for idx in range(span_index, len(tokens)):
if tokens[idx] == span_tokens[0]:
for length in range(1, len(span_tokens)):
if tokens[idx : idx + length] != span_tokens[:length]:
st = len(" ".join(tokens[:idx])) + 1 if idx != 0 else 0
ed = st + len(" ".join(span_tokens[: length - 1]))
return st, ed
return None
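# Illustrative usage (not from the original file): character offsets for a token span.
#
#   st, ed = get_char_index("I saw that the dog ran", "the dog", 3)
#   # (st, ed) == (11, 18) and "I saw that the dog ran"[11:18] == "the dog"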
def parse(jsonl_path, tokenizer, max_data_samples, max_sequence_length):
logger.info(f"Loading data from {jsonl_path}.")
rows = [json.loads(row) for row in open(jsonl_path, encoding="utf-8")]
for i in range(2):
logger.info(f"Sample {i}: {rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
rows = rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
# sentence text
sentences = []
# span1
span1s = []
# span2
span2s = []
# span1 idx
span1_idxs = []
# span2 idx
span2_idxs = []
# label
labels = []
token1_idxs = []
token2_idxs = []
xlnet_tokens = []
xlnet_token_ids = []
xlnet_token_masks = []
xlnet_token_segments = []
# Check the maximum token length
max_len = -1
for row in rows:
index = row["idx"]
text = row["text"]
span1_text = row["target"]["span1_text"]
span2_text = row["target"]["span2_text"]
span1_index = row["target"]["span1_index"]
span2_index = row["target"]["span2_index"]
label = row["label"] if "label" in row else True
span1_char_index = get_char_index(text, span1_text, span1_index)
span2_char_index = get_char_index(text, span2_text, span2_index)
        assert span1_char_index is not None, f"Check example {index} in {jsonl_path}"
        assert span2_char_index is not None, f"Check example {index} in {jsonl_path}"
# Tokenize sentences
xlnet_tokens_sub1 = tokenizer.tokenize(
text[: min(span1_char_index[0], span2_char_index[0])]
)
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub2 = tokenizer.tokenize(
text[span1_char_index[0] : span1_char_index[1]]
)
token1_idx = [
len(xlnet_tokens_sub1) + 1,
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2),
]
else:
xlnet_tokens_sub2 = tokenizer.tokenize(
text[span2_char_index[0] : span2_char_index[1]]
)
token2_idx = [
len(xlnet_tokens_sub1) + 1,
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2),
]
sub3_st = (
span1_char_index[1]
if span1_char_index[0] < span2_char_index[0]
else span2_char_index[1]
)
sub3_ed = (
span1_char_index[0]
if span1_char_index[0] > span2_char_index[0]
else span2_char_index[0]
)
xlnet_tokens_sub3 = tokenizer.tokenize(text[sub3_st:sub3_ed])
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub4 = tokenizer.tokenize(
text[span2_char_index[0] : span2_char_index[1]]
)
cur_len = (
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2) + len(xlnet_tokens_sub3)
)
token2_idx = [cur_len + 1, cur_len + len(xlnet_tokens_sub4)]
else:
xlnet_tokens_sub4 = tokenizer.tokenize(
text[span1_char_index[0] : span1_char_index[1]]
)
cur_len = (
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2) + len(xlnet_tokens_sub3)
)
token1_idx = [cur_len + 1, cur_len + len(xlnet_tokens_sub4)]
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub5 = tokenizer.tokenize(text[span2_char_index[1] :])
else:
xlnet_tokens_sub5 = tokenizer.tokenize(text[span1_char_index[1] :])
tokens = (
["[CLS]"]
+ xlnet_tokens_sub1
+ xlnet_tokens_sub2
+ xlnet_tokens_sub3
+ xlnet_tokens_sub4
+ xlnet_tokens_sub5
+ ["[SEP]"]
)
if len(tokens) > max_len:
max_len = len(tokens)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
token_segments = [0] * len(token_ids)
# Generate mask where 1 for real tokens and 0 for padding tokens
token_masks = [1] * len(token_ids)
token1_idxs.append(token1_idx)
token2_idxs.append(token2_idx)
sentences.append(text)
span1s.append(span1_text)
span2s.append(span2_text)
span1_idxs.append(span1_index)
span2_idxs.append(span2_index)
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
xlnet_tokens.append(tokens)
xlnet_token_ids.append(torch.LongTensor(token_ids))
xlnet_token_masks.append(torch.LongTensor(token_masks))
xlnet_token_segments.append(torch.LongTensor(token_segments))
token1_idxs = torch.from_numpy(np.array(token1_idxs))
token2_idxs = torch.from_numpy(np.array(token2_idxs))
labels = torch.from_numpy(np.array(labels))
logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"sentence": sentences,
"span1": span1s,
"span2": span2s,
"span1_idx": span1_idxs,
"span2_idx": span2_idxs,
"token1_idx": token1_idxs,
"token2_idx": token2_idxs,
"tokens": xlnet_tokens,
"token_ids": xlnet_token_ids,
"token_masks": xlnet_token_masks,
"token_segments": xlnet_token_segments,
},
Y_dict={"labels": labels},
)
| [
"logging.getLogger",
"json.loads",
"snorkel.mtl.data.MultitaskDataset",
"torch.LongTensor",
"numpy.array",
"sys.path.append"
] | [((167, 188), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (182, 188), False, 'import sys\n'), ((249, 276), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (266, 276), False, 'import logging\n'), ((7278, 7658), 'snorkel.mtl.data.MultitaskDataset', 'MultitaskDataset', ([], {'name': '"""SuperGLUE"""', 'X_dict': "{'sentence': sentences, 'span1': span1s, 'span2': span2s, 'span1_idx':\n span1_idxs, 'span2_idx': span2_idxs, 'token1_idx': token1_idxs,\n 'token2_idx': token2_idxs, 'tokens': xlnet_tokens, 'token_ids':\n xlnet_token_ids, 'token_masks': xlnet_token_masks, 'token_segments':\n xlnet_token_segments}", 'Y_dict': "{'labels': labels}"}), "(name='SuperGLUE', X_dict={'sentence': sentences, 'span1':\n span1s, 'span2': span2s, 'span1_idx': span1_idxs, 'span2_idx':\n span2_idxs, 'token1_idx': token1_idxs, 'token2_idx': token2_idxs,\n 'tokens': xlnet_tokens, 'token_ids': xlnet_token_ids, 'token_masks':\n xlnet_token_masks, 'token_segments': xlnet_token_segments}, Y_dict={\n 'labels': labels})\n", (7294, 7658), False, 'from snorkel.mtl.data import MultitaskDataset\n'), ((2503, 2518), 'json.loads', 'json.loads', (['row'], {}), '(row)\n', (2513, 2518), False, 'import json\n'), ((7091, 7112), 'numpy.array', 'np.array', (['token1_idxs'], {}), '(token1_idxs)\n', (7099, 7112), True, 'import numpy as np\n'), ((7149, 7170), 'numpy.array', 'np.array', (['token2_idxs'], {}), '(token2_idxs)\n', (7157, 7170), True, 'import numpy as np\n'), ((7203, 7219), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (7211, 7219), True, 'import numpy as np\n'), ((6892, 6919), 'torch.LongTensor', 'torch.LongTensor', (['token_ids'], {}), '(token_ids)\n', (6908, 6919), False, 'import torch\n'), ((6954, 6983), 'torch.LongTensor', 'torch.LongTensor', (['token_masks'], {}), '(token_masks)\n', (6970, 6983), False, 'import torch\n'), ((7021, 7053), 'torch.LongTensor', 'torch.LongTensor', (['token_segments'], {}), '(token_segments)\n', (7037, 7053), False, 'import torch\n')] |
import re
import json
__all__ = ["Simplimental"]
class Simplimental:
def __init__(self, text="This is not a bad idea"):
self.text = text
with open('simplimental/data/afinn.json') as data_file:
self.dictionary = json.load(data_file)
		no_punctuation = re.sub(r"[^a-zA-Z ]+", " ", self.text)
		self.tokens = no_punctuation.lower().split(" ")
		# Keep "no" (it carries negation information) and drop other tokens shorter than
		# 3 characters; building a new list avoids mutating self.tokens while iterating.
		self.tokens = [t for t in self.tokens if len(t) >= 3 or t in ["no"]]
def negativity(self):
hits = 0
words = []
for i in range(len(self.tokens)):
word = self.tokens[i]
score = self.dictionary.get(word, 0)
if i > 0 and self.tokens[i-1] in ["no", "not"]:
word = "not_" + word
score = -score if score > 0 else 0
if score < 0:
hits -= score
words.append(word)
return {
"score": hits,
"comparative": float(hits) / len(self.tokens),
"words": words
}
def positivity(self):
hits = 0
words = []
for i in range(len(self.tokens)):
word = self.tokens[i]
score = self.dictionary.get(word, 0)
if i > 0 and self.tokens[i-1] in ["no", "not"]:
word = "not_" + word
score = -score if score < 0 else 0
if score > 0:
hits += score
words.append(word)
return {
"score": hits,
"comparative": float(hits) / len(self.tokens),
"words": words
}
def analyze(self):
negativity = self.negativity()
positivity = self.positivity()
return {
"score": positivity["score"] - negativity["score"],
"comparative": positivity["comparative"] - negativity["comparative"],
}
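# Illustrative usage (a sketch; it assumes the AFINN word list is available at
# simplimental/data/afinn.json relative to the current working directory):
#
#   s = Simplimental("This is not a bad idea")
#   s.negativity()   # {"score": ..., "comparative": ..., "words": [...]}
#   s.positivity()
#   s.analyze()      # {"score": positive - negative, "comparative": ...}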
| [
"json.load",
"re.sub"
] | [((270, 307), 're.sub', 're.sub', (['"""[^a-zA-Z ]+"""', '""" """', 'self.text'], {}), "('[^a-zA-Z ]+', ' ', self.text)\n", (276, 307), False, 'import re\n'), ((228, 248), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (237, 248), False, 'import json\n')] |
# This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string.
# You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings.
# It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API.
#
# More information about the RoboDK API here:
# https://robodk.com/doc/en/RoboDK-API.html
# For more information visit:
# https://robodk.com/doc/en/PythonAPI/robolink.html
from robolink import * # RoboDK API
# JSON tools
import json
# Start the RoboDK API
RDK = Robolink()
# Ask the user to select a robot arm (6 axis robot wich can have external axes)
robot = RDK.ItemUserPick("Select a robot arm",ITEM_TYPE_ROBOT_ARM)
# Default optimization settings test template
AxesOptimSettings = {
# Optimization parameters:
"Active": 1, # Use generic axes optimization: 0=Disabled or 1=Enabled
"Algorithm": 2, # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead
"MaxIter": 650, # Max. number of iterations
"Tol": 0.0016, # Tolerance to stop iterations
# Absolute Reference joints (double):
"AbsJnt_1": 104.17,
"AbsJnt_2": 11.22,
"AbsJnt_3": 15.97,
"AbsJnt_4": -87.48,
"AbsJnt_5": -75.36,
"AbsJnt_6": 63.03,
"AbsJnt_7": 174.13,
"AbsJnt_8": 173.60,
"AbsJnt_9": 0,
# Using Absolute reference joints (0: No, 1: Yes):
"AbsOn_1": 1,
"AbsOn_2": 1,
"AbsOn_3": 1,
"AbsOn_4": 1,
"AbsOn_5": 1,
"AbsOn_6": 1,
"AbsOn_7": 1,
"AbsOn_8": 1,
"AbsOn_9": 1,
# Weight for absolute reference joints (double):
"AbsW_1": 100,
"AbsW_2": 100,
"AbsW_3": 100,
"AbsW_4": 89,
"AbsW_5": 90,
"AbsW_6": 92,
"AbsW_7": 92,
"AbsW_8": 96,
"AbsW_9": 50,
# Using for relative joint motion smoothing (0: No, 1: Yes):
"RelOn_1": 1,
"RelOn_2": 1,
"RelOn_3": 1,
"RelOn_4": 1,
"RelOn_5": 1,
"RelOn_6": 1,
"RelOn_7": 1,
"RelOn_8": 1,
"RelOn_9": 1,
# Weight for relative joint motion (double):
"RelW_1": 5,
"RelW_2": 47,
"RelW_3": 44,
"RelW_4": 43,
"RelW_5": 36,
"RelW_6": 47,
"RelW_7": 53,
"RelW_8": 59,
"RelW_9": 0,
}
# Update one value, for example, make it active:
ToUpdate = {}
ToUpdate["Active"] = 1
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)
# Example to make a partial or full update
count = 1
while True:
for i in range(7):
# Partial update
ToUpdate = {}
ToUpdate["AbsJnt_" + str(i+1)] = (count+i)*4
ToUpdate["AbsOn_" + str(i+1)] = count % 2
ToUpdate["AbsW_" + str(i+1)] = (count+i)
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)
# Full update
#OptimAxes_TEST["RefJoint_" + str(i+1)] = (count+i)*4
#OptimAxes_TEST["RefWeight_" + str(i+1)] = (count+i)
#OptimAxes_TEST["RefOn_" + str(i+1)] = count % 2
# Full update
#print(robot.setParam("OptimAxes", str(AxesOptimSettings)))
count = count + 1
# Read settings
json_data = robot.setParam("OptimAxes")
json_object = json.loads(json_data)
print(json.dumps(json_object, indent=4))
pause(0.2)
# Example to read the current axes optimization settings:
while True:
json_data = robot.setParam("OptimAxes")
json_object = json.loads(json_data)
print(json.dumps(json_object, indent=4))
pause(0.2)
| [
"json.loads",
"json.dumps"
] | [((2415, 2435), 'json.dumps', 'json.dumps', (['ToUpdate'], {}), '(ToUpdate)\n', (2425, 2435), False, 'import json\n'), ((3335, 3356), 'json.loads', 'json.loads', (['json_data'], {}), '(json_data)\n', (3345, 3356), False, 'import json\n'), ((3555, 3576), 'json.loads', 'json.loads', (['json_data'], {}), '(json_data)\n', (3565, 3576), False, 'import json\n'), ((3367, 3400), 'json.dumps', 'json.dumps', (['json_object'], {'indent': '(4)'}), '(json_object, indent=4)\n', (3377, 3400), False, 'import json\n'), ((3587, 3620), 'json.dumps', 'json.dumps', (['json_object'], {'indent': '(4)'}), '(json_object, indent=4)\n', (3597, 3620), False, 'import json\n'), ((2829, 2849), 'json.dumps', 'json.dumps', (['ToUpdate'], {}), '(ToUpdate)\n', (2839, 2849), False, 'import json\n')] |
# Generated by Django 3.1 on 2020-09-08 07:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='OpeningSystem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='Opening',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('eco', models.CharField(max_length=3)),
('moves', models.TextField()),
('opening_system', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.openingsystem')),
],
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('elo_mean', models.IntegerField(default=0)),
('elo_diff', models.IntegerField(default=0)),
('result', models.CharField(max_length=40)),
('timecontrol', models.CharField(max_length=40)),
('timestamp', models.DateTimeField()),
('raw', models.TextField()),
('opening', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.opening')),
],
),
migrations.CreateModel(
name='Analyse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('turnover_move', models.IntegerField(default=0)),
('turnover_evaluation', models.IntegerField(default=0)),
('unbalance_material', models.IntegerField(default=0)),
('unbalance_officers', models.IntegerField(default=0)),
('unbalance_exchange', models.IntegerField(default=0)),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.game')),
],
),
]
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((340, 433), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (356, 433), False, 'from django.db import migrations, models\n'), ((457, 488), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (473, 488), False, 'from django.db import migrations, models\n'), ((618, 711), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (634, 711), False, 'from django.db import migrations, models\n'), ((735, 766), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (751, 766), False, 'from django.db import migrations, models\n'), ((899, 992), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (915, 992), False, 'from django.db import migrations, models\n'), ((1016, 1047), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (1032, 1047), False, 'from django.db import migrations, models\n'), ((1074, 1104), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)'}), '(max_length=3)\n', (1090, 1104), False, 'from django.db import migrations, models\n'), ((1133, 1151), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1149, 1151), False, 'from django.db import migrations, models\n'), ((1189, 1284), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""insight.openingsystem"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'insight.openingsystem')\n", (1206, 1284), False, 'from django.db import migrations, models\n'), ((1409, 1502), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1425, 1502), False, 'from django.db import migrations, models\n'), ((1530, 1560), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1549, 1560), False, 'from django.db import migrations, models\n'), ((1592, 1622), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1611, 1622), False, 'from django.db import migrations, models\n'), ((1652, 1683), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (1668, 1683), False, 'from django.db import migrations, models\n'), ((1718, 1749), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (1734, 1749), False, 'from django.db import migrations, models\n'), ((1782, 1804), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1802, 1804), False, 'from django.db import migrations, models\n'), ((1831, 1849), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1847, 1849), False, 'from django.db import migrations, models\n'), ((1880, 1969), 
'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""insight.opening"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'insight.opening')\n", (1897, 1969), False, 'from django.db import migrations, models\n'), ((2097, 2190), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2113, 2190), False, 'from django.db import migrations, models\n'), ((2223, 2253), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2242, 2253), False, 'from django.db import migrations, models\n'), ((2296, 2326), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2315, 2326), False, 'from django.db import migrations, models\n'), ((2368, 2398), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2387, 2398), False, 'from django.db import migrations, models\n'), ((2440, 2470), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2459, 2470), False, 'from django.db import migrations, models\n'), ((2512, 2542), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2531, 2542), False, 'from django.db import migrations, models\n'), ((2570, 2656), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""insight.game"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'insight.game')\n", (2587, 2656), False, 'from django.db import migrations, models\n')] |
from robotpy_ext.control.toggle import Toggle
from robotpy_ext.misc.precise_delay import NotifierDelay
class FakeJoystick:
def __init__(self):
self._pressed = [False] * 2
def getRawButton(self, num):
return self._pressed[num]
def press(self, num):
self._pressed[num] = True
def release(self, num):
self._pressed[num] = False
def test_toggle():
joystick = FakeJoystick()
toggleButton = Toggle(joystick, 0)
toggleButton2 = Toggle(joystick, 1)
assert toggleButton.off
joystick.press(0)
assert toggleButton.on
assert toggleButton2.off
joystick.release(0)
assert toggleButton.on
joystick.press(0)
assert toggleButton.off
joystick.release(0)
assert toggleButton.off
joystick.press(1)
assert toggleButton.off
assert toggleButton2.on
def test_toggle_debounce():
# TODO: use simulated time
delay = NotifierDelay(0.5)
joystick = FakeJoystick()
toggleButton = Toggle(joystick, 1, 0.1)
assert toggleButton.off
joystick.press(1)
assert toggleButton.on
joystick.release(1)
joystick.press(1)
joystick.release(1)
assert toggleButton.on
delay.wait()
assert toggleButton.on
joystick.press(1)
assert toggleButton.off
| [
"robotpy_ext.control.toggle.Toggle",
"robotpy_ext.misc.precise_delay.NotifierDelay"
] | [((448, 467), 'robotpy_ext.control.toggle.Toggle', 'Toggle', (['joystick', '(0)'], {}), '(joystick, 0)\n', (454, 467), False, 'from robotpy_ext.control.toggle import Toggle\n'), ((488, 507), 'robotpy_ext.control.toggle.Toggle', 'Toggle', (['joystick', '(1)'], {}), '(joystick, 1)\n', (494, 507), False, 'from robotpy_ext.control.toggle import Toggle\n'), ((918, 936), 'robotpy_ext.misc.precise_delay.NotifierDelay', 'NotifierDelay', (['(0.5)'], {}), '(0.5)\n', (931, 936), False, 'from robotpy_ext.misc.precise_delay import NotifierDelay\n'), ((986, 1010), 'robotpy_ext.control.toggle.Toggle', 'Toggle', (['joystick', '(1)', '(0.1)'], {}), '(joystick, 1, 0.1)\n', (992, 1010), False, 'from robotpy_ext.control.toggle import Toggle\n')] |
'''
This file contains test cases for tflearn
'''
import tensorflow.compat.v1 as tf
import tflearn
import unittest
class TestActivations(unittest.TestCase):
'''
This class contains test cases for the functions in tflearn/activations.py
'''
PLACES = 4 # Number of places to match when testing floating point values
def test_linear(self):
f = tflearn.linear
# Case 1
x = tf.placeholder(tf.float32, shape=())
self.assertEqual(f(x), x)
# Case 2
x = tf.placeholder(tf.int64, shape=())
self.assertEqual(f(x), x)
def test_tanh(self):
f = tflearn.tanh
x = tf.placeholder(tf.float32, shape=())
with tf.Session() as sess:
# Case 1
self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0)
# Case 2
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:0.5}),
0.4621, places=TestActivations.PLACES)
# Case 3
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-0.25}),
-0.2449, places=TestActivations.PLACES)
def test_leaky_relu(self):
f = lambda x: tflearn.leaky_relu(x, alpha=0.2)
x = tf.placeholder(tf.float32, shape=())
with tf.Session() as sess:
# Case 1
self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0)
# Case 2
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:1}),
1, places=TestActivations.PLACES)
# Case 3
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-1}),
-0.2, places=TestActivations.PLACES)
# Case 4
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-5}),
-1, places=TestActivations.PLACES)
def test_apply_activation(self):
lrelu_02 = lambda x: tflearn.leaky_relu(x, alpha=0.2)
x = tf.constant(-0.25, tf.float32)
with tf.Session() as sess:
# Case 1: 'linear'
self.assertEqual(
sess.run(tflearn.activation(x, 'linear')),
-0.25)
# Case 2: 'relu'
self.assertEqual(
sess.run(tflearn.activation(x, 'relu')),
0)
# Case 3: 'leaky_relu'
self.assertAlmostEqual(
sess.run(tflearn.activation(x, 'leaky_relu')),
-0.025, places=TestActivations.PLACES)
# Case 4: 'tanh'
self.assertAlmostEqual(
sess.run(tflearn.activation(x, 'tanh')),
-0.2449, places=TestActivations.PLACES)
# Case 5: lrelu_02 (callable)
self.assertAlmostEqual(
sess.run(tflearn.activation(x, lrelu_02)),
-0.05, places=TestActivations.PLACES)
if __name__ == "__main__":
unittest.main() | [
"tensorflow.compat.v1.placeholder",
"tflearn.leaky_relu",
"tensorflow.compat.v1.constant",
"unittest.main",
"tflearn.activation",
"tensorflow.compat.v1.Session"
] | [((2857, 2872), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2870, 2872), False, 'import unittest\n'), ((425, 461), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '()'}), '(tf.float32, shape=())\n', (439, 461), True, 'import tensorflow.compat.v1 as tf\n'), ((526, 560), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '()'}), '(tf.int64, shape=())\n', (540, 560), True, 'import tensorflow.compat.v1 as tf\n'), ((658, 694), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '()'}), '(tf.float32, shape=())\n', (672, 694), True, 'import tensorflow.compat.v1 as tf\n'), ((1221, 1257), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '()'}), '(tf.float32, shape=())\n', (1235, 1257), True, 'import tensorflow.compat.v1 as tf\n'), ((1918, 1948), 'tensorflow.compat.v1.constant', 'tf.constant', (['(-0.25)', 'tf.float32'], {}), '(-0.25, tf.float32)\n', (1929, 1948), True, 'import tensorflow.compat.v1 as tf\n'), ((717, 729), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (727, 729), True, 'import tensorflow.compat.v1 as tf\n'), ((1176, 1208), 'tflearn.leaky_relu', 'tflearn.leaky_relu', (['x'], {'alpha': '(0.2)'}), '(x, alpha=0.2)\n', (1194, 1208), False, 'import tflearn\n'), ((1272, 1284), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (1282, 1284), True, 'import tensorflow.compat.v1 as tf\n'), ((1873, 1905), 'tflearn.leaky_relu', 'tflearn.leaky_relu', (['x'], {'alpha': '(0.2)'}), '(x, alpha=0.2)\n', (1891, 1905), False, 'import tflearn\n'), ((1963, 1975), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (1973, 1975), True, 'import tensorflow.compat.v1 as tf\n'), ((2071, 2102), 'tflearn.activation', 'tflearn.activation', (['x', '"""linear"""'], {}), "(x, 'linear')\n", (2089, 2102), False, 'import tflearn\n'), ((2213, 2242), 'tflearn.activation', 'tflearn.activation', (['x', '"""relu"""'], {}), "(x, 'relu')\n", (2231, 2242), False, 'import tflearn\n'), ((2361, 2396), 'tflearn.activation', 'tflearn.activation', (['x', '"""leaky_relu"""'], {}), "(x, 'leaky_relu')\n", (2379, 2396), False, 'import tflearn\n'), ((2545, 2574), 'tflearn.activation', 'tflearn.activation', (['x', '"""tanh"""'], {}), "(x, 'tanh')\n", (2563, 2574), False, 'import tflearn\n'), ((2737, 2768), 'tflearn.activation', 'tflearn.activation', (['x', 'lrelu_02'], {}), '(x, lrelu_02)\n', (2755, 2768), False, 'import tflearn\n')] |
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import os
import sys
import subprocess
if __name__ == "__main__":
success = True
try:
subprocess.run('cd /root; fab install-libs', shell=True, check=True)
except:
success = False
reply = dict()
reply['request_id'] = os.environ['request_id']
if success:
reply['status'] = 'ok'
else:
reply['status'] = 'err'
reply['response'] = dict()
try:
with open("/root/result.json") as f:
reply['response']['result'] = json.loads(f.read())
except:
reply['response']['result'] = {"error": "Failed to open result.json"}
reply['response']['log'] = "/var/log/datalab/{0}/{0}_{1}_{2}.log".format(os.environ['conf_resource'],
os.environ['project_name'],
os.environ['request_id'])
with open("/response/{}_{}_{}.json".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id']), 'w') as response_file:
response_file.write(json.dumps(reply))
try:
subprocess.run('chmod 666 /response/*', shell=True, check=True)
except:
success = False
if not success:
sys.exit(1) | [
"json.dumps",
"subprocess.run",
"sys.exit"
] | [((1086, 1154), 'subprocess.run', 'subprocess.run', (['"""cd /root; fab install-libs"""'], {'shell': '(True)', 'check': '(True)'}), "('cd /root; fab install-libs', shell=True, check=True)\n", (1100, 1154), False, 'import subprocess\n'), ((2173, 2236), 'subprocess.run', 'subprocess.run', (['"""chmod 666 /response/*"""'], {'shell': '(True)', 'check': '(True)'}), "('chmod 666 /response/*', shell=True, check=True)\n", (2187, 2236), False, 'import subprocess\n'), ((2302, 2313), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2310, 2313), False, 'import sys\n'), ((2136, 2153), 'json.dumps', 'json.dumps', (['reply'], {}), '(reply)\n', (2146, 2153), False, 'import json\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Configuration settings used in finetune.py.
"""
from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from .bert_model import BertConfig
cfg = edict({
'task': 'NER',
'num_labels': 41,
'data_file': '',
'schema_file': None,
'finetune_ckpt': '',
'use_crf': False,
'clue_benchmark': False,
})
bert_net_cfg = BertConfig(
batch_size=8 if not cfg.clue_benchmark else 1,
seq_length=512,
vocab_size=30522,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
use_relative_positions=False,
input_mask_from_dataset=True,
token_type_ids_from_dataset=True,
dtype=mstype.float32,
compute_type=mstype.float16,
)
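# A minimal sketch (hypothetical module path) of how finetune.py might consume
# these settings; both objects support attribute access:
#
#   from src.finetune_config import cfg, bert_net_cfg
#   print(cfg.task, cfg.num_labels, cfg.use_crf)
#   print(bert_net_cfg.seq_length, bert_net_cfg.hidden_size)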
| [
"easydict.EasyDict"
] | [((844, 990), 'easydict.EasyDict', 'edict', (["{'task': 'NER', 'num_labels': 41, 'data_file': '', 'schema_file': None,\n 'finetune_ckpt': '', 'use_crf': False, 'clue_benchmark': False}"], {}), "({'task': 'NER', 'num_labels': 41, 'data_file': '', 'schema_file':\n None, 'finetune_ckpt': '', 'use_crf': False, 'clue_benchmark': False})\n", (849, 990), True, 'from easydict import EasyDict as edict\n')] |
# -*- coding: utf-8 -*-
# utopia-cms 2020. <NAME>.
from django.core.management import BaseCommand
from django.db.utils import IntegrityError
from apps import core_articleviewedby_mdb
from core.models import ArticleViewedBy
class Command(BaseCommand):
    help = "Moves 'article viewed by' data from MongoDB to the Django model"
def handle(self, *args, **options):
mdb_view = core_articleviewedby_mdb.posts.find_one_and_delete({})
while mdb_view:
try:
avb = ArticleViewedBy.objects.get(article=mdb_view['article'], user=mdb_view['user'])
avb.viewed_at = mdb_view['viewed_at']
avb.save()
except ArticleViewedBy.DoesNotExist:
try:
ArticleViewedBy.objects.create(
article_id=mdb_view['article'], user_id=mdb_view['user'], viewed_at=mdb_view['viewed_at'])
except IntegrityError:
pass
mdb_view = core_articleviewedby_mdb.posts.find_one_and_delete({})
| [
"core.models.ArticleViewedBy.objects.create",
"core.models.ArticleViewedBy.objects.get",
"apps.core_articleviewedby_mdb.posts.find_one_and_delete"
] | [((386, 440), 'apps.core_articleviewedby_mdb.posts.find_one_and_delete', 'core_articleviewedby_mdb.posts.find_one_and_delete', (['{}'], {}), '({})\n', (436, 440), False, 'from apps import core_articleviewedby_mdb\n'), ((989, 1043), 'apps.core_articleviewedby_mdb.posts.find_one_and_delete', 'core_articleviewedby_mdb.posts.find_one_and_delete', (['{}'], {}), '({})\n', (1039, 1043), False, 'from apps import core_articleviewedby_mdb\n'), ((504, 583), 'core.models.ArticleViewedBy.objects.get', 'ArticleViewedBy.objects.get', ([], {'article': "mdb_view['article']", 'user': "mdb_view['user']"}), "(article=mdb_view['article'], user=mdb_view['user'])\n", (531, 583), False, 'from core.models import ArticleViewedBy\n'), ((755, 881), 'core.models.ArticleViewedBy.objects.create', 'ArticleViewedBy.objects.create', ([], {'article_id': "mdb_view['article']", 'user_id': "mdb_view['user']", 'viewed_at': "mdb_view['viewed_at']"}), "(article_id=mdb_view['article'], user_id=\n mdb_view['user'], viewed_at=mdb_view['viewed_at'])\n", (785, 881), False, 'from core.models import ArticleViewedBy\n')] |
import torch
import torch.nn as nn
from torch.optim import SGD
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
from examples.common import data_loader
from examples.resnet import ResNetBase
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = None
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_TENSOR_STRIDE = 1
    # To use the model, you must call initialize_coords before the forward pass.
    # Once data is processed, call clear to reset the model before calling
    # initialize_coords again.
def __init__(self, in_channels, out_channels, D=3):
ResNetBase.__init__(self, in_channels, out_channels, D)
def network_initialization(self, in_channels, out_channels, D):
# Output of the first conv concated to conv6
self.inplanes = self.INIT_DIM
self.conv0p1s1 = ME.MinkowskiConvolution(
in_channels, self.inplanes, kernel_size=5, dimension=D)
self.bn0 = ME.MinkowskiBatchNorm(self.inplanes)
self.conv1p1s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
self.block1 = self._make_layer(self.BLOCK, self.PLANES[0],
self.LAYERS[0])
self.conv2p2s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn2 = ME.MinkowskiBatchNorm(self.inplanes)
self.block2 = self._make_layer(self.BLOCK, self.PLANES[1],
self.LAYERS[1])
self.conv3p4s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn3 = ME.MinkowskiBatchNorm(self.inplanes)
self.block3 = self._make_layer(self.BLOCK, self.PLANES[2],
self.LAYERS[2])
self.conv4p8s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn4 = ME.MinkowskiBatchNorm(self.inplanes)
self.block4 = self._make_layer(self.BLOCK, self.PLANES[3],
self.LAYERS[3])
self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[4], kernel_size=2, stride=2, dimension=D)
self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4])
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(self.BLOCK, self.PLANES[4],
self.LAYERS[4])
self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[5], kernel_size=2, stride=2, dimension=D)
self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5])
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(self.BLOCK, self.PLANES[5],
self.LAYERS[5])
self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[6], kernel_size=2, stride=2, dimension=D)
self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6])
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(self.BLOCK, self.PLANES[6],
self.LAYERS[6])
self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[7], kernel_size=2, stride=2, dimension=D)
self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7])
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(self.BLOCK, self.PLANES[7],
self.LAYERS[7])
self.final = ME.MinkowskiConvolution(
self.PLANES[7],
out_channels,
kernel_size=1,
has_bias=True,
dimension=D)
self.relu = ME.MinkowskiReLU(inplace=True)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# tensor_stride=16
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# tensor_stride=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = ME.cat((out, out_b3p8))
out = self.block5(out)
# tensor_stride=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = ME.cat((out, out_b2p4))
out = self.block6(out)
# tensor_stride=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = ME.cat((out, out_b1p2))
out = self.block7(out)
# tensor_stride=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = ME.cat((out, out_p1))
out = self.block8(out)
return self.final(out)
class MinkUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class MinkUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class MinkUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class MinkUNet14A(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet14B(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet14C(MinkUNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class MinkUNet14D(MinkUNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet18A(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet18B(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet18D(MinkUNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet34A(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class MinkUNet34B(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class MinkUNet34C(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
if __name__ == '__main__':
# loss and network
criterion = nn.CrossEntropyLoss()
net = MinkUNet14A(in_channels=3, out_channels=5, D=2)
print(net)
# a data loader must return a tuple of coords, features, and labels.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)
optimizer = SGD(net.parameters(), lr=1e-2)
for i in range(10):
optimizer.zero_grad()
# Get new data
coords, feat, label = data_loader(is_classification=False)
input = ME.SparseTensor(feat, coords=coords).to(device)
label = label.to(device)
# Forward
output = net(input)
# Loss
loss = criterion(output.F, label)
print('Iteration: ', i, ', Loss: ', loss.item())
# Gradient
loss.backward()
optimizer.step()
# Saving and loading a network
torch.save(net.state_dict(), 'test.pth')
net.load_state_dict(torch.load('test.pth'))
| [
"MinkowskiEngine.MinkowskiReLU",
"MinkowskiEngine.cat",
"torch.nn.CrossEntropyLoss",
"torch.load",
"examples.common.data_loader",
"MinkowskiEngine.MinkowskiConvolution",
"MinkowskiEngine.MinkowskiBatchNorm",
"torch.cuda.is_available",
"MinkowskiEngine.MinkowskiConvolutionTranspose",
"examples.resnet.ResNetBase.__init__",
"MinkowskiEngine.SparseTensor"
] | [((6981, 7002), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7000, 7002), True, 'import torch.nn as nn\n'), ((675, 730), 'examples.resnet.ResNetBase.__init__', 'ResNetBase.__init__', (['self', 'in_channels', 'out_channels', 'D'], {}), '(self, in_channels, out_channels, D)\n', (694, 730), False, 'from examples.resnet import ResNetBase\n'), ((916, 995), 'MinkowskiEngine.MinkowskiConvolution', 'ME.MinkowskiConvolution', (['in_channels', 'self.inplanes'], {'kernel_size': '(5)', 'dimension': 'D'}), '(in_channels, self.inplanes, kernel_size=5, dimension=D)\n', (939, 995), True, 'import MinkowskiEngine as ME\n'), ((1029, 1065), 'MinkowskiEngine.MinkowskiBatchNorm', 'ME.MinkowskiBatchNorm', (['self.inplanes'], {}), '(self.inplanes)\n', (1050, 1065), True, 'import MinkowskiEngine as ME\n'), ((1092, 1188), 'MinkowskiEngine.MinkowskiConvolution', 'ME.MinkowskiConvolution', (['self.inplanes', 'self.inplanes'], {'kernel_size': '(2)', 'stride': '(2)', 'dimension': 'D'}), '(self.inplanes, self.inplanes, kernel_size=2, stride\n =2, dimension=D)\n', (1115, 1188), True, 'import MinkowskiEngine as ME\n'), ((1216, 1252), 'MinkowskiEngine.MinkowskiBatchNorm', 'ME.MinkowskiBatchNorm', (['self.inplanes'], {}), '(self.inplanes)\n', (1237, 1252), True, 'import MinkowskiEngine as ME\n'), ((1402, 1498), 'MinkowskiEngine.MinkowskiConvolution', 'ME.MinkowskiConvolution', (['self.inplanes', 'self.inplanes'], {'kernel_size': '(2)', 'stride': '(2)', 'dimension': 'D'}), '(self.inplanes, self.inplanes, kernel_size=2, stride\n =2, dimension=D)\n', (1425, 1498), True, 'import MinkowskiEngine as ME\n'), ((1526, 1562), 'MinkowskiEngine.MinkowskiBatchNorm', 'ME.MinkowskiBatchNorm', (['self.inplanes'], {}), '(self.inplanes)\n', (1547, 1562), True, 'import MinkowskiEngine as ME\n'), ((1712, 1808), 'MinkowskiEngine.MinkowskiConvolution', 'ME.MinkowskiConvolution', (['self.inplanes', 'self.inplanes'], {'kernel_size': '(2)', 'stride': '(2)', 'dimension': 'D'}), '(self.inplanes, self.inplanes, kernel_size=2, stride\n =2, dimension=D)\n', (1735, 1808), True, 'import MinkowskiEngine as ME\n'), ((1837, 1873), 'MinkowskiEngine.MinkowskiBatchNorm', 'ME.MinkowskiBatchNorm', (['self.inplanes'], {}), '(self.inplanes)\n', (1858, 1873), True, 'import MinkowskiEngine as ME\n'), ((2022, 2118), 'MinkowskiEngine.MinkowskiConvolution', 'ME.MinkowskiConvolution', (['self.inplanes', 'self.inplanes'], {'kernel_size': '(2)', 'stride': '(2)', 'dimension': 'D'}), '(self.inplanes, self.inplanes, kernel_size=2, stride\n =2, dimension=D)\n', (2045, 2118), True, 'import MinkowskiEngine as ME\n'), ((2146, 2182), 'MinkowskiEngine.MinkowskiBatchNorm', 'ME.MinkowskiBatchNorm', (['self.inplanes'], {}), '(self.inplanes)\n', (2167, 2182), True, 'import MinkowskiEngine as ME\n'), ((2334, 2440), 'MinkowskiEngine.MinkowskiConvolutionTranspose', 'ME.MinkowskiConvolutionTranspose', (['self.inplanes', 'self.PLANES[4]'], {'kernel_size': '(2)', 'stride': '(2)', 'dimension': 'D'}), '(self.inplanes, self.PLANES[4], kernel_size\n =2, stride=2, dimension=D)\n', (2366, 2440), True, 'import MinkowskiEngine as ME\n'), ((2470, 2507), 'MinkowskiEngine.MinkowskiBatchNorm', 'ME.MinkowskiBatchNorm', (['self.PLANES[4]'], {}), '(self.PLANES[4])\n', (2491, 2507), True, 'import MinkowskiEngine as ME\n'), ((2737, 2843), 'MinkowskiEngine.MinkowskiConvolutionTranspose', 'ME.MinkowskiConvolutionTranspose', (['self.inplanes', 'self.PLANES[5]'], {'kernel_size': '(2)', 'stride': '(2)', 'dimension': 'D'}), '(self.inplanes, self.PLANES[5], kernel_size\n =2, stride=2, 
dimension=D)\n', (2769, 2843), True, 'import MinkowskiEngine as ME\n'), ((2873, 2910), 'MinkowskiEngine.MinkowskiBatchNorm', 'ME.MinkowskiBatchNorm', (['self.PLANES[5]'], {}), '(self.PLANES[5])\n', (2894, 2910), True, 'import MinkowskiEngine as ME\n'), ((3140, 3246), 'MinkowskiEngine.MinkowskiConvolutionTranspose', 'ME.MinkowskiConvolutionTranspose', (['self.inplanes', 'self.PLANES[6]'], {'kernel_size': '(2)', 'stride': '(2)', 'dimension': 'D'}), '(self.inplanes, self.PLANES[6], kernel_size\n =2, stride=2, dimension=D)\n', (3172, 3246), True, 'import MinkowskiEngine as ME\n'), ((3276, 3313), 'MinkowskiEngine.MinkowskiBatchNorm', 'ME.MinkowskiBatchNorm', (['self.PLANES[6]'], {}), '(self.PLANES[6])\n', (3297, 3313), True, 'import MinkowskiEngine as ME\n'), ((3543, 3649), 'MinkowskiEngine.MinkowskiConvolutionTranspose', 'ME.MinkowskiConvolutionTranspose', (['self.inplanes', 'self.PLANES[7]'], {'kernel_size': '(2)', 'stride': '(2)', 'dimension': 'D'}), '(self.inplanes, self.PLANES[7], kernel_size\n =2, stride=2, dimension=D)\n', (3575, 3649), True, 'import MinkowskiEngine as ME\n'), ((3679, 3716), 'MinkowskiEngine.MinkowskiBatchNorm', 'ME.MinkowskiBatchNorm', (['self.PLANES[7]'], {}), '(self.PLANES[7])\n', (3700, 3716), True, 'import MinkowskiEngine as ME\n'), ((3917, 4017), 'MinkowskiEngine.MinkowskiConvolution', 'ME.MinkowskiConvolution', (['self.PLANES[7]', 'out_channels'], {'kernel_size': '(1)', 'has_bias': '(True)', 'dimension': 'D'}), '(self.PLANES[7], out_channels, kernel_size=1,\n has_bias=True, dimension=D)\n', (3940, 4017), True, 'import MinkowskiEngine as ME\n'), ((4095, 4125), 'MinkowskiEngine.MinkowskiReLU', 'ME.MinkowskiReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4111, 4125), True, 'import MinkowskiEngine as ME\n'), ((4935, 4958), 'MinkowskiEngine.cat', 'ME.cat', (['(out, out_b3p8)'], {}), '((out, out_b3p8))\n', (4941, 4958), True, 'import MinkowskiEngine as ME\n'), ((5127, 5150), 'MinkowskiEngine.cat', 'ME.cat', (['(out, out_b2p4)'], {}), '((out, out_b2p4))\n', (5133, 5150), True, 'import MinkowskiEngine as ME\n'), ((5319, 5342), 'MinkowskiEngine.cat', 'ME.cat', (['(out, out_b1p2)'], {}), '((out, out_b1p2))\n', (5325, 5342), True, 'import MinkowskiEngine as ME\n'), ((5511, 5532), 'MinkowskiEngine.cat', 'ME.cat', (['(out, out_p1)'], {}), '((out, out_p1))\n', (5517, 5532), True, 'import MinkowskiEngine as ME\n'), ((7406, 7442), 'examples.common.data_loader', 'data_loader', ([], {'is_classification': '(False)'}), '(is_classification=False)\n', (7417, 7442), False, 'from examples.common import data_loader\n'), ((7876, 7898), 'torch.load', 'torch.load', (['"""test.pth"""'], {}), "('test.pth')\n", (7886, 7898), False, 'import torch\n'), ((7186, 7211), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7209, 7211), False, 'import torch\n'), ((7459, 7495), 'MinkowskiEngine.SparseTensor', 'ME.SparseTensor', (['feat'], {'coords': 'coords'}), '(feat, coords=coords)\n', (7474, 7495), True, 'import MinkowskiEngine as ME\n')] |
"""TODO."""
from setuptools import setup
setup(
name='nginx-access-tailer',
version='0.1',
author='swfrench',
url='https://github.com/swfrench/nginx-tailer',
packages=['nginx_access_tailer',],
license='BSD three-clause license',
entry_points={
'console_scripts': ['nginx-access-tailer = nginx_access_tailer.__main__:main'],
},
install_requires=[
'python-gflags >= 3.1.1',
'google-cloud-monitoring >= 0.25.0',
],
test_suite='nose.collector',
tests_require=['nose', 'mock'],
)
| [
"setuptools.setup"
] | [((43, 497), 'setuptools.setup', 'setup', ([], {'name': '"""nginx-access-tailer"""', 'version': '"""0.1"""', 'author': '"""swfrench"""', 'url': '"""https://github.com/swfrench/nginx-tailer"""', 'packages': "['nginx_access_tailer']", 'license': '"""BSD three-clause license"""', 'entry_points': "{'console_scripts': ['nginx-access-tailer = nginx_access_tailer.__main__:main']\n }", 'install_requires': "['python-gflags >= 3.1.1', 'google-cloud-monitoring >= 0.25.0']", 'test_suite': '"""nose.collector"""', 'tests_require': "['nose', 'mock']"}), "(name='nginx-access-tailer', version='0.1', author='swfrench', url=\n 'https://github.com/swfrench/nginx-tailer', packages=[\n 'nginx_access_tailer'], license='BSD three-clause license',\n entry_points={'console_scripts': [\n 'nginx-access-tailer = nginx_access_tailer.__main__:main']},\n install_requires=['python-gflags >= 3.1.1',\n 'google-cloud-monitoring >= 0.25.0'], test_suite='nose.collector',\n tests_require=['nose', 'mock'])\n", (48, 497), False, 'from setuptools import setup\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*
import os
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
setup(
name='persistent-celery-beat-scheduler',
version='0.1.1.dev0',
packages=find_packages('src', exclude=('tests',)),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
description=(
'Celery Beat Scheduler that stores the scheduler data in Redis.'
),
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2',
long_description='https://github.com/richardasaurus/persistent-celery-beat-scheduler',
install_requires=install_requires,
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
)
| [
"os.path.abspath",
"setuptools.find_packages"
] | [((396, 436), 'setuptools.find_packages', 'find_packages', (['"""src"""'], {'exclude': "('tests',)"}), "('src', exclude=('tests',))\n", (409, 436), False, 'from setuptools import find_packages, setup\n'), ((182, 207), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (197, 207), False, 'import os\n')] |
import collections
import unittest
import driver
from driver.protocol import *
_server = ('localhost', 11211)
_dead_retry = 30
_socket_timeout = 3
_max_receive_size = 4096
class MockConnection(object):
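    """In-memory stand-in for the driver's connection object: records outgoing
    commands in send_buffer and serves canned replies from receive_buffer."""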
def __init__(self,
server=_server,
dead_retry=30,
socket_timeout=3):
self.server = server
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.closed = True
self.socket = None
self.send_buffer = collections.deque()
self.receive_buffer = collections.deque()
self.on_read = None
self.on_write = None
def open(self):
self.closed = False
self.socket = True
return True
def close(self):
self.closed = True
self.socket = None
def send(self, data):
if self.on_write is not None:
self.on_write()
self.send_buffer.append(data)
def read(self, size=_max_receive_size):
if self.on_read is not None:
self.on_read()
return self.receive_buffer.popleft()
class ClientTests(unittest.TestCase):
def setUp(self):
self.client = driver.Client(_server)
self.mock = MockConnection()
self.client._connection = self.mock
self.client.connect()
def test_initialize_and_connect(self):
self.assertFalse(self.mock.closed)
def test_disconnect(self):
self.client.disconnect()
self.assertTrue(self.mock.closed)
def test_set_value_without_response(self):
self.client.set('testkey', 'testvalue')
self.assertEqual(self.mock.send_buffer.pop(), b'set testkey 0 0 9 noreply\r\ntestvalue\r\n')
def test_set_value_with_stored_response(self):
self.mock.receive_buffer.append(StoreReply.STORED + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertTrue(response)
def test_set_value_with_not_stored_response(self):
self.mock.receive_buffer.append(StoreReply.NOT_STORED + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(response)
def test_set_value_with_exists_response(self):
self.mock.receive_buffer.append(StoreReply.EXISTS + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(response)
def test_set_value_with_error_response(self):
self.mock.receive_buffer.append(Errors.ERROR + Constants.END_LINE)
with self.assertRaises(driver.DriverUnknownException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_with_server_error_response(self):
self.mock.receive_buffer.append(Errors.SERVER_ERROR + b' Test server error' + Constants.END_LINE)
with self.assertRaises(driver.DriverServerException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_with_client_error_response(self):
self.mock.receive_buffer.append(Errors.CLIENT_ERROR + b' Test client error' + Constants.END_LINE)
with self.assertRaises(driver.DriverClientException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_exception(self):
error_message = "Test write exception"
self.mock.on_write = lambda: _raise_exception(error_message)
result = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(result)
def test_get_value_exception(self):
error_message = "Test read exception"
self.mock.on_read = lambda: _raise_exception(error_message)
result = self.client.get('testkey')
self.assertIsNone(result)
def _raise_exception(message):
raise Exception(message)
| [
"collections.deque",
"driver.Client"
] | [((521, 540), 'collections.deque', 'collections.deque', ([], {}), '()\n', (538, 540), False, 'import collections\n'), ((571, 590), 'collections.deque', 'collections.deque', ([], {}), '()\n', (588, 590), False, 'import collections\n'), ((1188, 1210), 'driver.Client', 'driver.Client', (['_server'], {}), '(_server)\n', (1201, 1210), False, 'import driver\n')] |
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
path  # path to the dataset CSV, assumed to be provided by the execution environment
# code starts here
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code ends here
# --------------
# code starts here
banks = bank.drop('Loan_ID',axis = 1)
print(banks)
print(banks.isnull().sum())
bank_mode = banks.mode().iloc[0]
banks = banks.fillna(bank_mode)
#code ends here
# --------------
# Code starts here
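# pivot_table defaults to aggfunc='mean', so this gives the mean LoanAmount for
# each (Gender, Married, Self_Employed) combination.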
avg_loan_amount = banks.pivot_table(index=['Gender','Married','Self_Employed'],values = 'LoanAmount')
# code ends here
# --------------
# code starts here
loan_approved_se = ((banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')).value_counts()
#print(loan_approved_se)
loan_approved_nse = ((banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')).value_counts()
print(loan_approved_nse)
Loan_Status = 614
percentage_se = (56/Loan_Status)*100
percentage_nse = (366/Loan_Status)*100
# code ends here
# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x) / 12)
print(loan_term.value_counts())
big_loan = [i for i in loan_term if i >= 25]
big_loan_term = len(big_loan)
print(big_loan_term)
#[loan_term.value_counts()[i] for i in range(len(loan_terms)) if loan_term.value_counts().index[i] >= 25]
# code ends here
# --------------
# code starts here
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.mean()
# code ends here
| [
"pandas.read_csv"
] | [((139, 156), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (150, 156), True, 'import pandas as pd\n')] |
# This file is part of Patsy
# Copyright (C) 2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# Regression tests for fixed bugs (when not otherwise better covered somewhere
# else)
from patsy import (EvalEnvironment, dmatrix, build_design_matrices,
PatsyError, Origin)
def test_issue_11():
# Give a sensible error message for level mismatches
# (At some points we've failed to put an origin= on these errors)
env = EvalEnvironment.capture()
data = {"X" : [0,1,2,3], "Y" : [1,2,3,4]}
formula = "C(X) + Y"
new_data = {"X" : [0,0,1,2,3,3,4], "Y" : [1,2,3,4,5,6,7]}
info = dmatrix(formula, data)
try:
build_design_matrices([info.design_info], new_data)
except PatsyError as e:
assert e.origin == Origin(formula, 0, 4)
else:
assert False
| [
"patsy.dmatrix",
"patsy.EvalEnvironment.capture",
"patsy.build_design_matrices",
"patsy.Origin"
] | [((470, 495), 'patsy.EvalEnvironment.capture', 'EvalEnvironment.capture', ([], {}), '()\n', (493, 495), False, 'from patsy import EvalEnvironment, dmatrix, build_design_matrices, PatsyError, Origin\n'), ((640, 662), 'patsy.dmatrix', 'dmatrix', (['formula', 'data'], {}), '(formula, data)\n', (647, 662), False, 'from patsy import EvalEnvironment, dmatrix, build_design_matrices, PatsyError, Origin\n'), ((680, 731), 'patsy.build_design_matrices', 'build_design_matrices', (['[info.design_info]', 'new_data'], {}), '([info.design_info], new_data)\n', (701, 731), False, 'from patsy import EvalEnvironment, dmatrix, build_design_matrices, PatsyError, Origin\n'), ((787, 808), 'patsy.Origin', 'Origin', (['formula', '(0)', '(4)'], {}), '(formula, 0, 4)\n', (793, 808), False, 'from patsy import EvalEnvironment, dmatrix, build_design_matrices, PatsyError, Origin\n')] |
__all__ = ['imread', 'imsave']
import numpy as np
from PIL import Image
from ...util import img_as_ubyte, img_as_uint
def imread(fname, dtype=None, img_num=None, **kwargs):
"""Load an image from file.
Parameters
----------
fname : str or file
File name or file-like-object.
dtype : numpy dtype object or string specifier
Specifies data type of array elements.
img_num : int, optional
Specifies which image to read in a file with multiple images
(zero-indexed).
kwargs : keyword pairs, optional
Addition keyword arguments to pass through.
Notes
-----
Files are read using the Python Imaging Library.
See PIL docs [1]_ for a list of supported formats.
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
if isinstance(fname, str):
with open(fname, 'rb') as f:
im = Image.open(f)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
else:
im = Image.open(fname)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
def pil_to_ndarray(image, dtype=None, img_num=None):
"""Import a PIL Image object to an ndarray, in memory.
Parameters
----------
Refer to ``imread``.
"""
try:
# this will raise an IOError if the file is not readable
image.getdata()[0]
except IOError as e:
site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
pillow_error_message = str(e)
error_message = ('Could not load "%s" \n'
'Reason: "%s"\n'
'Please see documentation at: %s'
% (image.filename, pillow_error_message, site))
raise ValueError(error_message)
frames = []
grayscale = None
i = 0
while 1:
try:
image.seek(i)
except EOFError:
break
frame = image
if img_num is not None and img_num != i:
image.getdata()[0]
i += 1
continue
if image.format == 'PNG' and image.mode == 'I' and dtype is None:
dtype = 'uint16'
if image.mode == 'P':
if grayscale is None:
grayscale = _palette_is_grayscale(image)
if grayscale:
frame = image.convert('L')
else:
if image.format == 'PNG' and 'transparency' in image.info:
frame = image.convert('RGBA')
else:
frame = image.convert('RGB')
elif image.mode == '1':
frame = image.convert('L')
elif 'A' in image.mode:
frame = image.convert('RGBA')
elif image.mode == 'CMYK':
frame = image.convert('RGB')
if image.mode.startswith('I;16'):
shape = image.size
dtype = '>u2' if image.mode.endswith('B') else '<u2'
if 'S' in image.mode:
dtype = dtype.replace('u', 'i')
frame = np.fromstring(frame.tobytes(), dtype)
frame.shape = shape[::-1]
else:
frame = np.array(frame, dtype=dtype)
frames.append(frame)
i += 1
if img_num is not None:
break
if hasattr(image, 'fp') and image.fp:
image.fp.close()
if img_num is None and len(frames) > 1:
return np.array(frames)
elif frames:
return frames[0]
elif img_num:
raise IndexError('Could not find image #%s' % img_num)
def _palette_is_grayscale(pil_image):
"""Return True if PIL image in palette mode is grayscale.
Parameters
----------
pil_image : PIL image
PIL Image that is in Palette mode.
Returns
-------
is_grayscale : bool
True if all colors in image palette are gray.
"""
assert pil_image.mode == 'P'
# get palette as an array with R, G, B columns
palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
# Not all palette colors are used; unused colors have junk values.
start, stop = pil_image.getextrema()
valid_palette = palette[start:stop + 1]
# Image is grayscale if channel differences (R - G and G - B)
# are all zero.
return np.allclose(np.diff(valid_palette), 0)
def ndarray_to_pil(arr, format_str=None):
"""Export an ndarray to a PIL object.
Parameters
----------
Refer to ``imsave``.
"""
if arr.ndim == 3:
arr = img_as_ubyte(arr)
mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
elif format_str in ['png', 'PNG']:
mode = 'I;16'
mode_base = 'I'
if arr.dtype.kind == 'f':
arr = img_as_uint(arr)
elif arr.max() < 256 and arr.min() >= 0:
arr = arr.astype(np.uint8)
mode = mode_base = 'L'
else:
arr = img_as_uint(arr)
else:
arr = img_as_ubyte(arr)
mode = 'L'
mode_base = 'L'
try:
array_buffer = arr.tobytes()
except AttributeError:
array_buffer = arr.tostring() # Numpy < 1.9
if arr.ndim == 2:
im = Image.new(mode_base, arr.T.shape)
try:
im.frombytes(array_buffer, 'raw', mode)
except AttributeError:
im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7
else:
image_shape = (arr.shape[1], arr.shape[0])
try:
im = Image.frombytes(mode, image_shape, array_buffer)
except AttributeError:
im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7
return im
def imsave(fname, arr, format_str=None, **kwargs):
"""Save an image to disk.
Parameters
----------
fname : str or file-like object
Name of destination file.
arr : ndarray of uint8 or float
Array (image) to save. Arrays of data-type uint8 should have
values in [0, 255], whereas floating-point arrays must be
in [0, 1].
format_str: str
Format to save as, this is defaulted to PNG if using a file-like
object; this will be derived from the extension if fname is a string
kwargs: dict
Keyword arguments to the Pillow save function (or tifffile save
function, for Tiff files). These are format dependent. For example,
Pillow's JPEG save function supports an integer ``quality`` argument
with values in [1, 95], while TIFFFile supports a ``compress``
integer argument with values in [0, 9].
Notes
-----
Use the Python Imaging Library.
See PIL docs [1]_ for a list of other supported formats.
    All images besides single channel PNGs are converted using `img_as_ubyte`.
    Single channel PNGs have the following behavior:
    - Integer values in [0, 255] and Boolean types -> img_as_ubyte
    - Floating point and other integers -> img_as_uint
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
# default to PNG if file-like object
if not isinstance(fname, str) and format_str is None:
format_str = "PNG"
# Check for png in filename
if (isinstance(fname, str)
and fname.lower().endswith(".png")):
format_str = "PNG"
arr = np.asanyarray(arr)
if arr.dtype.kind == 'b':
arr = arr.astype(np.uint8)
if arr.ndim not in (2, 3):
raise ValueError("Invalid shape for image array: %s" % (arr.shape, ))
if arr.ndim == 3:
if arr.shape[2] not in (3, 4):
raise ValueError("Invalid number of channels in image array.")
img = ndarray_to_pil(arr, format_str=format_str)
img.save(fname, format=format_str, **kwargs)
| [
"PIL.Image.open",
"PIL.Image.new",
"numpy.diff",
"numpy.asanyarray",
"numpy.array",
"PIL.Image.fromstring",
"PIL.Image.frombytes"
] | [((7329, 7347), 'numpy.asanyarray', 'np.asanyarray', (['arr'], {}), '(arr)\n', (7342, 7347), True, 'import numpy as np\n'), ((1049, 1066), 'PIL.Image.open', 'Image.open', (['fname'], {}), '(fname)\n', (1059, 1066), False, 'from PIL import Image\n'), ((3457, 3473), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (3465, 3473), True, 'import numpy as np\n'), ((4327, 4349), 'numpy.diff', 'np.diff', (['valid_palette'], {}), '(valid_palette)\n', (4334, 4349), True, 'import numpy as np\n'), ((5189, 5222), 'PIL.Image.new', 'Image.new', (['mode_base', 'arr.T.shape'], {}), '(mode_base, arr.T.shape)\n', (5198, 5222), False, 'from PIL import Image\n'), ((944, 957), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (954, 957), False, 'from PIL import Image\n'), ((3204, 3232), 'numpy.array', 'np.array', (['frame'], {'dtype': 'dtype'}), '(frame, dtype=dtype)\n', (3212, 3232), True, 'import numpy as np\n'), ((5476, 5524), 'PIL.Image.frombytes', 'Image.frombytes', (['mode', 'image_shape', 'array_buffer'], {}), '(mode, image_shape, array_buffer)\n', (5491, 5524), False, 'from PIL import Image\n'), ((5573, 5622), 'PIL.Image.fromstring', 'Image.fromstring', (['mode', 'image_shape', 'array_buffer'], {}), '(mode, image_shape, array_buffer)\n', (5589, 5622), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
"""
Linear chain of reactions.
"""
from __future__ import print_function, division
import tellurium as te
model = '''
model feedback()
// Reactions:
J0: $X0 -> S1; (VM1 * (X0 - S1/Keq1))/(1 + X0 + S1 + S4^h);
J1: S1 -> S2; (10 * S1 - 2 * S2) / (1 + S1 + S2);
J2: S2 -> S3; (10 * S2 - 2 * S3) / (1 + S2 + S3);
J3: S3 -> S4; (10 * S3 - 2 * S4) / (1 + S3 + S4);
J4: S4 -> $X1; (V4 * S4) / (KS4 + S4);
// Species initializations:
S1 = 0; S2 = 0; S3 = 0;
S4 = 0; X0 = 10; X1 = 0;
// Variable initialization:
VM1 = 10; Keq1 = 10; h = 10; V4 = 2.5; KS4 = 0.5;
end'''
r = te.loada(model)
result = r.simulate(0, 40, 500)
r.plotWithLegend(result)
| [
"tellurium.loada"
] | [((622, 637), 'tellurium.loada', 'te.loada', (['model'], {}), '(model)\n', (630, 637), True, 'import tellurium as te\n')] |
# This version of the bitcoin experiment imports data preprocessed in Matlab, and uses the GCN baseline
# The point of this script is to do link prediction
# Imports and aliases
import pickle
import torch as t
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.datasets as datasets
import numpy as np
import matplotlib.pyplot as plt
import cProfile
import pandas as pd
import datetime
from scipy.sparse import csr_matrix
import os.path
import embedding_help_functions as ehf
import scipy.io as sio
unsq = t.unsqueeze
sq = t.squeeze
# Settings
alpha_vec = [.75, .76, .77, .78, .79, .80, .81, .82, .83, .84, .85, .86, .87, .88, .89, .90, .91, .92, .93, .94, .95]
no_layers = 1
dataset = "OTC" # OTC or Alpha
no_epochs = 1000
mat_f_name = "saved_content_bitcoin_otc.mat"
no_trials = 1
beta1 = 19
beta2 = 19
cutoff = 95
eval_type = "MAP-MRR" # "MAP-MRR" or "F1"
data_loc = "data/Bitcoin_" + dataset + "/"
S_train, S_val, S_test = 95, 20, 20
lr = 0.01
momentum = 0.9
# Load and return relevant data
A, A_labels, C_train, C_val, C_test, N = ehf.load_data(data_loc, mat_f_name, S_train, S_val, S_test, transformed=False)
# Create features for the nodes
X_train, X_val, X_test = ehf.create_node_features(A, S_train, S_val, S_test, same_block_size=False)
# Extract edges and labels from A_labels, and augment with nonexisting edges
# edges, beta
edges = A_labels._indices()
edges_aug, labels = ehf.augment_edges(edges, N, beta1, beta2, cutoff)
# Divide adjacency matrices and labels into training, validation and testing sets
edges_train, target_train, e_train, edges_val, target_val, e_val, edges_test, target_test, e_test = ehf.split_data(edges_aug, labels, S_train, S_val, S_test, same_block_size = False)
if no_trials > 1:
ep_acc_loss_vec = []
for tr in range(no_trials):
for alpha in alpha_vec:
class_weights = t.tensor([alpha, 1.0-alpha])
save_res_fname = "results_BASELINE_layers" + str(no_layers) + "_w" + str(round(float(class_weights[0])*100)) + "_" + dataset + "_link_prediction"
# Create gcn for training
if no_layers == 2:
gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,6,2], nonlin2="selu")
elif no_layers == 1:
gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,2])
# Train
optimizer = t.optim.SGD(gcn.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss(weight=class_weights) # Takes arguments (output, target)
if eval_type == "F1":
ep_acc_loss = np.zeros((no_epochs,12)) # (precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test)
elif eval_type == "MAP-MRR":
ep_acc_loss = np.zeros((no_epochs,9)) # (MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test)
for ep in range(no_epochs):
# Compute loss and take step
optimizer.zero_grad()
output_train = gcn()
loss_train = criterion(output_train, target_train[edges_train[0]!=0])
loss_train.backward()
optimizer.step()
# Things that don't require gradient
with t.no_grad():
if ep % 100 == 0:
# Compute stats for training data; no point in doing more often than this
guess_train = t.argmax(output_train, dim=1)
if eval_type == "F1":
precision_train, recall_train, f1_train = ehf.compute_f1(guess_train, target_train[edges_train[0]!=0])
elif eval_type == "MAP-MRR":
MAP_train, MRR_train = ehf.compute_MAP_MRR(output_train, target_train[edges_train[0]!=0], edges_train[:, edges_train[0]!=0])
# Compute stats for validation data
output_val = gcn(C_val[:-1], X_val[:-1], e_val)
guess_val = t.argmax(output_val, dim=1)
if eval_type == "F1":
precision_val, recall_val, f1_val = ehf.compute_f1(guess_val, target_val[edges_val[0]!=0])
elif eval_type == "MAP-MRR":
MAP_val, MRR_val = ehf.compute_MAP_MRR(output_val, target_val[edges_val[0]!=0], edges_val[:, edges_val[0]!=0])
loss_val = criterion(output_val, target_val[edges_val[0]!=0])
# Compute stats for test data
output_test = gcn(C_test[:-1], X_test[:-1], e_test)
guess_test = t.argmax(output_test, dim=1)
if eval_type == "F1":
precision_test, recall_test, f1_test = ehf.compute_f1(guess_test, target_test[edges_test[0]!=0])
elif eval_type == "MAP-MRR":
MAP_test, MRR_test = ehf.compute_MAP_MRR(output_test, target_test[edges_test[0]!=0], edges_test[:, edges_test[0]!=0])
loss_test = criterion(output_test, target_test[edges_test[0]!=0])
# Print
if eval_type == "F1":
ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, alpha, tr, ep)
elif eval_type == "MAP-MRR":
print("alpha/Tr/Ep %.2f/%d/%d. Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (alpha, tr, ep, MAP_train, MRR_train, loss_train))
print("alpha/Tr/Ep %.2f/%d/%d. Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (alpha, tr, ep, MAP_val, MRR_val, loss_val))
print("alpha/Tr/Ep %.2f/%d/%d. Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (alpha, tr, ep, MAP_test, MRR_test, loss_test))
# Store values with results
if eval_type == "F1":
ep_acc_loss[ep] = [precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test]
elif eval_type == "MAP-MRR":
ep_acc_loss[ep] = [MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test]
if eval_type == "F1":
ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, is_final=True)
elif eval_type == "MAP-MRR":
print("FINAL: Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (MAP_train, MRR_train, loss_train))
print("FINAL: Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (MAP_val, MRR_val, loss_val))
print("FINAL: Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (MAP_test, MRR_test, loss_test))
if no_trials == 1:
pickle.dump(ep_acc_loss, open(save_res_fname, "wb"))
print("Results saved for single trial")
else:
ep_acc_loss_vec.append(ep_acc_loss)
if no_trials > 1:
pickle.dump(ep_acc_loss_vec, open(save_res_fname + "_no_trials" + str(no_trials), "wb"))
print("Results saved for all trials") | [
"embedding_help_functions.load_data",
"embedding_help_functions.split_data",
"embedding_help_functions.compute_f1",
"embedding_help_functions.compute_MAP_MRR",
"torch.nn.CrossEntropyLoss",
"embedding_help_functions.create_node_features",
"torch.argmax",
"torch.tensor",
"numpy.zeros",
"embedding_help_functions.EmbeddingKWGCN",
"embedding_help_functions.print_f1",
"torch.no_grad",
"embedding_help_functions.augment_edges"
] | [((1080, 1158), 'embedding_help_functions.load_data', 'ehf.load_data', (['data_loc', 'mat_f_name', 'S_train', 'S_val', 'S_test'], {'transformed': '(False)'}), '(data_loc, mat_f_name, S_train, S_val, S_test, transformed=False)\n', (1093, 1158), True, 'import embedding_help_functions as ehf\n'), ((1217, 1291), 'embedding_help_functions.create_node_features', 'ehf.create_node_features', (['A', 'S_train', 'S_val', 'S_test'], {'same_block_size': '(False)'}), '(A, S_train, S_val, S_test, same_block_size=False)\n', (1241, 1291), True, 'import embedding_help_functions as ehf\n'), ((1432, 1481), 'embedding_help_functions.augment_edges', 'ehf.augment_edges', (['edges', 'N', 'beta1', 'beta2', 'cutoff'], {}), '(edges, N, beta1, beta2, cutoff)\n', (1449, 1481), True, 'import embedding_help_functions as ehf\n'), ((1665, 1750), 'embedding_help_functions.split_data', 'ehf.split_data', (['edges_aug', 'labels', 'S_train', 'S_val', 'S_test'], {'same_block_size': '(False)'}), '(edges_aug, labels, S_train, S_val, S_test, same_block_size=False\n )\n', (1679, 1750), True, 'import embedding_help_functions as ehf\n'), ((1861, 1891), 'torch.tensor', 't.tensor', (['[alpha, 1.0 - alpha]'], {}), '([alpha, 1.0 - alpha])\n', (1869, 1891), True, 'import torch as t\n'), ((2368, 2409), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'class_weights'}), '(weight=class_weights)\n', (2387, 2409), True, 'import torch.nn as nn\n'), ((2097, 2184), 'embedding_help_functions.EmbeddingKWGCN', 'ehf.EmbeddingKWGCN', (['C_train[:-1]', 'X_train[:-1]', 'e_train', '[6, 6, 2]'], {'nonlin2': '"""selu"""'}), "(C_train[:-1], X_train[:-1], e_train, [6, 6, 2], nonlin2=\n 'selu')\n", (2115, 2184), True, 'import embedding_help_functions as ehf\n'), ((2486, 2511), 'numpy.zeros', 'np.zeros', (['(no_epochs, 12)'], {}), '((no_epochs, 12))\n', (2494, 2511), True, 'import numpy as np\n'), ((5624, 5806), 'embedding_help_functions.print_f1', 'ehf.print_f1', (['precision_train', 'recall_train', 'f1_train', 'loss_train', 'precision_val', 'recall_val', 'f1_val', 'loss_val', 'precision_test', 'recall_test', 'f1_test', 'loss_test'], {'is_final': '(True)'}), '(precision_train, recall_train, f1_train, loss_train,\n precision_val, recall_val, f1_val, loss_val, precision_test,\n recall_test, f1_test, loss_test, is_final=True)\n', (5636, 5806), True, 'import embedding_help_functions as ehf\n'), ((2210, 2273), 'embedding_help_functions.EmbeddingKWGCN', 'ehf.EmbeddingKWGCN', (['C_train[:-1]', 'X_train[:-1]', 'e_train', '[6, 2]'], {}), '(C_train[:-1], X_train[:-1], e_train, [6, 2])\n', (2228, 2273), True, 'import embedding_help_functions as ehf\n'), ((2709, 2733), 'numpy.zeros', 'np.zeros', (['(no_epochs, 9)'], {}), '((no_epochs, 9))\n', (2717, 2733), True, 'import numpy as np\n'), ((3108, 3119), 'torch.no_grad', 't.no_grad', ([], {}), '()\n', (3117, 3119), True, 'import torch as t\n'), ((3241, 3270), 'torch.argmax', 't.argmax', (['output_train'], {'dim': '(1)'}), '(output_train, dim=1)\n', (3249, 3270), True, 'import torch as t\n'), ((3684, 3711), 'torch.argmax', 't.argmax', (['output_val'], {'dim': '(1)'}), '(output_val, dim=1)\n', (3692, 3711), True, 'import torch as t\n'), ((4170, 4198), 'torch.argmax', 't.argmax', (['output_test'], {'dim': '(1)'}), '(output_test, dim=1)\n', (4178, 4198), True, 'import torch as t\n'), ((3346, 3408), 'embedding_help_functions.compute_f1', 'ehf.compute_f1', (['guess_train', 'target_train[edges_train[0] != 0]'], {}), '(guess_train, target_train[edges_train[0] != 0])\n', (3360, 3408), True, 'import 
embedding_help_functions as ehf\n'), ((3781, 3837), 'embedding_help_functions.compute_f1', 'ehf.compute_f1', (['guess_val', 'target_val[edges_val[0] != 0]'], {}), '(guess_val, target_val[edges_val[0] != 0])\n', (3795, 3837), True, 'import embedding_help_functions as ehf\n'), ((4271, 4330), 'embedding_help_functions.compute_f1', 'ehf.compute_f1', (['guess_test', 'target_test[edges_test[0] != 0]'], {}), '(guess_test, target_test[edges_test[0] != 0])\n', (4285, 4330), True, 'import embedding_help_functions as ehf\n'), ((4605, 4787), 'embedding_help_functions.print_f1', 'ehf.print_f1', (['precision_train', 'recall_train', 'f1_train', 'loss_train', 'precision_val', 'recall_val', 'f1_val', 'loss_val', 'precision_test', 'recall_test', 'f1_test', 'loss_test', 'alpha', 'tr', 'ep'], {}), '(precision_train, recall_train, f1_train, loss_train,\n precision_val, recall_val, f1_val, loss_val, precision_test,\n recall_test, f1_test, loss_test, alpha, tr, ep)\n', (4617, 4787), True, 'import embedding_help_functions as ehf\n'), ((3470, 3579), 'embedding_help_functions.compute_MAP_MRR', 'ehf.compute_MAP_MRR', (['output_train', 'target_train[edges_train[0] != 0]', 'edges_train[:, edges_train[0] != 0]'], {}), '(output_train, target_train[edges_train[0] != 0],\n edges_train[:, edges_train[0] != 0])\n', (3489, 3579), True, 'import embedding_help_functions as ehf\n'), ((3895, 3994), 'embedding_help_functions.compute_MAP_MRR', 'ehf.compute_MAP_MRR', (['output_val', 'target_val[edges_val[0] != 0]', 'edges_val[:, edges_val[0] != 0]'], {}), '(output_val, target_val[edges_val[0] != 0], edges_val[:,\n edges_val[0] != 0])\n', (3914, 3994), True, 'import embedding_help_functions as ehf\n'), ((4390, 4494), 'embedding_help_functions.compute_MAP_MRR', 'ehf.compute_MAP_MRR', (['output_test', 'target_test[edges_test[0] != 0]', 'edges_test[:, edges_test[0] != 0]'], {}), '(output_test, target_test[edges_test[0] != 0],\n edges_test[:, edges_test[0] != 0])\n', (4409, 4494), True, 'import embedding_help_functions as ehf\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PKC_Classes import NetworkUser, KDC
from DES import DES
from RSA_Class import RSA
import socket
import os
import sys
import threading
import time
if sys.version_info[0] < 3:
raise Exception("Must be using Python 3")
def reply_conn(conn, addr):
    print('Accept new connection from user {0}'.format(addr))
#conn.settimeout(500)
# conn.send(b'Hi, This is bob. Waiting for your sess key')
buf = conn.recv(1024)
while True:
if buf:
receive_packet = bytes.decode(buf).rstrip('\x00')
reply_packet = bob.process_packet(receive_packet)
conn.send(reply_packet.encode())
buf = conn.recv(1024)
else:
time.sleep(0.5)
conn.close()
bob = NetworkUser('Alice', DES(), RSA(9973, 97), 200)
print('bob:', bob.uid)
# socket communication
kdc_host, kdc_port = 'localhost', 9999
bob_host, bob_port = 'localhost', 9200
# talk to kdc for sess key
try:
sock_with_kdc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_with_kdc.connect((kdc_host, kdc_port))
print(sock_with_kdc.recv(1024))
# send cipher_key
bob_cipher_key_packet = bob.send_cipher_key()
sock_with_kdc.send(bob_cipher_key_packet.encode())
kdc_bob_cipher_key_packet = sock_with_kdc.recv(1024).decode()
print(kdc_bob_cipher_key_packet)
bob.process_packet(kdc_bob_cipher_key_packet)
except socket.error as msg:
    print(msg)
sys.exit(1)
# sock_with_kdc.shutdown(socket.SHUT_WR)
# set up bob's own listening socket so peers can talk to bob
try:
sock_self = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_self.bind((bob_host, bob_port))
sock_self.listen(10)
except socket.error as msg:
    print(msg)
sys.exit(1)
while 1:
conn, addr = sock_self.accept()
thread = threading.Thread(target=reply_conn, args=(conn, addr))
thread.start()
# sock_self.close()
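# Hedged companion sketch (not part of this script): a peer such as alice would
# reach this listener with a plain TCP client; `session_key_packet` below is an
# assumed variable, and the packet format is whatever NetworkUser.process_packet()
# expects.
#
#   peer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   peer.connect((bob_host, bob_port))
#   peer.send(session_key_packet.encode())
#   reply = peer.recv(1024)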
| [
"socket.socket",
"time.sleep",
"RSA_Class.RSA",
"sys.exit",
"threading.Thread",
"DES.DES"
] | [((804, 809), 'DES.DES', 'DES', ([], {}), '()\n', (807, 809), False, 'from DES import DES\n'), ((811, 824), 'RSA_Class.RSA', 'RSA', (['(9973)', '(97)'], {}), '(9973, 97)\n', (814, 824), False, 'from RSA_Class import RSA\n'), ((1007, 1056), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1020, 1056), False, 'import socket\n'), ((1561, 1610), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1574, 1610), False, 'import socket\n'), ((1865, 1919), 'threading.Thread', 'threading.Thread', ([], {'target': 'reply_conn', 'args': '(conn, addr)'}), '(target=reply_conn, args=(conn, addr))\n', (1881, 1919), False, 'import threading\n'), ((1471, 1482), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1479, 1482), False, 'import sys\n'), ((1794, 1805), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1802, 1805), False, 'import sys\n'), ((743, 758), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (753, 758), False, 'import time\n')] |
"""empty message
Revision ID: 0084_add_job_stats
Revises: 0083_add_perm_types_and_svc_perm
Create Date: 2017-05-12 13:16:14.147368
"""
# revision identifiers, used by Alembic.
revision = "0084_add_job_stats"
down_revision = "0083_add_perm_types_and_svc_perm"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table(
"job_statistics",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("job_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("emails_sent", sa.BigInteger(), nullable=False),
sa.Column("emails_delivered", sa.BigInteger(), nullable=False),
sa.Column("emails_failed", sa.BigInteger(), nullable=False),
sa.Column("sms_sent", sa.BigInteger(), nullable=False),
sa.Column("sms_delivered", sa.BigInteger(), nullable=False),
sa.Column("sms_failed", sa.BigInteger(), nullable=False),
sa.Column("letters_sent", sa.BigInteger(), nullable=False),
sa.Column("letters_failed", sa.BigInteger(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["job_id"],
["jobs.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_job_statistics_job_id"), "job_statistics", ["job_id"], unique=True)
def downgrade():
op.drop_index(op.f("ix_job_statistics_job_id"), table_name="job_statistics")
op.drop_table("job_statistics")
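# Hedged usage note (not part of the migration module): with a configured
# alembic.ini this revision is normally applied or reverted from the CLI, e.g.
#
#   alembic upgrade 0084_add_job_stats
#   alembic downgrade 0083_add_perm_types_and_svc_perm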
| [
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.DateTime",
"alembic.op.drop_table",
"alembic.op.f",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.dialects.postgresql.UUID",
"sqlalchemy.BigInteger"
] | [((1575, 1606), 'alembic.op.drop_table', 'op.drop_table', (['"""job_statistics"""'], {}), "('job_statistics')\n", (1588, 1606), False, 'from alembic import op\n'), ((1244, 1292), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['job_id']", "['jobs.id']"], {}), "(['job_id'], ['jobs.id'])\n", (1267, 1292), True, 'import sqlalchemy as sa\n'), ((1337, 1366), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1360, 1366), True, 'import sqlalchemy as sa\n'), ((1394, 1426), 'alembic.op.f', 'op.f', (['"""ix_job_statistics_job_id"""'], {}), "('ix_job_statistics_job_id')\n", (1398, 1426), False, 'from alembic import op\n'), ((1508, 1540), 'alembic.op.f', 'op.f', (['"""ix_job_statistics_job_id"""'], {}), "('ix_job_statistics_job_id')\n", (1512, 1540), False, 'from alembic import op\n'), ((441, 470), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (456, 470), False, 'from sqlalchemy.dialects import postgresql\n'), ((517, 546), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (532, 546), False, 'from sqlalchemy.dialects import postgresql\n'), ((598, 613), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (611, 613), True, 'import sqlalchemy as sa\n'), ((670, 685), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (683, 685), True, 'import sqlalchemy as sa\n'), ((739, 754), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (752, 754), True, 'import sqlalchemy as sa\n'), ((803, 818), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (816, 818), True, 'import sqlalchemy as sa\n'), ((872, 887), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (885, 887), True, 'import sqlalchemy as sa\n'), ((938, 953), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (951, 953), True, 'import sqlalchemy as sa\n'), ((1006, 1021), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (1019, 1021), True, 'import sqlalchemy as sa\n'), ((1076, 1091), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (1089, 1091), True, 'import sqlalchemy as sa\n'), ((1142, 1155), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1153, 1155), True, 'import sqlalchemy as sa\n'), ((1205, 1218), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1216, 1218), True, 'import sqlalchemy as sa\n')] |
import torch
import os
from torch import nn
import numpy as np
import torch.nn.functional
from termcolor import colored
from .logger import get_logger
def save_model(net, optim, scheduler, recorder, is_best=False):
model_dir = os.path.join(recorder.work_dir, 'ckpt')
os.system('mkdir -p {}'.format(model_dir))
epoch = recorder.epoch
ckpt_name = 'best' if is_best else epoch
torch.save({
'net': net.state_dict(),
'optim': optim.state_dict(),
'scheduler': scheduler.state_dict(),
'recorder': recorder.state_dict(),
'epoch': epoch
}, os.path.join(model_dir, '{}.pth'.format(ckpt_name)))
# remove previous pretrained model if the number of models is too big
# pths = [int(pth.split('.')[0]) for pth in os.listdir(model_dir)]
# if len(pths) <= 2:
# return
# os.system('rm {}'.format(os.path.join(model_dir, '{}.pth'.format(min(pths)))))
def load_network_specified(net, model_dir, logger=None):
pretrained_net = torch.load(model_dir)['net']
net_state = net.state_dict()
state = {}
for k, v in pretrained_net.items():
if k not in net_state.keys() or v.size() != net_state[k].size():
if logger:
logger.info('skip weights: ' + k)
continue
state[k] = v
net.load_state_dict(state, strict=False)
def load_network(net, model_dir, finetune_from=None, logger=None):
if finetune_from:
if logger:
logger.info('Finetune model from: ' + finetune_from)
load_network_specified(net, finetune_from, logger)
return
pretrained_model = torch.load(model_dir)
net.load_state_dict(pretrained_model['net'], strict=True)
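# Hedged, self-contained demo (not part of the original module): exercise the
# checkpoint round-trip with a throw-away model. `recorder` is replaced by a
# minimal stand-in here (the real recorder class lives elsewhere in this
# project), and save_model shells out to `mkdir -p`, so this assumes a
# Unix-like system.
if __name__ == '__main__':
    import types
    demo_net = nn.Linear(4, 2)
    demo_optim = torch.optim.SGD(demo_net.parameters(), lr=0.1)
    demo_sched = torch.optim.lr_scheduler.StepLR(demo_optim, step_size=1)
    demo_rec = types.SimpleNamespace(work_dir='/tmp/ckpt_demo', epoch=0,
                                     state_dict=lambda: {})
    save_model(demo_net, demo_optim, demo_sched, demo_rec)   # writes /tmp/ckpt_demo/ckpt/0.pth
    load_network(demo_net, '/tmp/ckpt_demo/ckpt/0.pth')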
| [
"torch.load",
"os.path.join"
] | [((232, 271), 'os.path.join', 'os.path.join', (['recorder.work_dir', '"""ckpt"""'], {}), "(recorder.work_dir, 'ckpt')\n", (244, 271), False, 'import os\n'), ((1624, 1645), 'torch.load', 'torch.load', (['model_dir'], {}), '(model_dir)\n', (1634, 1645), False, 'import torch\n'), ((1002, 1023), 'torch.load', 'torch.load', (['model_dir'], {}), '(model_dir)\n', (1012, 1023), False, 'import torch\n')] |
from __future__ import division
from timeit import default_timer as timer
import csv
import numpy as np
import itertools
from munkres import Munkres, print_matrix, make_cost_matrix
import sys
from classes import *
from functions import *
from math import sqrt
import Tkinter as tk
import tkFileDialog as filedialog
root = tk.Tk()
root.withdraw()
p_file = filedialog.askopenfilename(title='Please select the posting file')
c_file = filedialog.askopenfilename(title='Please select the candidate file')
"""for use with /users/java_jonathan/postings_lge.csv and
/Users/java_jonathan/candidates_lge.csv"""
# p_file = raw_input("Please enter the path for the postings file: ")
# p_file = p_file.strip()
# c_file = raw_input("Please enter the path for the candidate file: ")
# c_file = c_file.strip()
start = timer()
with open(p_file,'r') as f:
#with open('/Users/Jonathan/Google Drive/CPD/Python/postings.csv','r') as f:
reader = csv.reader(f)
postingsAll = list(reader)
with open(c_file,'r') as f:
reader = csv.reader(f)
candidatesAll = list(reader)
"""create empty lists to fill with lists of lists output by iterating function
below"""
names = []
totalMatrix = []
for cand_row in candidatesAll:
    candidate = Candidate(*cand_row)
    names.append(candidate.name)
    n = 0
    for post_row in postingsAll:
        posting = Posting(*post_row)
totalMatrix.append(matchDept(posting,candidate) + matchAnchor(posting,candidate)
+matchLocation(posting,candidate) + matchCompetency(posting,candidate) +
matchSkill(posting,candidate)+matchCohort(posting,candidate))
n += 1
l = len(names)
names.extend([0] * (n-l))
totalMatrix.extend([0] * (n**2 - len(totalMatrix)))
totalMatrix = np.asarray(totalMatrix)
totalMatrix = np.reshape(totalMatrix,(n,-1))
#at this point the matrix is structured as candidates down and jobs across
totalMatrix = np.transpose(totalMatrix)
#now it's switched!
totalMatrix = np.subtract(np.amax(totalMatrix),totalMatrix)
totalMatrix = np.array(totalMatrix)
minSuitability = 18
check = []
result = []
m = Munkres()
indexes = m.compute(totalMatrix)
#print_matrix(totalMatrix, msg='Lowest cost through this matrix:')
total = 0.0
unhappy_candidates = 0
medium_candidates = 0
tenpc_candidates = 0
qs_candidates = 0
vs_candidates = 0
f = open('output.txt', 'w')
for row, column in indexes:
if column < l:
value = totalMatrix[row][column]
if value > minSuitability*0.9:
tenpc_candidates += 1
elif value > minSuitability*0.75:
medium_candidates += 1
elif value > minSuitability/2:
unhappy_candidates += 1
elif value > minSuitability*0.25:
qs_candidates += 1
elif value > minSuitability*0.1:
vs_candidates += 1
total += value
check.append(column+1)
result.append((row,column))
f.write('For candidate %s: \nOptimal position: %d (score %s)\n'
% (names[column], column+1, value))
else:
pass
globalSatisfaction = 100*(1-(total/(l*minSuitability)))
print('Global satisfaction: %.2f%%' % globalSatisfaction)
print('Candidates who are more than 90%% suitable: %d' % vs_candidates)
print('Candidates who are more than 75%% suitable: %d' % qs_candidates)
print('Candidates who are more than 50%% suitable: %d' % (l-unhappy_candidates))
print('Candidates who are more than 75%% unsuitable: %d' % medium_candidates)
print('Candidates who are more than 90%% unsuitable: %d' % tenpc_candidates)
#output from excel:
correct = [1,3,5,9,10,2,4,8,6,7]
#this function tests output above against Excel:
#test(correct,check)
topMatrix = topFive(names,totalMatrix)
#print(topMatrix)
np.savetxt('/Users/java_jonathan/test.csv',topMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
np.savetxt('/Users/java_jonathan/test2.csv',totalMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
end = timer()
print(end-start)
"""
#posting = [Posting(*postingsAll)]
#print(posting[0].anchor)
#print(posting)
#print(candidatesAll)
#print(postingsAll)
#print(postingsAll[0].name)
#print(preferences)
#print(postings)
#split up files into relative blocks
postCode = [lists[0] for lists in postings]
postDept = [lists[1] for lists in postings]
postAnchor = [lists[2] for lists in postings]
postSkills = [lists[3:5] for lists in postings]
postLocation = [lists[5] for lists in postings]
postCompetencies = [lists[7:10] for lists in postings]
postSecurity = [lists[10] for lists in postings]
#with open('/Users/Jonathan/Google Drive/CPD/Python/candidates.csv','r') as f:
#gives first column ie candidate a
a=totalMatrix[:,[0]]
#b = totalMatrix[:,[0]]
#print(a)
#converts 1D matrix to list for ease
a = np.array(a).tolist()
#print(a)
#creates list called output containing rank of score
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
#creates tuples of rank, job and appends to list
jobRank = []
# for rank, b in zip(output, postCode):
# jobScore = (rank,b)
# list(jobScore)
# jobRank.append(jobScore)
# print(jobRank)
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
# #print(a)
# jobRank = sorted(jobRank, reverse=False)
# print(jobRank)
# print('For candidate a, the best position is %s') % (jobRank[0][1])
# print(candidate[0].skills)
"""
| [
"numpy.reshape",
"numpy.amax",
"timeit.default_timer",
"Tkinter.Tk",
"numpy.asarray",
"numpy.array",
"munkres.Munkres",
"tkFileDialog.askopenfilename",
"numpy.savetxt",
"numpy.transpose",
"csv.reader"
] | [((323, 330), 'Tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (328, 330), True, 'import Tkinter as tk\n'), ((356, 422), 'tkFileDialog.askopenfilename', 'filedialog.askopenfilename', ([], {'title': '"""Please select the posting file"""'}), "(title='Please select the posting file')\n", (382, 422), True, 'import tkFileDialog as filedialog\n'), ((432, 500), 'tkFileDialog.askopenfilename', 'filedialog.askopenfilename', ([], {'title': '"""Please select the candidate file"""'}), "(title='Please select the candidate file')\n", (458, 500), True, 'import tkFileDialog as filedialog\n'), ((806, 813), 'timeit.default_timer', 'timer', ([], {}), '()\n', (811, 813), True, 'from timeit import default_timer as timer\n'), ((1712, 1735), 'numpy.asarray', 'np.asarray', (['totalMatrix'], {}), '(totalMatrix)\n', (1722, 1735), True, 'import numpy as np\n'), ((1751, 1783), 'numpy.reshape', 'np.reshape', (['totalMatrix', '(n, -1)'], {}), '(totalMatrix, (n, -1))\n', (1761, 1783), True, 'import numpy as np\n'), ((1871, 1896), 'numpy.transpose', 'np.transpose', (['totalMatrix'], {}), '(totalMatrix)\n', (1883, 1896), True, 'import numpy as np\n'), ((1991, 2012), 'numpy.array', 'np.array', (['totalMatrix'], {}), '(totalMatrix)\n', (1999, 2012), True, 'import numpy as np\n'), ((2060, 2069), 'munkres.Munkres', 'Munkres', ([], {}), '()\n', (2067, 2069), False, 'from munkres import Munkres, print_matrix, make_cost_matrix\n'), ((3680, 3815), 'numpy.savetxt', 'np.savetxt', (['"""/Users/java_jonathan/test.csv"""', 'topMatrix'], {'fmt': '"""%s"""', 'delimiter': '""","""', 'newline': '"""\n"""', 'header': '""""""', 'footer': '""""""', 'comments': '"""# """'}), "('/Users/java_jonathan/test.csv', topMatrix, fmt='%s', delimiter=\n ',', newline='\\n', header='', footer='', comments='# ')\n", (3690, 3815), True, 'import numpy as np\n'), ((3810, 3947), 'numpy.savetxt', 'np.savetxt', (['"""/Users/java_jonathan/test2.csv"""', 'totalMatrix'], {'fmt': '"""%s"""', 'delimiter': '""","""', 'newline': '"""\n"""', 'header': '""""""', 'footer': '""""""', 'comments': '"""# """'}), "('/Users/java_jonathan/test2.csv', totalMatrix, fmt='%s',\n delimiter=',', newline='\\n', header='', footer='', comments='# ')\n", (3820, 3947), True, 'import numpy as np\n'), ((3949, 3956), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3954, 3956), True, 'from timeit import default_timer as timer\n'), ((932, 945), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (942, 945), False, 'import csv\n'), ((1019, 1032), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1029, 1032), False, 'import csv\n'), ((1943, 1963), 'numpy.amax', 'np.amax', (['totalMatrix'], {}), '(totalMatrix)\n', (1950, 1963), True, 'import numpy as np\n')] |
__all__ = ["load"]
import imp
import importlib
def load(name, path):
"""Load and initialize a module implemented as a Python source file and return its module object"""
if hasattr(importlib, "machinery"):
loader = importlib.machinery.SourceFileLoader(name, path)
return loader.load_module()
return imp.load_source(name, path)
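if __name__ == "__main__":
    # Hedged, self-contained demo (not part of the original module): write a tiny
    # module to a temporary file and load it by path.
    import os
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as handle:
        handle.write("GREETING = 'hello'\n")
        tmp_path = handle.name
    mod = load("demo_module", tmp_path)
    print(mod.GREETING)  # -> hello
    os.remove(tmp_path)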
| [
"imp.load_source",
"importlib.machinery.SourceFileLoader"
] | [((330, 357), 'imp.load_source', 'imp.load_source', (['name', 'path'], {}), '(name, path)\n', (345, 357), False, 'import imp\n'), ((234, 282), 'importlib.machinery.SourceFileLoader', 'importlib.machinery.SourceFileLoader', (['name', 'path'], {}), '(name, path)\n', (270, 282), False, 'import importlib\n')] |
from fastapi import APIRouter
router = APIRouter()
@router.get("/")
def working():
return {"Working"}
| [
"fastapi.APIRouter"
] | [((40, 51), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (49, 51), False, 'from fastapi import APIRouter\n')] |
import logging
import numpy
from ..Fragments import Fragments
from ..typing import SpectrumType
logger = logging.getLogger("matchms")
def add_losses(spectrum_in: SpectrumType, loss_mz_from=0.0, loss_mz_to=1000.0) -> SpectrumType:
"""Derive losses based on precursor mass.
Parameters
----------
spectrum_in:
Input spectrum.
loss_mz_from:
Minimum allowed m/z value for losses. Default is 0.0.
loss_mz_to:
Maximum allowed m/z value for losses. Default is 1000.0.
"""
if spectrum_in is None:
return None
spectrum = spectrum_in.clone()
precursor_mz = spectrum.get("precursor_mz", None)
if precursor_mz:
assert isinstance(precursor_mz, (float, int)), ("Expected 'precursor_mz' to be a scalar number.",
"Consider applying 'add_precursor_mz' filter first.")
peaks_mz, peaks_intensities = spectrum.peaks.mz, spectrum.peaks.intensities
losses_mz = (precursor_mz - peaks_mz)[::-1]
losses_intensities = peaks_intensities[::-1]
# Add losses which are within given boundaries
mask = numpy.where((losses_mz >= loss_mz_from)
& (losses_mz <= loss_mz_to))
spectrum.losses = Fragments(mz=losses_mz[mask],
intensities=losses_intensities[mask])
else:
logger.warning("No precursor_mz found. Consider applying 'add_precursor_mz' filter first.")
return spectrum
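# Hedged usage sketch (assumes the public matchms Spectrum API; not part of this
# module):
#
#   import numpy as np
#   from matchms import Spectrum
#   s = Spectrum(mz=np.array([100.0, 150.0, 200.0]),
#                intensities=np.array([0.7, 0.2, 0.1]),
#                metadata={"precursor_mz": 445.0})
#   s = add_losses(s, loss_mz_from=10.0, loss_mz_to=400.0)
#   print(s.losses.mz)  # precursor_mz minus each peak m/z, within the given bounds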
| [
"logging.getLogger",
"numpy.where"
] | [((107, 135), 'logging.getLogger', 'logging.getLogger', (['"""matchms"""'], {}), "('matchms')\n", (124, 135), False, 'import logging\n'), ((1156, 1224), 'numpy.where', 'numpy.where', (['((losses_mz >= loss_mz_from) & (losses_mz <= loss_mz_to))'], {}), '((losses_mz >= loss_mz_from) & (losses_mz <= loss_mz_to))\n', (1167, 1224), False, 'import numpy\n')] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import platform
import unittest
import rspub.util.resourcefilter as rf
def on_windows():
opsys = platform.system()
return opsys == "Windows"
class TestPredicates(unittest.TestCase):
def test_directory_pattern_filter_empty(self):
dpf = rf.directory_pattern_predicate() # should pass all strings
self.assertTrue(dpf(""))
self.assertTrue(dpf("."))
self.assertTrue(dpf("\n"))
self.assertTrue(dpf("foo"))
# rejects not string
self.assertFalse(dpf(None))
self.assertFalse(dpf(42))
self.assertFalse(dpf(self))
def test_directory_pattern_filter(self):
dpf = rf.directory_pattern_predicate("abc")
self.assertTrue(dpf("foo/babcd/bar/some.txt"))
self.assertTrue(dpf("/abc/bar/some.txt"))
self.assertTrue(dpf("/foo/bar/abc/some.txt"))
#
self.assertFalse(dpf("/foo/bar/baz/abc.txt"))
# ##
dpf = rf.directory_pattern_predicate("^/abc")
self.assertTrue(dpf("/abc/bar/some.txt"))
#
self.assertFalse(dpf("abc/bar/some.txt"))
# #
dpf = rf.directory_pattern_predicate("abc$")
self.assertTrue(dpf("foo/bar/abc/some.txt"))
#
self.assertFalse(dpf("abc/abc/bar/some.txt"))
self.assertFalse(dpf("abc/abc/bar/abc.abc"))
@unittest.skipUnless(on_windows(), "Only tested on Windows.")
def test_directory_pattern_filter_windows(self):
dpf = rf.directory_pattern_predicate("abc")
self.assertTrue(dpf("foo/babcd/bar/some.txt"))
self.assertTrue(dpf("/abc/bar/some.txt"))
self.assertTrue(dpf("/foo/bar/abc/some.txt"))
self.assertTrue(dpf("foo\\babcd\\bar\\some.txt"))
self.assertTrue(dpf("c:\\abc\\bar\\some.txt"))
self.assertTrue(dpf("c:\\foo\\bar\\abc\\some.txt"))
#
self.assertFalse(dpf("/foo/bar/baz/abc.txt"))
self.assertFalse(dpf("c:\\foo\\bar\\baz\\abc.txt"))
# ##
dpf = rf.directory_pattern_predicate("^/abc")
self.assertTrue(dpf("/abc/bar/some.txt"))
#
self.assertFalse(dpf("abc/bar/some.txt"))
# #
dpf = rf.directory_pattern_predicate("^c:\\abc")
self.assertTrue(dpf("c:\\abc\\bar\\some.txt"))
#
self.assertFalse(dpf("abc\\bar\\some.txt"))
dpf = rf.directory_pattern_predicate("abc$")
self.assertTrue(dpf("foo/bar/abc/some.txt"))
self.assertTrue(dpf("foo\\bar\\abc\\some.txt"))
#
self.assertFalse(dpf("abc/abc/bar/some.txt"))
self.assertFalse(dpf("abc\\abc\\bar\\some.txt"))
self.assertFalse(dpf("abc/abc/bar/abc.abc"))
self.assertFalse(dpf("abc\\abc\\bar\\abc.abc"))
def test_last_modified_filter(self):
file_name = os.path.realpath(__file__)
lmaf = rf.last_modified_after_predicate()
self.assertTrue(lmaf(file_name))
lmaf = rf.last_modified_after_predicate(3000000000)
# valid until 2065-01-24 06:20:00
self.assertFalse(lmaf(file_name))
lmaf = rf.last_modified_after_predicate("2016-08-01")
self.assertTrue(lmaf(file_name))
def test_example(self):
import rspub.util.resourcefilter as rf
dir_ends_with_abc = rf.directory_pattern_predicate("abc$")
assert dir_ends_with_abc("/foo/bar/folder_abc/my_resource.txt")
assert not dir_ends_with_abc("/foo/bar/folder_def/my_resource.txt")
xml_file = rf.filename_pattern_predicate(".xml$")
assert xml_file("my_resource.xml")
assert not xml_file("my_resource.txt")
import rspub.util.gates as lf
xml_files_in_abc = lf.and_(dir_ends_with_abc, xml_file)
assert xml_files_in_abc("/foo/bar/folder_abc/my_resource.xml")
assert not xml_files_in_abc("/foo/bar/folder_abc/my_resource.txt")
assert not xml_files_in_abc("/foo/bar/folder_def/my_resource.xml")
recent = rf.last_modified_after_predicate("2016-08-01")
includes = [xml_files_in_abc]
excludes = [recent]
resource_gate = lf.gate(includes, excludes)
# print(type(resource_gate))
@unittest.skipUnless(on_windows(), "Only tested on Windows.")
def test_example_windows(self):
import rspub.util.resourcefilter as rf
dir_ends_with_abc = rf.directory_pattern_predicate("abc$")
assert dir_ends_with_abc("/foo/bar/folder_abc/my_resource.txt")
assert not dir_ends_with_abc("/foo/bar/folder_def/my_resource.txt")
xml_file = rf.filename_pattern_predicate(".xml$")
assert xml_file("my_resource.xml")
assert not xml_file("my_resource.txt")
import rspub.util.gates as lf
xml_files_in_abc = lf.and_(dir_ends_with_abc, xml_file)
assert xml_files_in_abc("/foo/bar/folder_abc/my_resource.xml")
assert not xml_files_in_abc("/foo/bar/folder_abc/my_resource.txt")
assert not xml_files_in_abc("/foo/bar/folder_def/my_resource.xml")
assert xml_files_in_abc("c:\\foo\\bar\\folder_abc\\my_resource.xml")
assert not xml_files_in_abc("c:\\foo\\bar\\folder_abc\\my_resource.txt")
assert not xml_files_in_abc("c:\\foo\\bar\\folder_def\\my_resource.xml")
recent = rf.last_modified_after_predicate("2016-08-01")
includes = [xml_files_in_abc]
excludes = [recent]
resource_gate = lf.gate(includes, excludes)
# print(type(resource_gate))
@unittest.skipUnless(on_windows(), "Only tested on Windows.")
def test_windows_to_unix(self):
path = os.path.expanduser("~")
dpf = rf.directory_pattern_predicate("^" + path)
self.assertTrue(dpf(os.path.join(path, "bla")))
dpf = rf.directory_pattern_predicate("^C:\\Users")
self.assertTrue(dpf(os.path.join(path, "bla")))
| [
"rspub.util.resourcefilter.directory_pattern_predicate",
"rspub.util.resourcefilter.filename_pattern_predicate",
"rspub.util.gates.and_",
"os.path.join",
"rspub.util.gates.gate",
"os.path.realpath",
"platform.system",
"rspub.util.resourcefilter.last_modified_after_predicate",
"os.path.expanduser"
] | [((162, 179), 'platform.system', 'platform.system', ([], {}), '()\n', (177, 179), False, 'import platform\n'), ((319, 351), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', ([], {}), '()\n', (349, 351), True, 'import rspub.util.resourcefilter as rf\n'), ((711, 748), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (['"""abc"""'], {}), "('abc')\n", (741, 748), True, 'import rspub.util.resourcefilter as rf\n'), ((1000, 1039), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (['"""^/abc"""'], {}), "('^/abc')\n", (1030, 1039), True, 'import rspub.util.resourcefilter as rf\n'), ((1177, 1215), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (['"""abc$"""'], {}), "('abc$')\n", (1207, 1215), True, 'import rspub.util.resourcefilter as rf\n'), ((1520, 1557), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (['"""abc"""'], {}), "('abc')\n", (1550, 1557), True, 'import rspub.util.resourcefilter as rf\n'), ((2043, 2082), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (['"""^/abc"""'], {}), "('^/abc')\n", (2073, 2082), True, 'import rspub.util.resourcefilter as rf\n'), ((2219, 2261), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (['"""^c:\\\\abc"""'], {}), "('^c:\\\\abc')\n", (2249, 2261), True, 'import rspub.util.resourcefilter as rf\n'), ((2394, 2432), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (['"""abc$"""'], {}), "('abc$')\n", (2424, 2432), True, 'import rspub.util.resourcefilter as rf\n'), ((2834, 2860), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2850, 2860), False, 'import os\n'), ((2877, 2911), 'rspub.util.resourcefilter.last_modified_after_predicate', 'rf.last_modified_after_predicate', ([], {}), '()\n', (2909, 2911), True, 'import rspub.util.resourcefilter as rf\n'), ((2969, 3013), 'rspub.util.resourcefilter.last_modified_after_predicate', 'rf.last_modified_after_predicate', (['(3000000000)'], {}), '(3000000000)\n', (3001, 3013), True, 'import rspub.util.resourcefilter as rf\n'), ((3114, 3160), 'rspub.util.resourcefilter.last_modified_after_predicate', 'rf.last_modified_after_predicate', (['"""2016-08-01"""'], {}), "('2016-08-01')\n", (3146, 3160), True, 'import rspub.util.resourcefilter as rf\n'), ((3307, 3345), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (['"""abc$"""'], {}), "('abc$')\n", (3337, 3345), True, 'import rspub.util.resourcefilter as rf\n'), ((3514, 3552), 'rspub.util.resourcefilter.filename_pattern_predicate', 'rf.filename_pattern_predicate', (['""".xml$"""'], {}), "('.xml$')\n", (3543, 3552), True, 'import rspub.util.resourcefilter as rf\n'), ((3710, 3746), 'rspub.util.gates.and_', 'lf.and_', (['dir_ends_with_abc', 'xml_file'], {}), '(dir_ends_with_abc, xml_file)\n', (3717, 3746), True, 'import rspub.util.gates as lf\n'), ((3986, 4032), 'rspub.util.resourcefilter.last_modified_after_predicate', 'rf.last_modified_after_predicate', (['"""2016-08-01"""'], {}), "('2016-08-01')\n", (4018, 4032), True, 'import rspub.util.resourcefilter as rf\n'), ((4124, 4151), 'rspub.util.gates.gate', 'lf.gate', (['includes', 'excludes'], {}), '(includes, excludes)\n', (4131, 4151), True, 'import rspub.util.gates as lf\n'), ((4368, 4406), 
'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (['"""abc$"""'], {}), "('abc$')\n", (4398, 4406), True, 'import rspub.util.resourcefilter as rf\n'), ((4575, 4613), 'rspub.util.resourcefilter.filename_pattern_predicate', 'rf.filename_pattern_predicate', (['""".xml$"""'], {}), "('.xml$')\n", (4604, 4613), True, 'import rspub.util.resourcefilter as rf\n'), ((4771, 4807), 'rspub.util.gates.and_', 'lf.and_', (['dir_ends_with_abc', 'xml_file'], {}), '(dir_ends_with_abc, xml_file)\n', (4778, 4807), True, 'import rspub.util.gates as lf\n'), ((5287, 5333), 'rspub.util.resourcefilter.last_modified_after_predicate', 'rf.last_modified_after_predicate', (['"""2016-08-01"""'], {}), "('2016-08-01')\n", (5319, 5333), True, 'import rspub.util.resourcefilter as rf\n'), ((5425, 5452), 'rspub.util.gates.gate', 'lf.gate', (['includes', 'excludes'], {}), '(includes, excludes)\n', (5432, 5452), True, 'import rspub.util.gates as lf\n'), ((5608, 5631), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (5626, 5631), False, 'import os\n'), ((5646, 5688), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (["('^' + path)"], {}), "('^' + path)\n", (5676, 5688), True, 'import rspub.util.resourcefilter as rf\n'), ((5760, 5804), 'rspub.util.resourcefilter.directory_pattern_predicate', 'rf.directory_pattern_predicate', (['"""^C:\\\\Users"""'], {}), "('^C:\\\\Users')\n", (5790, 5804), True, 'import rspub.util.resourcefilter as rf\n'), ((5717, 5742), 'os.path.join', 'os.path.join', (['path', '"""bla"""'], {}), "(path, 'bla')\n", (5729, 5742), False, 'import os\n'), ((5833, 5858), 'os.path.join', 'os.path.join', (['path', '"""bla"""'], {}), "(path, 'bla')\n", (5845, 5858), False, 'import os\n')] |
# uncompyle6 version 2.11.3
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, May 23 2015, 09:40:32) [MSC v.1500 32 bit (Intel)]
# Embedded file name: scripts/common/dossiers2/custom/cache.py
import nations
from items import vehicles
def getCache():
global _g_cache
return _g_cache
def buildCache():
vehiclesByLevel = {}
vehiclesByTag = {'beast': set(),'sinai': set(),'patton': set()}
vehiclesInTreeByNation = {}
vehiclesInTree = set()
nationsWithVehiclesInTree = []
unlocksSources = vehicles.getUnlocksSources()
for nationIdx in xrange(len(nations.NAMES)):
nationList = vehicles.g_list.getList(nationIdx)
vehiclesInNationTree = set()
for vehDescr in nationList.itervalues():
vehiclesByLevel.setdefault(vehDescr.level, set()).add(vehDescr.compactDescr)
for tag in ('beast', 'sinai', 'patton'):
if tag in vehDescr.tags:
vehiclesByTag[tag].add(vehDescr.compactDescr)
if len(unlocksSources.get(vehDescr.compactDescr, set())) > 0 or len(vehicles.g_cache.vehicle(nationIdx, vehDescr.id).unlocksDescrs) > 0:
vehiclesInNationTree.add(vehDescr.compactDescr)
vehiclesInTree.update(vehiclesInNationTree)
vehiclesInTreeByNation[nationIdx] = vehiclesInNationTree
if bool(vehiclesInNationTree):
nationsWithVehiclesInTree.append(nationIdx)
vehicles8p = vehiclesByLevel[8] | vehiclesByLevel[9] | vehiclesByLevel[10]
_g_cache.update({'vehiclesByLevel': vehiclesByLevel,
'vehicles8+': vehicles8p,
'vehiclesByTag': vehiclesByTag,
'mausTypeCompDescr': vehicles.makeVehicleTypeCompDescrByName('germany:G42_Maus'),
'vehiclesInTreesByNation': vehiclesInTreeByNation,
'vehiclesInTrees': vehiclesInTree,
'nationsWithVehiclesInTree': nationsWithVehiclesInTree
})
_g_cache = {} | [
"items.vehicles.makeVehicleTypeCompDescrByName",
"items.vehicles.getUnlocksSources",
"items.vehicles.g_list.getList",
"items.vehicles.g_cache.vehicle"
] | [((542, 570), 'items.vehicles.getUnlocksSources', 'vehicles.getUnlocksSources', ([], {}), '()\n', (568, 570), False, 'from items import vehicles\n'), ((641, 675), 'items.vehicles.g_list.getList', 'vehicles.g_list.getList', (['nationIdx'], {}), '(nationIdx)\n', (664, 675), False, 'from items import vehicles\n'), ((1675, 1734), 'items.vehicles.makeVehicleTypeCompDescrByName', 'vehicles.makeVehicleTypeCompDescrByName', (['"""germany:G42_Maus"""'], {}), "('germany:G42_Maus')\n", (1714, 1734), False, 'from items import vehicles\n'), ((1092, 1140), 'items.vehicles.g_cache.vehicle', 'vehicles.g_cache.vehicle', (['nationIdx', 'vehDescr.id'], {}), '(nationIdx, vehDescr.id)\n', (1116, 1140), False, 'from items import vehicles\n')] |
from django.conf import settings
from django.core import serializers
from django.utils import timezone
import requests
from Posts.commentModel import Comments
#from Posts.commentView import add_Comment
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from django.shortcuts import HttpResponse, render
from requests import get
from .serializers import CommentSerializer, PostSerializer
from Author.serializers import LikeSerializer
from Author.models import Like
from Author.views import updateForeignAuthors, GetForeignAuthors
from .models import Post, Author
from .form import PostForm
from Posts.commentForm import CommentForm
import json
import uuid
import re
import base64
from django.db.models import Q
import django.core
from permissions import CustomAuthentication, AccessPermission
from django.core.paginator import Paginator
import traceback
def newPost(request, uid=None, auth_pk=None):
form = PostForm(request.POST, request.FILES)
if form.is_valid():
title = form.cleaned_data['title']
        description = form.cleaned_data['description']
categories = form.cleaned_data['categories'].split(' ')
visibility = form.cleaned_data['visibility']
unlisted = form.cleaned_data['unlisted']
contentType = form.cleaned_data['contentType']
if contentType == "application/app":
content = request.FILES['file'].read() #Inputfile
elif contentType in ["image/png", "image/jpeg",]:
content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
content = form.cleaned_data["text"]
source = settings.SERVER_URL + "/"
origin = settings.SERVER_URL + "/"
author_id = Author.objects.get(pk=auth_pk)
id = author_id.url
author = json.loads(serializers.serialize('json', Author.objects.filter(pk=auth_pk), fields=('type', 'id', 'host', 'displayName', 'url', 'github',)))[0]['fields']
if uid == None:
r_uid = uuid.uuid4().hex
uid = re.sub('-', '', r_uid)
id = id + '/posts/' + uid + "/"
comments_id = id + "comments/"
published = timezone.now()
        posts = Post(pk=uid, id=id, author_id=author_id, author=author, title=title, source=source, origin=origin, description=description, contentType=contentType, count=0, size=10, categories=categories,visibility=visibility, unlisted=unlisted, published=published, content=content, comments=comments_id)
posts.save()
return True
else:
print(request.data)
print(form.errors)
print(form.data)
return False
def add_Comment(request, post_pk, auth_pk, uid=None):
form = CommentForm(request.POST, request.FILES)
if form.is_valid():
updateForeignAuthors()
published = timezone.now()
contentType = form.cleaned_data['contentType']
if contentType == "application/app":
content = request.FILES['file'].read() #Inputfile
elif contentType in ["image/png", "image/jpeg",]:
content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
content = form.cleaned_data["text"]
author_id = json.loads(serializers.serialize('json', Author.objects.filter(email=auth_pk), fields=('type', 'id', 'host', 'displayName', 'url', 'github',)))[0]['fields']
post = Post.objects.get(pk = post_pk)
post_pk_str = post_pk
if uid == None:
r_uid = uuid.uuid4().hex
uid = re.sub('-', '', r_uid)
comment_id = getattr(post, 'comments') + uid
comments = Comments(pk=uid, id=comment_id, Post_pk=post, Post_pk_str = post_pk_str, auth_pk_str = auth_pk, author=author_id, size=10, published=published, contentType=contentType, content=content)
comments.save()
return True
else:
print(request.data)
return False
@api_view(['GET',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostLikesView(request, post_pk, auth_pk):
post = Post.objects.get(post_pk = post_pk)
author = Author.objects.get(pk = auth_pk)
likeObjs = Like.objects.filter(~Q(auth_pk = author), object = post.id)
Likes = LikeSerializer(likeObjs, read_only=True, many=True)
likes = []
for l in Likes.data:
like = {}
for key in l:
if(key != "context"):
like[key] = l[key]
like["@context"] = l["context"]
like["author"] = json.loads(django.core.serializers.serialize('json', Author.objects.filter(id=l["author"]), fields=('type', 'id', 'displayName', 'host', 'url', 'github',)))[0]['fields']
likes.append(like)
response_dict = {
"type": "likes",
"items": likes
}
return Response(response_dict)
@api_view(['GET', 'POST',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostsList(request, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
if auth_pk:
try:
author = Author.objects.get(auth_pk=auth_pk)
posts = Post.objects.filter(author_id=author, id__icontains = "linkedspace")
code = status.HTTP_200_OK
paginator = Paginator(posts, page_size)
page_obj = paginator.get_page(page_number)
data = PostSerializer(page_obj.object_list, many=True).data
except Exception as e:
print(e)
data = {}
code = status.HTTP_400_BAD_REQUEST
else:
code = status.HTTP_200_OK
posts = Post.objects.filter(id__icontains = "linkedspace")
paginator = Paginator(posts, page_size)
page_obj = paginator.get_page(page_number)
data = PostSerializer(page_obj.object_list, many=True).data
elif request.method == 'POST':
if newPost(request, auth_pk=request.data['auth_pk']):
code = status.HTTP_201_CREATED
post = Post.objects.latest("published")
data = PostSerializer(post).data
else:
code = status.HTTP_400_BAD_REQUEST
data = {}
return Response(data, code)
@api_view(['GET', 'POST',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def commentListView(request, post_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
comments = Comments.objects.filter(Post_pk_str=post_pk)
post = Post.objects.get(pk=post_pk)
post_id = getattr(post, 'id')
comment_id = getattr(post, 'comments')
paginator = Paginator(comments, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
response_dict = {
"type": "comments",
"page": page_number,
"size": page_size,
"post": post_id,
"id": comment_id,
"comments": serializer.data,
}
return Response(response_dict)
elif request.method == 'POST':
if add_Comment(request, post_pk=request.data['Post_pk'], auth_pk=request.data['auth_pk']):
code = status.HTTP_202_ACCEPTED
comment = Comments.objects.latest("published")
data = CommentSerializer(comment).data
else:
code = status.HTTP_400_BAD_REQUEST
data = {}
return Response(data, code)
@api_view(['GET', 'POST', 'PUT', 'DELETE', ])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostDetail(request, post_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
try:
code = status.HTTP_200_OK
post = Post.objects.get(post_pk=post_pk)
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'POST':
try:
code = status.HTTP_200_OK
post = Post.objects.get(post_pk=post_pk)
if 'title' in request.data.keys():
post.title = request.data['title']
if 'description' in request.data.keys():
post.description = request.data['description']
if 'categories' in request.data.keys():
post.categories = request.data['categories'].split(' ')
if 'visibility' in request.data.keys():
post.visibility = request.data['visibility']
if 'unlisted' in request.data.keys():
post.unlisted = request.data['unlisted']
if 'contentType' in request.data.keys():
post.contentType = request.data['contentType']
if post.contentType == "application/app":
post.content = request.FILES['file'].read() #Inputfile
elif post.contentType in ["image/png", "image/jpeg",]:
post.content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
post.content = request.data["text"]
post.save()
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'PUT':
try:
code = status.HTTP_201_CREATED
assert newPost(request, post_pk, request.data['auth_pk'])==True
post = Post.objects.get(post_pk=post_pk)
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'DELETE':
try:
post = Post.objects.get(post_pk=post_pk)
post.delete()
code = status.HTTP_200_OK
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
return Response(serializer.data, code)
@api_view(['GET', 'POST', ])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def commentDetail(request, post_pk, comment_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
try:
code = status.HTTP_200_OK
comment = Comments.objects.get(pk=comment_pk)
serializer = CommentSerializer(comment)
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
comment = Comments.objects.all()
paginator = Paginator(comment, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
elif request.method == 'POST':
try:
code = status.HTTP_200_OK
comment = Comments.objects.get(pk=comment_pk)
if 'contentType' in request.data.keys():
comment.contentType = request.data['contentType']
if 'text' in request.data.keys():
comment.content = request.data['text']
comment.save()
serializer = CommentSerializer(comment)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
comment = Comments.objects.all()
paginator = Paginator(comment, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
return Response(serializer.data, code)
@api_view(['GET',])
def connection(request, auth_id=None):
data = []
team3 = get('https://social-dis.herokuapp.com/posts', auth=('socialdistribution_t03','c404t03'))
if team3.status_code == 200:
data.append(team3.json())
team15 = get('https://unhindled.herokuapp.com/service/allposts/', auth=('connectionsuperuser','404connection'))
if team15.status_code == 200:
data.append(team15.json())
team17 = get('https://cmput404f21t17.herokuapp.com/service/connect/public/', auth=('4cbe2def-feaa-4bb7-bce5-<PASSWORD>','123456'))
if team17.status_code == 200:
data.append(team17.json())
return Response({'connection': data})
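# Hedged usage sketch (URL prefixes are assumptions; the actual routing lives in
# the project's urls.py, which is not part of this file): the paginated list
# endpoints are driven by `page` and `size` query parameters, e.g.
#
#   GET /posts/?page=2&size=5                       -> PostsList
#   GET /posts/<post_pk>/comments/?page=1&size=10   -> commentListView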
| [
"rest_framework.decorators.permission_classes",
"Posts.commentModel.Comments.objects.all",
"rest_framework.decorators.authentication_classes",
"requests.get",
"Posts.commentModel.Comments.objects.get",
"django.utils.timezone.now",
"Author.serializers.LikeSerializer",
"rest_framework.response.Response",
"Posts.commentModel.Comments",
"Posts.commentModel.Comments.objects.filter",
"uuid.uuid4",
"Author.views.updateForeignAuthors",
"Posts.commentForm.CommentForm",
"re.sub",
"django.db.models.Q",
"rest_framework.decorators.api_view",
"Posts.commentModel.Comments.objects.latest",
"django.core.paginator.Paginator"
] | [((4040, 4057), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (4048, 4057), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((4060, 4106), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['[CustomAuthentication]'], {}), '([CustomAuthentication])\n', (4082, 4106), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((4108, 4146), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[AccessPermission]'], {}), '([AccessPermission])\n', (4126, 4146), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((4957, 4982), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (4965, 4982), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((4985, 5031), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['[CustomAuthentication]'], {}), '([CustomAuthentication])\n', (5007, 5031), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((5033, 5071), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[AccessPermission]'], {}), '([AccessPermission])\n', (5051, 5071), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((6510, 6535), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (6518, 6535), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((6538, 6584), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['[CustomAuthentication]'], {}), '([CustomAuthentication])\n', (6560, 6584), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((6586, 6624), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[AccessPermission]'], {}), '([AccessPermission])\n', (6604, 6624), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((7950, 7992), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST', 'PUT', 'DELETE']"], {}), "(['GET', 'POST', 'PUT', 'DELETE'])\n", (7958, 7992), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((7996, 8042), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['[CustomAuthentication]'], {}), '([CustomAuthentication])\n', (8018, 8042), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((8044, 8082), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[AccessPermission]'], {}), '([AccessPermission])\n', (8062, 8082), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((11453, 11478), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (11461, 11478), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((11482, 11528), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['[CustomAuthentication]'], {}), '([CustomAuthentication])\n', (11504, 11528), False, 'from rest_framework.decorators import 
api_view, authentication_classes, permission_classes\n'), ((11530, 11568), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[AccessPermission]'], {}), '([AccessPermission])\n', (11548, 11568), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((13166, 13183), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (13174, 13183), False, 'from rest_framework.decorators import api_view, authentication_classes, permission_classes\n'), ((2801, 2841), 'Posts.commentForm.CommentForm', 'CommentForm', (['request.POST', 'request.FILES'], {}), '(request.POST, request.FILES)\n', (2812, 2841), False, 'from Posts.commentForm import CommentForm\n'), ((4374, 4425), 'Author.serializers.LikeSerializer', 'LikeSerializer', (['likeObjs'], {'read_only': '(True)', 'many': '(True)'}), '(likeObjs, read_only=True, many=True)\n', (4388, 4425), False, 'from Author.serializers import LikeSerializer\n'), ((4930, 4953), 'rest_framework.response.Response', 'Response', (['response_dict'], {}), '(response_dict)\n', (4938, 4953), False, 'from rest_framework.response import Response\n'), ((6487, 6507), 'rest_framework.response.Response', 'Response', (['data', 'code'], {}), '(data, code)\n', (6495, 6507), False, 'from rest_framework.response import Response\n'), ((11419, 11450), 'rest_framework.response.Response', 'Response', (['serializer.data', 'code'], {}), '(serializer.data, code)\n', (11427, 11450), False, 'from rest_framework.response import Response\n'), ((13107, 13138), 'rest_framework.response.Response', 'Response', (['serializer.data', 'code'], {}), '(serializer.data, code)\n', (13115, 13138), False, 'from rest_framework.response import Response\n'), ((13251, 13345), 'requests.get', 'get', (['"""https://social-dis.herokuapp.com/posts"""'], {'auth': "('socialdistribution_t03', 'c404t03')"}), "('https://social-dis.herokuapp.com/posts', auth=(\n 'socialdistribution_t03', 'c404t03'))\n", (13254, 13345), False, 'from requests import get\n'), ((13421, 13529), 'requests.get', 'get', (['"""https://unhindled.herokuapp.com/service/allposts/"""'], {'auth': "('connectionsuperuser', '404connection')"}), "('https://unhindled.herokuapp.com/service/allposts/', auth=(\n 'connectionsuperuser', '404connection'))\n", (13424, 13529), False, 'from requests import get\n'), ((13607, 13734), 'requests.get', 'get', (['"""https://cmput404f21t17.herokuapp.com/service/connect/public/"""'], {'auth': "('4cbe2def-feaa-4bb7-bce5-<PASSWORD>', '123456')"}), "('https://cmput404f21t17.herokuapp.com/service/connect/public/', auth=(\n '4cbe2def-feaa-4bb7-bce5-<PASSWORD>', '123456'))\n", (13610, 13734), False, 'from requests import get\n'), ((13810, 13840), 'rest_framework.response.Response', 'Response', (["{'connection': data}"], {}), "({'connection': data})\n", (13818, 13840), False, 'from rest_framework.response import Response\n'), ((2260, 2274), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2272, 2274), False, 'from django.utils import timezone\n'), ((2874, 2896), 'Author.views.updateForeignAuthors', 'updateForeignAuthors', ([], {}), '()\n', (2894, 2896), False, 'from Author.views import updateForeignAuthors, GetForeignAuthors\n'), ((2917, 2931), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2929, 2931), False, 'from django.utils import timezone\n'), ((3738, 3927), 'Posts.commentModel.Comments', 'Comments', ([], {'pk': 'uid', 'id': 'comment_id', 'Post_pk': 'post', 'Post_pk_str': 'post_pk_str', 'auth_pk_str': 
'auth_pk', 'author': 'author_id', 'size': '(10)', 'published': 'published', 'contentType': 'contentType', 'content': 'content'}), '(pk=uid, id=comment_id, Post_pk=post, Post_pk_str=post_pk_str,\n auth_pk_str=auth_pk, author=author_id, size=10, published=published,\n contentType=contentType, content=content)\n', (3746, 3927), False, 'from Posts.commentModel import Comments\n'), ((6886, 6930), 'Posts.commentModel.Comments.objects.filter', 'Comments.objects.filter', ([], {'Post_pk_str': 'post_pk'}), '(Post_pk_str=post_pk)\n', (6909, 6930), False, 'from Posts.commentModel import Comments\n'), ((7080, 7110), 'django.core.paginator.Paginator', 'Paginator', (['comments', 'page_size'], {}), '(comments, page_size)\n', (7089, 7110), False, 'from django.core.paginator import Paginator\n'), ((7499, 7522), 'rest_framework.response.Response', 'Response', (['response_dict'], {}), '(response_dict)\n', (7507, 7522), False, 'from rest_framework.response import Response\n'), ((2137, 2159), 're.sub', 're.sub', (['"""-"""', '""""""', 'r_uid'], {}), "('-', '', r_uid)\n", (2143, 2159), False, 'import re\n'), ((3643, 3665), 're.sub', 're.sub', (['"""-"""', '""""""', 'r_uid'], {}), "('-', '', r_uid)\n", (3649, 3665), False, 'import re\n'), ((4322, 4339), 'django.db.models.Q', 'Q', ([], {'auth_pk': 'author'}), '(auth_pk=author)\n', (4323, 4339), False, 'from django.db.models import Q\n'), ((5999, 6026), 'django.core.paginator.Paginator', 'Paginator', (['posts', 'page_size'], {}), '(posts, page_size)\n', (6008, 6026), False, 'from django.core.paginator import Paginator\n'), ((7927, 7947), 'rest_framework.response.Response', 'Response', (['data', 'code'], {}), '(data, code)\n', (7935, 7947), False, 'from rest_framework.response import Response\n'), ((11894, 11929), 'Posts.commentModel.Comments.objects.get', 'Comments.objects.get', ([], {'pk': 'comment_pk'}), '(pk=comment_pk)\n', (11914, 11929), False, 'from Posts.commentModel import Comments\n'), ((2102, 2114), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2112, 2114), False, 'import uuid\n'), ((3608, 3620), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3618, 3620), False, 'import uuid\n'), ((5552, 5579), 'django.core.paginator.Paginator', 'Paginator', (['posts', 'page_size'], {}), '(posts, page_size)\n', (5561, 5579), False, 'from django.core.paginator import Paginator\n'), ((7732, 7768), 'Posts.commentModel.Comments.objects.latest', 'Comments.objects.latest', (['"""published"""'], {}), "('published')\n", (7755, 7768), False, 'from Posts.commentModel import Comments\n'), ((8622, 8648), 'django.core.paginator.Paginator', 'Paginator', (['post', 'page_size'], {}), '(post, page_size)\n', (8631, 8648), False, 'from django.core.paginator import Paginator\n'), ((12102, 12124), 'Posts.commentModel.Comments.objects.all', 'Comments.objects.all', ([], {}), '()\n', (12122, 12124), False, 'from Posts.commentModel import Comments\n'), ((12149, 12178), 'django.core.paginator.Paginator', 'Paginator', (['comment', 'page_size'], {}), '(comment, page_size)\n', (12158, 12178), False, 'from django.core.paginator import Paginator\n'), ((12419, 12454), 'Posts.commentModel.Comments.objects.get', 'Comments.objects.get', ([], {'pk': 'comment_pk'}), '(pk=comment_pk)\n', (12439, 12454), False, 'from Posts.commentModel import Comments\n'), ((10202, 10228), 'django.core.paginator.Paginator', 'Paginator', (['post', 'page_size'], {}), '(post, page_size)\n', (10211, 10228), False, 'from django.core.paginator import Paginator\n'), ((11260, 11286), 'django.core.paginator.Paginator', 'Paginator', 
(['post', 'page_size'], {}), '(post, page_size)\n', (11269, 11286), False, 'from django.core.paginator import Paginator\n'), ((12875, 12897), 'Posts.commentModel.Comments.objects.all', 'Comments.objects.all', ([], {}), '()\n', (12895, 12897), False, 'from Posts.commentModel import Comments\n'), ((12922, 12951), 'django.core.paginator.Paginator', 'Paginator', (['comment', 'page_size'], {}), '(comment, page_size)\n', (12931, 12951), False, 'from django.core.paginator import Paginator\n'), ((10785, 10811), 'django.core.paginator.Paginator', 'Paginator', (['post', 'page_size'], {}), '(post, page_size)\n', (10794, 10811), False, 'from django.core.paginator import Paginator\n')] |
import unittest
from worldengine.plates import Step, center_land, world_gen
from worldengine.world import World
from tests.draw_test import TestBase
class TestGeneration(TestBase):
def setUp(self):
super(TestGeneration, self).setUp()
def test_world_gen_does_not_explode_badly(self):
# FIXME remove me when proper tests are in place
        # Very simple smoke test that just verifies nothing explodes badly
world_gen("Dummy", 32, 16, 1, step=Step.get_by_name("full"))
@staticmethod
def _mean_elevation_at_borders(world):
borders_total_elevation = 0.0
for y in range(world.height):
borders_total_elevation += world.elevation_at((0, y))
borders_total_elevation += world.elevation_at((world.width - 1, y))
for x in range(1, world.width - 1):
borders_total_elevation += world.elevation_at((x, 0))
borders_total_elevation += world.elevation_at((x, world.height - 1))
n_cells_on_border = world.width * 2 + world.height * 2 - 4
return borders_total_elevation / n_cells_on_border
def test_center_land(self):
w = World.from_pickle_file("%s/plates_279.world" % self.tests_data_dir)
# We want to have less land than before at the borders
el_before = TestGeneration._mean_elevation_at_borders(w)
center_land(w)
el_after = TestGeneration._mean_elevation_at_borders(w)
self.assertTrue(el_after <= el_before)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"worldengine.plates.center_land",
"worldengine.plates.Step.get_by_name",
"worldengine.world.World.from_pickle_file"
] | [((1507, 1522), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1520, 1522), False, 'import unittest\n'), ((1143, 1210), 'worldengine.world.World.from_pickle_file', 'World.from_pickle_file', (["('%s/plates_279.world' % self.tests_data_dir)"], {}), "('%s/plates_279.world' % self.tests_data_dir)\n", (1165, 1210), False, 'from worldengine.world import World\n'), ((1348, 1362), 'worldengine.plates.center_land', 'center_land', (['w'], {}), '(w)\n', (1359, 1362), False, 'from worldengine.plates import Step, center_land, world_gen\n'), ((470, 494), 'worldengine.plates.Step.get_by_name', 'Step.get_by_name', (['"""full"""'], {}), "('full')\n", (486, 494), False, 'from worldengine.plates import Step, center_land, world_gen\n')] |
from tensorflow.keras import *
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential,regularizers
from tensorflow.keras.layers import Dropout
# from tensorflow.keras import *
# Define a 3x3 convolution. kernel_initializer options: 'he_normal', 'glorot_normal'
from tensorflow.python.keras.layers import Concatenate
def regularized_padded_conv(*args, **kwargs):
return layers.Conv2D(*args, **kwargs, padding='same', use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(5e-4))
############################### Channel attention mechanism ###############################
class ChannelAttention(layers.Layer):
def __init__(self, in_planes, ratio=8):
super(ChannelAttention, self).__init__()
self.avg= layers.GlobalAveragePooling2D()
self.max= layers.GlobalMaxPooling2D()
self.conv1 = layers.Conv2D(in_planes//ratio, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True, activation=tf.nn.relu)
self.conv2 = layers.Conv2D(in_planes, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True)
def call(self, inputs):
avg = self.avg(inputs)
max = self.max(inputs)
        avg = layers.Reshape((1, 1, avg.shape[1]))(avg)   # shape (None, 1, 1, features)
        max = layers.Reshape((1, 1, max.shape[1]))(max)   # shape (None, 1, 1, features)
avg_out = self.conv2(self.conv1(avg))
max_out = self.conv2(self.conv1(max))
out = avg_out + max_out
out = tf.nn.sigmoid(out)
return out
############################### Spatial attention mechanism ###############################
class SpatialAttention(layers.Layer):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = regularized_padded_conv(1, kernel_size=kernel_size, strides=1, activation=tf.nn.sigmoid)
def call(self, inputs):
avg_out = tf.reduce_mean(inputs, axis=3)
max_out = tf.reduce_max(inputs, axis=3)
        out = tf.stack([avg_out, max_out], axis=3)            # create a new dimension and concatenate the two maps along it
out = self.conv1(out)
return out
class BasicBlock(layers.Layer):
def __init__(self, filter_num, stride=1):
super(BasicBlock, self).__init__()
# self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same', kernel_initializer='he_normal',kernel_regularizer=keras.regularizers.l2(5e-4))
self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same',kernel_regularizer=regularizers.l2(0.0001)) #kernel_initializer='he_normal',
self.bn1 = layers.BatchNormalization()
self.relu = layers.Activation('relu')
self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1, padding='same',kernel_regularizer=regularizers.l2(0.0001))
self.bn2 = layers.BatchNormalization()
        ############################### Attention mechanism ###############################
self.ca = ChannelAttention(filter_num)
self.sa = SpatialAttention()
if stride != 1:
self.downsample = Sequential()
self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride))
else:
self.downsample = lambda x:x
def call(self, inputs, training=None):
# [b, h, w, c]
out = self.conv1(inputs)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
        ############################### Attention mechanism ###############################
out = self.ca(out) * out
out = self.sa(out) * out
identity = self.downsample(inputs)
output = layers.add([out, identity])
output = tf.nn.relu(output)
return output
######################################
class build_resblock(keras.Model):
def __init__(self, filter_num, stride):
super(build_resblock, self).__init__()
self.BasicBlock1 = BasicBlock(filter_num, stride)
self.BasicBlock2 = BasicBlock(filter_num, stride=1)
def call(self,blocks):
res_blocks = Sequential()
res_blocks.add(self.BasicBlock1)
for _ in range(1, blocks):
res_blocks.add(self.BasicBlock2)
return res_blocks
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
######################################
class ResNet(keras.Model):
def __init__(self, layer_dims, num_classes=16): # [2, 2, 2, 2]
super(ResNet, self).__init__()
self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
layers.BatchNormalization(),
layers.Activation('relu'),
layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
])
self.layer1 = self.build_resblock(64, layer_dims[0])
self.layer2 = self.build_resblock(128, layer_dims[1], stride=1)
self.layer3 = self.build_resblock(256, layer_dims[2], stride=1)
self.layer4 = self.build_resblock(512, layer_dims[3], stride=1)
# output: [b, 512, h, w],
self.avgpool = layers.GlobalAveragePooling2D()
self.fc = layers.Dense(num_classes)
def call(self, inputs, training=None):
x = self.stem(inputs)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# [b, c]
x = self.avgpool(x)
# [b, 100]
x = self.fc(x)
return x
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
def resnet18():
return ResNet([2, 2, 2, 2],num_classes=9)
def resnet34():
return ResNet([3, 4, 6, 3],num_classes=9)
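# Hedged usage sketch (added for illustration; the batch size and input resolution below are
# assumptions, not values from this module):
# model = resnet18()                                  # 9-class variant defined above
# logits = model(tf.random.normal([4, 32, 32, 3]))    # forward pass -> logits of shape (4, 9)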
########################### pp2 main model ########################################
class pp2_model(keras.Model):
def __init__(self,filters_num,layer_dims,num_classes,dropout_rate):
super(pp2_model, self).__init__()
self.conv1 = layers.Conv3D(filters_num[0],kernel_size=(3,3,7),padding='same') # filters_num = 8
self.bn1 = layers.BatchNormalization()
self.relu1 = layers.Activation('relu')
self.conv2 = layers.Conv3D(filters_num[1],kernel_size=(3,3,5),padding='same') # filters_num = 16
self.bn2 = layers.BatchNormalization()
self.relu2 = layers.Activation('relu')
self.conv3 = layers.Conv3D(filters_num[2], kernel_size=(3, 3, 3), padding='same') # filters_num = 32
self.bn3 = layers.BatchNormalization()
self.relu3 = layers.Activation('relu')
# self.reshape = layers.Reshape()
self.conv4 = layers.Conv2D(filters_num[3], kernel_size=(3, 3), padding='same') # filters_num = 64
self.bn4 = layers.BatchNormalization()
self.relu4 = layers.Activation('relu')
self.conv5 = layers.Conv2D(filters_num[4], kernel_size=(3, 3), padding='same') # filters_num = **
self.bn5 = layers.BatchNormalization()
self.relu5 = layers.Activation('relu')
self.dpout = layers.Dropout(dropout_rate)
self.layer1 = self.build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64
self.layer2 = self.build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128
self.layer3 = self.build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256
self.layer4 = self.build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512
# output: [b, 512, h, w],
# self.fc1 = layers.Flatten()
self.avgpool = layers.GlobalAveragePooling2D()
self.fc2 = layers.Dense(filters_num[7],activation='relu')
self.fc3 = layers.Dense(filters_num[6],activation='relu')
self.fc4 = layers.Dense(num_classes)
def call(self,inputs,training=None):
out = self.conv1(inputs)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu3(out)
# reshape
out = layers.Reshape((out.shape[1],out.shape[2],out.shape[3] * out.shape[4]))(out)
out = self.conv4(out)
out = self.bn4(out)
out = self.relu4(out)
out = self.dpout(out)
out = self.conv5(out)
out = self.bn5(out)
out = self.dpout(out)
out = self.relu5(out)
x = self.layer1(out)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# [b, c]
x = self.avgpool(x)
# [b, 100]
x = self.fc2(x)
x = self.dpout(x)
x = self.fc3(x)
x = self.fc4(x)
return x
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
class ResNet_block(keras.Model):
def __init__(self, layer_dims,filters_num): # [2, 2, 2, 2]
super(ResNet_block, self).__init__()
#
# self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
# layers.BatchNormalization(),
# layers.Activation('relu'),
# layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
# ])
self.layer1 = self.build_resblock(filters_num[0], layer_dims[0]) # filters_num = 64
self.layer2 = self.build_resblock(filters_num[1], layer_dims[1], stride=1) # filters_num = 128
self.layer3 = self.build_resblock(filters_num[2], layer_dims[2], stride=1) # filters_num = 256
self.layer4 = self.build_resblock(filters_num[3], layer_dims[3], stride=1) # filters_num = 512
# output: [b, 512, h, w],
# self.avgpool = layers.GlobalAveragePooling2D()
# self.fc = layers.Dense(num_classes)
def call(self, inputs, training=None):
# x = self.stem(inputs)
x1 = self.layer1(inputs)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
# [b, c]
# x = self.avgpool(x)
# [b, 100]
# x = self.fc(x)
return x2,x4
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
def network_up(input_layer_up,filters_num,dropout_rate,Block_res):
# input_layer = Input(input_shape)
# conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 7), padding='same')(input_layer) # filters_num = 8
# conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3),padding='same',kernel_initializer='he_normal',kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # filters_num = 8
conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3), padding='same',
kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) #kernel_initializer='he_normal',
# conv_layer1m = tf.keras.layers.MaxPooling3D(pool_size=(1, 1, 1),padding='same')(conv1)
# conv_layer1g = tf.keras.layers.GlobalMaxPooling3D()(conv1)
conv1_bn = layers.BatchNormalization()(conv1)
conv1_relu = layers.Activation('relu')(conv1_bn)
# conv1_relu = Dropout(0.5)(conv1_relu)
# conv1_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv1_relu)
# conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv1_relu) # filters_num = 16
conv2_bn = layers.BatchNormalization()(conv2)
conv2_relu = layers.Activation('relu')(conv2_bn)
# conv2_relu = Dropout(0.5)(conv2_relu)
# conv2_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv2_relu)
conv3 = layers.Conv3D(filters_num[2], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv2_relu) # filters_num = 32
conv3_bn = layers.BatchNormalization()(conv3)
conv3_relu = layers.Activation('relu')(conv3_bn)
# conv3_relu = Dropout(0.5)(conv3_relu)
# conv3_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv3_relu)
conv3_relu_reshape = layers.Reshape((conv3_relu.shape[1],conv3_relu.shape[2],conv3_relu.shape[3]*conv3_relu.shape[4]))(conv3_relu)
conv3_relu_reshape = Dropout(0.5)(conv3_relu_reshape)
    ################## Second scale #########################
# conv11 = layers.Conv3D(filters_num[0], kernel_size=(5, 5, 3), padding='same',
# kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up)
# conv11_bn = layers.BatchNormalization()(conv11)
# conv11_relu = layers.Activation('relu')(conv11_bn)
#
# # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
# conv22 = layers.Conv3D(filters_num[1], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv11_relu) # filters_num = 16
# conv22_bn = layers.BatchNormalization()(conv22)
# conv22_relu = layers.Activation('relu')(conv22_bn)
#
# conv33 = layers.Conv3D(filters_num[2], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv22_relu) # filters_num = 32
# conv33_bn = layers.BatchNormalization()(conv33)
# conv33_relu = layers.Activation('relu')(conv33_bn)
#
# conv33_relu_reshape = layers.Reshape(
# (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv33_relu)
####################################################
# conv111 = layers.Conv3D(filters_num[0], kernel_size=(7, 7, 3), padding='same',
# kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up)
# conv111_bn = layers.BatchNormalization()(conv111)
# conv111_relu = layers.Activation('relu')(conv111_bn)
#
# # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
# conv222 = layers.Conv3D(filters_num[1], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv111_relu) # filters_num = 16
# conv222_bn = layers.BatchNormalization()(conv222)
# conv222_relu = layers.Activation('relu')(conv222_bn)
#
# conv333 = layers.Conv3D(filters_num[2], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv222_relu) # filters_num = 32
# conv333_bn = layers.BatchNormalization()(conv333)
# conv333_relu = layers.Activation('relu')(conv333_bn)
#
# conv333_relu_reshape = layers.Reshape(
# (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv333_relu)
#################concatenate########################
# conv33333_relu_reshape = Concatenate(axis=-1)([conv3_relu_reshape, conv33_relu_reshape])
#########################################
conv4 = layers.Conv2D(filters_num[3], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv3_relu_reshape) # filters_num = 64
conv4_bn = layers.BatchNormalization()(conv4)
conv4_relu = layers.Activation('relu')(conv4_bn)
# conv4_relu = Dropout(0.5)(conv4_relu)
# conv4_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv4_relu)
# conv4_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv4_relu)
conv5 = layers.Conv2D(filters_num[4], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv4_relu) # filters_num = **
conv5_bn = layers.BatchNormalization()(conv5)
conv5_relu = layers.Activation('relu')(conv5_bn)
# conv5_relu = Dropout(0.5)(conv5_relu)
# conv5_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv5_relu)
# conv5_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv5_relu)
# conv5_dpout = layers.Dropout(dropout_rate)(conv5)
# conv5_reshape = layers.Reshape((conv5_dpout.shape[1],conv5_dpout.shape[2],conv5_dpout.shape[3]))(conv5_dpout)
outputs2,outputs4 = Block_res(conv5_relu)
return conv5,outputs2,outputs4
# layer1 = build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64
# layer2 = build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128
# layer3 = build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256
# layer4 = build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512
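# Hedged wiring sketch for the functional pieces above (all shapes and filter counts here are
# illustrative assumptions, not values taken from the original project):
# inputs = keras.Input(shape=(11, 11, 30, 1))
# res_blocks = ResNet_block(layer_dims=[1, 1, 1, 1], filters_num=[64, 64, 64, 64])
# (equal filter counts keep the residual add shape-compatible, since these blocks use stride=1)
# conv5, out2, out4 = network_up(inputs, filters_num=[8, 16, 32, 64, 64],
#                                dropout_rate=0.5, Block_res=res_blocks)
# model = keras.Model(inputs=inputs, outputs=out4)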
| [
"tensorflow.keras.layers.Conv3D",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.nn.relu",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.layers.add",
"tensorflow.reduce_max",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.nn.sigmoid",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.reduce_mean",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.stack",
"tensorflow.keras.regularizers.l2"
] | [((4670, 4682), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (4680, 4682), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((831, 862), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), '()\n', (860, 862), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((882, 909), 'tensorflow.keras.layers.GlobalMaxPooling2D', 'layers.GlobalMaxPooling2D', ([], {}), '()\n', (907, 909), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((1789, 1807), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['out'], {}), '(out)\n', (1802, 1807), True, 'import tensorflow as tf\n'), ((2197, 2227), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inputs'], {'axis': '(3)'}), '(inputs, axis=3)\n', (2211, 2227), True, 'import tensorflow as tf\n'), ((2247, 2276), 'tensorflow.reduce_max', 'tf.reduce_max', (['inputs'], {'axis': '(3)'}), '(inputs, axis=3)\n', (2260, 2276), True, 'import tensorflow as tf\n'), ((2292, 2328), 'tensorflow.stack', 'tf.stack', (['[avg_out, max_out]'], {'axis': '(3)'}), '([avg_out, max_out], axis=3)\n', (2300, 2328), True, 'import tensorflow as tf\n'), ((2906, 2933), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2931, 2933), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((2955, 2980), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2972, 2980), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((3129, 3156), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3154, 3156), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((3980, 4007), 'tensorflow.keras.layers.add', 'layers.add', (['[out, identity]'], {}), '([out, identity])\n', (3990, 4007), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((4026, 4044), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], {}), '(output)\n', (4036, 4044), True, 'import tensorflow as tf\n'), ((4417, 4429), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (4427, 4429), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((5738, 5769), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), '()\n', (5767, 5769), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((5789, 5814), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['num_classes'], {}), '(num_classes)\n', (5801, 5814), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((6214, 6226), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (6224, 6226), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((6827, 6895), 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', (['filters_num[0]'], {'kernel_size': '(3, 3, 7)', 'padding': '"""same"""'}), "(filters_num[0], kernel_size=(3, 3, 7), padding='same')\n", (6840, 6895), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((6932, 6959), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6957, 6959), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((6982, 7007), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (6999, 7007), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7032, 
7100), 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', (['filters_num[1]'], {'kernel_size': '(3, 3, 5)', 'padding': '"""same"""'}), "(filters_num[1], kernel_size=(3, 3, 5), padding='same')\n", (7045, 7100), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7138, 7165), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (7163, 7165), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7188, 7213), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (7205, 7213), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7238, 7306), 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', (['filters_num[2]'], {'kernel_size': '(3, 3, 3)', 'padding': '"""same"""'}), "(filters_num[2], kernel_size=(3, 3, 3), padding='same')\n", (7251, 7306), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7348, 7375), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (7373, 7375), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7398, 7423), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (7415, 7423), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7491, 7556), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['filters_num[3]'], {'kernel_size': '(3, 3)', 'padding': '"""same"""'}), "(filters_num[3], kernel_size=(3, 3), padding='same')\n", (7504, 7556), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7598, 7625), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (7623, 7625), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7648, 7673), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (7665, 7673), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7698, 7763), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['filters_num[4]'], {'kernel_size': '(3, 3)', 'padding': '"""same"""'}), "(filters_num[4], kernel_size=(3, 3), padding='same')\n", (7711, 7763), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7805, 7832), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (7830, 7832), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7855, 7880), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (7872, 7880), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((7903, 7931), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (7917, 7931), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((8446, 8477), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), '()\n', (8475, 8477), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((8498, 8545), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['filters_num[7]'], {'activation': '"""relu"""'}), "(filters_num[7], activation='relu')\n", (8510, 8545), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((8565, 8612), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['filters_num[6]'], {'activation': '"""relu"""'}), "(filters_num[6], activation='relu')\n", (8577, 
8612), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((8632, 8657), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['num_classes'], {}), '(num_classes)\n', (8644, 8657), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((9747, 9759), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (9757, 9759), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((11455, 11467), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (11465, 11467), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((12477, 12504), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (12502, 12504), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((12530, 12555), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (12547, 12555), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((13018, 13045), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (13043, 13045), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((13071, 13096), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (13088, 13096), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((13443, 13470), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (13468, 13470), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((13496, 13521), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (13513, 13521), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((13722, 13828), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu\n .shape[4])'], {}), '((conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[\n 3] * conv3_relu.shape[4]))\n', (13736, 13828), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((13858, 13870), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (13865, 13870), False, 'from tensorflow.keras.layers import Dropout\n'), ((16980, 17007), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (17005, 17007), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((17033, 17058), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (17050, 17058), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((17514, 17541), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (17539, 17541), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((17567, 17592), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (17584, 17592), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((582, 605), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (597, 605), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((1486, 1522), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(1, 1, avg.shape[1])'], {}), '((1, 1, avg.shape[1]))\n', (1500, 1522), False, 'from tensorflow.keras import layers, Sequential, 
regularizers\n'), ((1574, 1610), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(1, 1, max.shape[1])'], {}), '((1, 1, max.shape[1]))\n', (1588, 1610), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((3382, 3394), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (3392, 3394), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((9022, 9095), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(out.shape[1], out.shape[2], out.shape[3] * out.shape[4])'], {}), '((out.shape[1], out.shape[2], out.shape[3] * out.shape[4]))\n', (9036, 9095), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((1061, 1084), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (1076, 1084), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((1302, 1325), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (1317, 1325), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((2827, 2850), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.0001)'], {}), '(0.0001)\n', (2842, 2850), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((3084, 3107), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.0001)'], {}), '(0.0001)\n', (3099, 3107), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((3428, 3477), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['filter_num', '(1, 1)'], {'strides': 'stride'}), '(filter_num, (1, 1), strides=stride)\n', (3441, 3477), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((5092, 5133), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {'strides': '(1, 1)'}), '(64, (3, 3), strides=(1, 1))\n', (5105, 5133), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((5168, 5195), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (5193, 5195), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((5230, 5255), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (5247, 5255), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((5290, 5356), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(1, 1)', 'padding': '"""same"""'}), "(pool_size=(2, 2), strides=(1, 1), padding='same')\n", (5306, 5356), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((12226, 12249), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.0001)'], {}), '(0.0001)\n', (12241, 12249), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((12945, 12968), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.0001)'], {}), '(0.0001)\n', (12960, 12968), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((13370, 13393), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.0001)'], {}), '(0.0001)\n', (13385, 13393), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((16899, 16922), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.0001)'], {}), '(0.0001)\n', (16914, 16922), False, 'from tensorflow.keras import layers, Sequential, regularizers\n'), ((17441, 17464), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.0001)'], {}), '(0.0001)\n', (17456, 17464), False, 
'from tensorflow.keras import layers, Sequential, regularizers\n')] |
"""Bioconductor run git transition code.
This module assembles the classes for the SVN --> Git transition
can be run in a sequential manner.
It runs the following aspects of the Bioconductor transition.
Note: Update the SVN dump
1. Run Bioconductor Software package transition
2. Run Bioconductor Experiment Data package transition
3. Run Workflow package transition
4. Run Manifest file transition
5. Run Rapid update of master (trunk) and RELEASE_3_5 branches on
software packages
Manual tasks which need to be done:
1. Copy over bare repos to repositories/packages
2. Copy manifest bare git repo to repositories/admin
"""
import src.run_transition as rt
import src.svn_dump_update as sdu
import logging
import time
logging.basicConfig(filename='transition.log',
format='%(levelname)s %(asctime)s %(message)s',
level=logging.DEBUG)
def svn_dump_update(config_file):
sdu.svn_root_update(config_file)
sdu.svn_experiment_root_update(config_file)
return
def run(config_file):
rt.run_software_transition(config_file, new_svn_dump=True)
rt.run_experiment_data_transition(config_file, new_svn_dump=True)
rt.run_workflow_transition(config_file, new_svn_dump=True)
rt.run_manifest_transition(config_file, new_svn_dump=True)
return
if __name__ == '__main__':
start_time = time.time()
config_file = "./settings.ini"
svn_dump_update(config_file)
run(config_file)
# TODO: Run updates after dump update
svn_dump_update(config_file)
rt.run_updates(config_file)
logging.info("--- %s seconds ---" % (time.time() - start_time))
| [
"logging.basicConfig",
"src.run_transition.run_software_transition",
"src.svn_dump_update.svn_experiment_root_update",
"src.run_transition.run_experiment_data_transition",
"src.run_transition.run_workflow_transition",
"src.run_transition.run_updates",
"src.run_transition.run_manifest_transition",
"time.time",
"src.svn_dump_update.svn_root_update"
] | [((727, 847), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""transition.log"""', 'format': '"""%(levelname)s %(asctime)s %(message)s"""', 'level': 'logging.DEBUG'}), "(filename='transition.log', format=\n '%(levelname)s %(asctime)s %(message)s', level=logging.DEBUG)\n", (746, 847), False, 'import logging\n'), ((923, 955), 'src.svn_dump_update.svn_root_update', 'sdu.svn_root_update', (['config_file'], {}), '(config_file)\n', (942, 955), True, 'import src.svn_dump_update as sdu\n'), ((960, 1003), 'src.svn_dump_update.svn_experiment_root_update', 'sdu.svn_experiment_root_update', (['config_file'], {}), '(config_file)\n', (990, 1003), True, 'import src.svn_dump_update as sdu\n'), ((1043, 1101), 'src.run_transition.run_software_transition', 'rt.run_software_transition', (['config_file'], {'new_svn_dump': '(True)'}), '(config_file, new_svn_dump=True)\n', (1069, 1101), True, 'import src.run_transition as rt\n'), ((1106, 1171), 'src.run_transition.run_experiment_data_transition', 'rt.run_experiment_data_transition', (['config_file'], {'new_svn_dump': '(True)'}), '(config_file, new_svn_dump=True)\n', (1139, 1171), True, 'import src.run_transition as rt\n'), ((1176, 1234), 'src.run_transition.run_workflow_transition', 'rt.run_workflow_transition', (['config_file'], {'new_svn_dump': '(True)'}), '(config_file, new_svn_dump=True)\n', (1202, 1234), True, 'import src.run_transition as rt\n'), ((1239, 1297), 'src.run_transition.run_manifest_transition', 'rt.run_manifest_transition', (['config_file'], {'new_svn_dump': '(True)'}), '(config_file, new_svn_dump=True)\n', (1265, 1297), True, 'import src.run_transition as rt\n'), ((1355, 1366), 'time.time', 'time.time', ([], {}), '()\n', (1364, 1366), False, 'import time\n'), ((1531, 1558), 'src.run_transition.run_updates', 'rt.run_updates', (['config_file'], {}), '(config_file)\n', (1545, 1558), True, 'import src.run_transition as rt\n'), ((1600, 1611), 'time.time', 'time.time', ([], {}), '()\n', (1609, 1611), False, 'import time\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# This test is based on the test suite implemented for Recommenders project
# https://github.com/Microsoft/Recommenders/tree/master/tests
import papermill as pm
import pytest
import scrapbook as sb
from utils_cv.common.data import unzip_url
from utils_cv.detection.data import Urls
# Unless manually modified, python3 should be
# the name of the current jupyter kernel
# that runs on the activated conda environment
KERNEL_NAME = "python3"
OUTPUT_NOTEBOOK = "output.ipynb"
@pytest.mark.notebooks
def test_00_notebook_run(detection_notebooks):
notebook_path = detection_notebooks["00"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(PM_VERSION=pm.__version__),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["detection_bounding_box"].data) > 0
@pytest.mark.gpu
@pytest.mark.notebooks
def test_01_notebook_run(detection_notebooks, tiny_od_data_path):
notebook_path = detection_notebooks["01"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_data_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["training_losses"].data) > 0
training_aps = nb_output.scraps["training_average_precision"].data
assert len(training_aps) > 0
for d in training_aps:
assert isinstance(d, dict)
assert len(set([len(d) for d in training_aps])) == 1
@pytest.mark.gpu
@pytest.mark.notebooks
def test_02_notebook_run(detection_notebooks, tiny_od_mask_data_path):
notebook_path = detection_notebooks["02"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_mask_data_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["training_losses"].data) > 0
training_aps = nb_output.scraps["training_average_precision"].data
assert len(training_aps) > 0
for d in training_aps:
assert isinstance(d, dict)
assert len(set([len(d) for d in training_aps])) == 1
@pytest.mark.gpu
@pytest.mark.notebooks
def test_03_notebook_run(
detection_notebooks, tiny_od_keypoint_data_path, tmp_session
):
notebook_path = detection_notebooks["03"]
data_path2 = unzip_url(
Urls.fridge_objects_keypoint_top_bottom_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
IM_SIZE=100,
EPOCHS=1,
DATA_PATH=tiny_od_keypoint_data_path,
DATA_PATH2=data_path2,
THRESHOLD=0.01,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["keypoints"].data) == len(
nb_output.scraps["bboxes"].data
)
@pytest.mark.gpu
@pytest.mark.notebooks
def test_12_notebook_run(
detection_notebooks, tiny_od_data_path, tiny_ic_negatives_path
):
notebook_path = detection_notebooks["12"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_data_path,
NEG_DATA_PATH=tiny_ic_negatives_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["valid_accs"].data) == 1
assert 5 <= len(nb_output.scraps["hard_im_scores"].data) <= 10
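# Hedged note: given the pytest markers used above, a typical invocation would be something like
# `pytest -m "notebooks and not gpu"` to skip the GPU-dependent cases (the exact command line is
# an assumption, not taken from this repository).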
| [
"scrapbook.read_notebook",
"utils_cv.common.data.unzip_url"
] | [((870, 903), 'scrapbook.read_notebook', 'sb.read_notebook', (['OUTPUT_NOTEBOOK'], {}), '(OUTPUT_NOTEBOOK)\n', (886, 903), True, 'import scrapbook as sb\n'), ((1418, 1451), 'scrapbook.read_notebook', 'sb.read_notebook', (['OUTPUT_NOTEBOOK'], {}), '(OUTPUT_NOTEBOOK)\n', (1434, 1451), True, 'import scrapbook as sb\n'), ((2191, 2224), 'scrapbook.read_notebook', 'sb.read_notebook', (['OUTPUT_NOTEBOOK'], {}), '(OUTPUT_NOTEBOOK)\n', (2207, 2224), True, 'import scrapbook as sb\n'), ((2708, 2825), 'utils_cv.common.data.unzip_url', 'unzip_url', (['Urls.fridge_objects_keypoint_top_bottom_tiny_path'], {'fpath': 'tmp_session', 'dest': 'tmp_session', 'exist_ok': '(True)'}), '(Urls.fridge_objects_keypoint_top_bottom_tiny_path, fpath=\n tmp_session, dest=tmp_session, exist_ok=True)\n', (2717, 2825), False, 'from utils_cv.common.data import unzip_url\n'), ((3223, 3256), 'scrapbook.read_notebook', 'sb.read_notebook', (['OUTPUT_NOTEBOOK'], {}), '(OUTPUT_NOTEBOOK)\n', (3239, 3256), True, 'import scrapbook as sb\n'), ((3888, 3921), 'scrapbook.read_notebook', 'sb.read_notebook', (['OUTPUT_NOTEBOOK'], {}), '(OUTPUT_NOTEBOOK)\n', (3904, 3921), True, 'import scrapbook as sb\n')] |
from reportlab.lib.units import inch
from reportlab.platypus import SimpleDocTemplate, Spacer
from reportlab.rl_config import defaultPageSize
from reportlab.platypus.flowables import Flowable
def generate_order(job, path, door_style, doors=[], drawers=[]):
PAGE_HEIGHT = defaultPageSize[1]
PAGE_WIDTH = defaultPageSize[0]
LEFT_MARGIN = 30
LINE_HEIGHT = 18
BACKGROUND_COLOR = (33 / 255, 80 / 255, 156 / 255)
CURSOR_HEIGHT = PAGE_HEIGHT - 60
INPUT_HEIGHT = LINE_HEIGHT - (LINE_HEIGHT * 0.1)
SPECIES = door_style.species
STYLE = door_style.name
INSIDE_PROFILE = door_style.inside_profile
OUTSIDE_PROFILE = door_style.outside_profile
TOTAL_DRS = len(doors)
TOTAL_DWRS = len(drawers)
def myFirstPage(c, doc):
cursor = CURSOR_HEIGHT
c.saveState()
c.setStrokeColorRGB(
BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]
)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.rect(
LEFT_MARGIN, PAGE_HEIGHT - 40, PAGE_WIDTH - (LEFT_MARGIN * 2), 24, fill=1
)
c.setFillColorRGB(1, 1, 1)
c.setFont("Helvetica-Bold", 16)
c.drawCentredString(PAGE_WIDTH / 2.0, PAGE_HEIGHT - 34, "DOOR ORDER FORM")
c.setFont("Helvetica", 12)
c.setFillColorRGB(0, 0, 0)
c.drawString(LEFT_MARGIN, cursor, f"Customer : JS Designs Shop, LLC")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
f"Order Date : {job.order_date}",
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"PO # : {job.name}-{STYLE}-{SPECIES}")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, "Delivery Date : ASAP"
)
cursor -= LINE_HEIGHT
c.setFont("Helvetica-Bold", 12)
c.drawString(LEFT_MARGIN, cursor, f"Door Style : {STYLE}")
c.setFont("Helvetica", 12)
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, "Phone : 901-853-7568"
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Panel : ")
c.acroForm.textfield(
x=LEFT_MARGIN + 40,
y=cursor - 4,
name="Panel",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 60,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, "Comments : ")
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Wood Type : {SPECIES}")
c.line(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
PAGE_WIDTH - LEFT_MARGIN,
cursor,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Inside Profile : {INSIDE_PROFILE}")
# c.acroForm.textfield(
# x=LEFT_MARGIN + 78,
# y=cursor - 4,
# name="inside_profile",
# value=" N/A ",
# height=INPUT_HEIGHT,
# width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 98,
# borderWidth=0,
# # fillColor=([1, 1, 1]),
# relative=True,
# )
c.line(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
PAGE_WIDTH - LEFT_MARGIN,
cursor,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Outside Profile : {OUTSIDE_PROFILE}")
# c.acroForm.textfield(
# x=LEFT_MARGIN + 88,
# y=cursor - 4,
# name="outside_profile",
# value=" N/A ",
# height=INPUT_HEIGHT,
# width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 108,
# borderWidth=0,
# # fillColor=([1, 1, 1]),
# relative=True,
# )
c.line(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
PAGE_WIDTH - LEFT_MARGIN,
cursor,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Stile/Rails : ")
c.acroForm.textfield(
x=LEFT_MARGIN + 62,
y=cursor - 4,
name="stiles_rails",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 82,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.setFont("Helvetica-Bold", 12)
c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f"Drawer Fronts : ")
c.acroForm.textfield(
x=LEFT_MARGIN + 375,
y=cursor - 4,
name="drawer_fronts",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 92,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.setFont("Helvetica", 12)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Boring For Hinges : No")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f"Outside Profile : "
)
c.acroForm.textfield(
x=LEFT_MARGIN + 370,
y=cursor - 4,
name="out_profile",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 87,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Add Hinges : No")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
f" 5 PC Front: Slab:",
)
c.acroForm.textfield(
x=LEFT_MARGIN + 350,
y=cursor - 4,
name="5_pc_front",
value=" N/A ",
height=INPUT_HEIGHT,
width=30,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.acroForm.textfield(
x=LEFT_MARGIN + 430,
y=cursor - 4,
name="slab_front",
value=" N/A ",
height=INPUT_HEIGHT,
width=30,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
cursor -= 12
c.setFont("Times-Italic", 10)
c.drawString(
LEFT_MARGIN,
cursor,
f"Boring not available in arched doors, applied mould doors",
)
cursor -= 10
c.drawString(
LEFT_MARGIN,
cursor,
f"and raised bead profile mitered doors",
)
cursor -= 14
c.setFont("Times-BoldItalic", 12)
c.drawString(
LEFT_MARGIN, cursor, f'Cullman will not bore any door with 2" stiles'
)
cursor -= 20
c.setFont("Helvetica-Bold", 14)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, f"Total Doors: {TOTAL_DRS}")
c.drawCentredString(
((PAGE_WIDTH / 4) * 3) + 10, cursor, f"Total Drawer Fronts: {TOTAL_DWRS}"
)
cursor -= 24
c.setStrokeColorRGB(0, 0, 0)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.rect(LEFT_MARGIN + 38, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 98, cursor, 170, 20, fill=1)
c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 368, cursor, 170, 20, fill=1)
c.setFont("Helvetica-Bold", 12)
c.setFillColorRGB(1, 1, 1)
string_center = LEFT_MARGIN + 68
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
string_center += 155
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
c.setFont("Helvetica", 9)
c.setFillColorRGB(0, 0, 0)
c.drawCentredString(
PAGE_WIDTH / 2, 40, f"Page 1 of {job.name}-{STYLE}-{SPECIES}"
)
c.drawCentredString(
PAGE_WIDTH / 2,
20,
'Reminder : Any doors 46" and over in height will automatically receive a horizontal center rail unless otherwise noted.',
)
c.restoreState()
def myLaterPages(c, doc):
cursor = PAGE_HEIGHT - 54
c.saveState()
c.setFont("Helvetica-Bold", 14)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, "Doors")
c.drawCentredString(((PAGE_WIDTH / 4) * 3) + 10, cursor, "Drawer Fronts")
cursor -= 24
c.setStrokeColorRGB(0, 0, 0)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.rect(LEFT_MARGIN + 38, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 98, cursor, 170, 20, fill=1)
c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 368, cursor, 170, 20, fill=1)
c.setFont("Helvetica-Bold", 12)
c.setFillColorRGB(1, 1, 1)
string_center = LEFT_MARGIN + 68
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
string_center += 155
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
c.setFont("Helvetica", 9)
c.setFillColorRGB(0, 0, 0)
c.drawCentredString(
PAGE_WIDTH / 2, 40, f"Page {doc.page} of {job.name}-{STYLE}-{SPECIES}"
)
c.drawCentredString(
PAGE_WIDTH / 2,
20,
'Reminder : Any doors 46" and over in height will automatically receive a horizontal center rail unless otherwise noted.',
)
c.restoreState()
class OrderEntry(Flowable):
"""Draws table entry for each item in list of door sizes."""
def __init__(
self,
xoffset=0,
height=20,
dr_qty="",
dr_size="",
dwr_qty="",
dwr_size="",
index=0,
):
Flowable.__init__(self)
self.dr_qty = dr_qty
self.dr_size = dr_size
self.dwr_qty = dwr_qty
self.dwr_size = dwr_size
self.index = index
self.height = height
self.idx_box_x = xoffset
self.idx_box_width = 40
self.string_center = xoffset + (self.idx_box_width / 2)
self.qty_box_x = self.idx_box_width + xoffset
self.qty_box_width = 60
self.size_box_x = self.qty_box_width - 10
self.size_box_width = 170
self.second_column_offset = 270
def draw(self):
# Door
self.canv.setStrokeColorRGB(0, 0, 0)
self.canv.setFillColorRGB(
BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]
)
self.canv.rect(self.idx_box_x, 0, self.idx_box_width, self.height, fill=1)
self.canv.setFillColorRGB(1, 1, 1)
self.canv.setFont("Helvetica", 12)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, str(self.index)
)
self.canv.setFillColorRGB(0, 0, 0)
self.canv.rect(self.qty_box_x, 0, self.qty_box_width, self.height)
self.string_center += (self.idx_box_width / 2) + (self.qty_box_width / 2)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, self.dr_qty
)
self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height)
self.string_center += (self.qty_box_width / 2) + (self.size_box_width / 2)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, self.dr_size
)
# Drawer
if self.dwr_qty != "" and self.dwr_size != "":
self.canv.rect(
self.second_column_offset + self.qty_box_x,
0,
self.qty_box_width,
self.height,
)
self.string_center += 155
self.canv.drawCentredString(
self.string_center,
0.25 * self.height,
self.dwr_qty,
)
self.canv.rect(
self.second_column_offset + self.size_box_x,
0,
self.size_box_width,
self.height,
)
self.string_center += (self.qty_box_width / 2) + (
self.size_box_width / 2
)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, self.dwr_size
)
def build_pdf(path, name, door_list, drawer_list):
doc = SimpleDocTemplate(f"{path}/{name}-{STYLE}.pdf")
Story = [Spacer(1, 3.11 * inch)]
num_of_doors = len(door_list)
num_of_drawers = len(drawer_list)
num_of_entries = max(num_of_doors, num_of_drawers)
for i in range(0, num_of_entries):
try:
door_qty, door_size = door_list[i]["qty"], door_list[i]["size"]
except IndexError:
door_qty, door_size = "", ""
try:
drawer_qty, drawer_size = drawer_list[i]["qty"], drawer_list[i]["size"]
except IndexError:
drawer_qty, drawer_size = "", ""
p = OrderEntry(
xoffset=-50,
dr_qty=door_qty,
dr_size=door_size,
dwr_qty=drawer_qty,
dwr_size=drawer_size,
index=i + 1,
)
Story.append(p)
doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)
build_pdf(path, job.name, doors, drawers)
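# --- Editor's note (illustrative, not part of the original script): the excerpt above
# --- relies on imports made earlier in the full file. A minimal set that matches the
# --- calls used here would be:
#
#     from reportlab.lib.units import inch
#     from reportlab.platypus import SimpleDocTemplate, Spacer
#     from reportlab.platypus.flowables import Flowable
#
# build_pdf() hands the list of OrderEntry flowables to doc.build(), which lays them
# out and invokes myFirstPage/myLaterPages to draw the static header and footer on
# each page.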
| [
"reportlab.platypus.SimpleDocTemplate",
"reportlab.platypus.flowables.Flowable.__init__",
"reportlab.platypus.Spacer"
] | [((13558, 13605), 'reportlab.platypus.SimpleDocTemplate', 'SimpleDocTemplate', (['f"""{path}/{name}-{STYLE}.pdf"""'], {}), "(f'{path}/{name}-{STYLE}.pdf')\n", (13575, 13605), False, 'from reportlab.platypus import SimpleDocTemplate, Spacer\n'), ((10743, 10766), 'reportlab.platypus.flowables.Flowable.__init__', 'Flowable.__init__', (['self'], {}), '(self)\n', (10760, 10766), False, 'from reportlab.platypus.flowables import Flowable\n'), ((13623, 13645), 'reportlab.platypus.Spacer', 'Spacer', (['(1)', '(3.11 * inch)'], {}), '(1, 3.11 * inch)\n', (13629, 13645), False, 'from reportlab.platypus import SimpleDocTemplate, Spacer\n')] |
from bs4 import BeautifulSoup
import logging
import pandas as pd
import csv
import re
import requests
from urllib.parse import urljoin
logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)
def get_html(url):
return requests.get(url).text
class SenateCrawler:
def __init__(self):
self.base_url = "https://www25.senado.leg.br/"
self.search_url = self.base_url + "web/senadores/em-exercicio/-/e/por-nome"
self.senate = []
def get_senate(self, url):
        soup = BeautifulSoup(get_html(url), "html.parser")  # use the url argument (run() passes self.search_url)
trs = soup.find("table").find("tbody").find_all("tr")
for tr in trs:
cells = tr.find_all("td")
senateperson = {
"name": cells[0].get_text(),
"party": cells[1].get_text(),
"email": cells[5].get_text(),
}
if senateperson["email"]:
self.senate.append(senateperson)
def run(self):
try:
self.get_senate(self.search_url)
except Exception:
logging.exception("global failure")
finally:
df = pd.DataFrame(self.senate)
df.to_csv("senate.csv")
logging.info("program exited")
| [
"logging.basicConfig",
"requests.get",
"logging.exception",
"pandas.DataFrame",
"logging.info"
] | [((137, 229), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s:%(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s %(levelname)s:%(message)s', level=\n logging.INFO)\n", (156, 229), False, 'import logging\n'), ((257, 274), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (269, 274), False, 'import requests\n'), ((1171, 1196), 'pandas.DataFrame', 'pd.DataFrame', (['self.senate'], {}), '(self.senate)\n', (1183, 1196), True, 'import pandas as pd\n'), ((1245, 1275), 'logging.info', 'logging.info', (['"""program exited"""'], {}), "('program exited')\n", (1257, 1275), False, 'import logging\n'), ((1101, 1136), 'logging.exception', 'logging.exception', (['"""global failure"""'], {}), "('global failure')\n", (1118, 1136), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
"""
Lacework Container Registries API wrapper.
"""
import logging
logger = logging.getLogger(__name__)
class ContainerRegistriesAPI(object):
"""
Lacework Container Registries API.
"""
def __init__(self, session):
"""
Initializes the ContainerRegistriesAPI object.
:param session: An instance of the HttpSession class
:return ContainerRegistriesAPI object.
"""
super(ContainerRegistriesAPI, self).__init__()
self._session = session
def create(self,
name,
type,
enabled,
data,
org=False):
"""
A method to create a new container registry.
:param name: A string representing the container registry name.
:param type: A string representing the container registry type.
:param enabled: A boolean/integer representing whether the container registry is enabled.
(0 or 1)
:param data: A JSON object matching the schema for the specified type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Creating container registry in Lacework...")
# Build the Container Registries request URI
api_uri = "/api/v2/ContainerRegistries"
data = {
"name": name,
"type": type,
"enabled": int(bool(enabled)),
"data": data
}
response = self._session.post(api_uri, org=org, data=data)
return response.json()
def get(self,
guid=None,
type=None,
org=False):
"""
A method to get all container registries.
:param guid: A string representing the container registry GUID.
:param type: A string representing the container registry type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Getting container registry info from Lacework...")
# Build the Container Registries request URI
if guid:
api_uri = f"/api/v2/ContainerRegistries/{guid}"
elif type:
api_uri = f"/api/v2/ContainerRegistries/{type}"
else:
api_uri = "/api/v2/ContainerRegistries"
response = self._session.get(api_uri, org=org)
return response.json()
def get_by_type(self,
type,
org=False):
"""
A method to get all container registries by type.
:param type: A string representing the container registry type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
return self.get(type=type, org=org)
def get_by_guid(self,
guid,
org=False):
"""
        A method to get a single container registry by GUID.
:param guid: A string representing the container registry GUID.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
return self.get(guid=guid, org=org)
def search(self,
query_data=None,
org=False):
"""
A method to search container registries.
:param query_data: A dictionary containing the desired search parameters.
(filters, returns)
:return response json
"""
logger.info("Searching container registries from Lacework...")
# Build the Container Registries request URI
api_uri = "/api/v2/ContainerRegistries/search"
response = self._session.post(api_uri, data=query_data, org=org)
return response.json()
def update(self,
guid,
name=None,
type=None,
enabled=None,
data=None,
org=False):
"""
        A method to update a container registry.
:param guid: A string representing the container registry GUID.
:param name: A string representing the container registry name.
:param type: A string representing the container registry type.
:param enabled: A boolean/integer representing whether the container registry is enabled.
(0 or 1)
:param data: A JSON object matching the schema for the specified type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Updating container registry in Lacework...")
# Build the Container Registries request URI
api_uri = f"/api/v2/ContainerRegistries/{guid}"
tmp_data = {}
if name:
tmp_data["name"] = name
if type:
tmp_data["type"] = type
if enabled is not None:
tmp_data["enabled"] = int(bool(enabled))
if data:
tmp_data["data"] = data
response = self._session.patch(api_uri, org=org, data=tmp_data)
return response.json()
def delete(self,
guid,
org=False):
"""
        A method to delete a container registry.
:param guid: A string representing the container registry GUID.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Deleting container registry in Lacework...")
# Build the Container Registries request URI
api_uri = f"/api/v2/ContainerRegistries/{guid}"
response = self._session.delete(api_uri, org=org)
if response.status_code == 204:
return response
else:
return response.json()
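# --- Usage sketch (illustrative only): `session` stands in for the HttpSession object
# --- that the surrounding SDK normally provides, and the literal values below are
# --- placeholders rather than a verified schema for any particular registry type.
#
#     registries = ContainerRegistriesAPI(session)
#     registries.create(name="my-registry", type="<registry type>", enabled=1,
#                       data={"<type-specific field>": "<value>"})
#     registries.search(query_data={"filters": [...], "returns": [...]})
#     registries.get_by_guid("<intgGuid>")
#     registries.update("<intgGuid>", enabled=0)
#     registries.delete("<intgGuid>")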
| [
"logging.getLogger"
] | [((101, 128), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (118, 128), False, 'import logging\n')] |
import json
import re
import responses
from werkzeug.test import Client
from werkzeug.wrappers import Response
from satosa.proxy_server import make_app
from satosa.satosa_config import SATOSAConfig
class TestConsent:
def test_full_flow(self, satosa_config_dict, consent_module_config):
api_url = "https://consent.example.com/api"
redirect_url = "https://consent.example.com/redirect"
consent_module_config["config"]["api_url"] = api_url
consent_module_config["config"]["redirect_url"] = redirect_url
satosa_config_dict["MICRO_SERVICES"].append(consent_module_config)
# application
test_client = Client(make_app(SATOSAConfig(satosa_config_dict)), Response)
# incoming auth req
http_resp = test_client.get("/{}/{}/request".format(satosa_config_dict["BACKEND_MODULES"][0]["name"],
satosa_config_dict["FRONTEND_MODULES"][0]["name"]))
assert http_resp.status_code == 200
verify_url_re = re.compile(r"{}/verify/\w+".format(api_url))
with responses.RequestsMock() as rsps:
# fake no previous consent
consent_request_url_re = re.compile(r"{}/creq/\w+".format(api_url))
rsps.add(responses.GET, verify_url_re, status=401)
rsps.add(responses.GET, consent_request_url_re, "test_ticket", status=200)
# incoming auth resp
http_resp = test_client.get("/{}/response".format(satosa_config_dict["BACKEND_MODULES"][0]["name"]))
assert http_resp.status_code == 302
assert http_resp.headers["Location"].startswith(redirect_url)
with responses.RequestsMock() as rsps:
# fake consent
rsps.add(responses.GET, verify_url_re, json.dumps({"foo": "bar"}), status=200)
# incoming consent response
http_resp = test_client.get("/consent/handle_consent")
assert http_resp.status_code == 200
| [
"satosa.satosa_config.SATOSAConfig",
"json.dumps",
"responses.RequestsMock"
] | [((1099, 1123), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (1121, 1123), False, 'import responses\n'), ((1686, 1710), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (1708, 1710), False, 'import responses\n'), ((676, 708), 'satosa.satosa_config.SATOSAConfig', 'SATOSAConfig', (['satosa_config_dict'], {}), '(satosa_config_dict)\n', (688, 708), False, 'from satosa.satosa_config import SATOSAConfig\n'), ((1798, 1824), 'json.dumps', 'json.dumps', (["{'foo': 'bar'}"], {}), "({'foo': 'bar'})\n", (1808, 1824), False, 'import json\n')] |
import argparse
import glob
import os
import pickle
from pathlib import Path
import numpy as np
from PIL import Image
from tqdm import tqdm
from src.align.align_trans import get_reference_facial_points, warp_and_crop_face
# sys.path.append("../../")
from src.align.detector import detect_faces
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="face alignment")
parser.add_argument(
"-source_root",
"--source_root",
help="specify your source dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-dest_root",
"--dest_root",
help="specify your destination dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-crop_size",
"--crop_size",
help="specify size of aligned faces, align and crop with padding",
default=112,
type=int,
)
args = parser.parse_args()
source_root = args.source_root # specify your source dir
dest_root = args.dest_root # specify your destination dir
crop_size = (
args.crop_size
) # specify size of aligned faces, align and crop with padding
scale = crop_size / 112.0
reference = get_reference_facial_points(default_square=True) * scale
cwd = os.getcwd() # delete '.DS_Store' existed in the source_root
os.chdir(source_root)
os.system("find . -name '*.DS_Store' -type f -delete")
os.chdir(cwd)
imfiles = [
f
for f in glob.glob(f"{source_root}F????/MID*/faces/msceleb*")
if Path(f).is_file()
]
# images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles}
meta = {}
# for subfolder in tqdm(os.listdir(source_root)):
for imfile in tqdm(imfiles):
ref = imfile.replace(source_root, "")
print("Processing\t{}".format(imfile))
img = Image.open(imfile)
try: # Handle exception
bbs, landmarks = detect_faces(img)
except Exception:
print("{} is discarded due to exception!".format(imfile))
continue
ref = imfile.replace(source_root, "")
ndetections = len(landmarks)
if (
ndetections == 0
): # If the landmarks cannot be detected, the img will be discarded
print("{} is discarded due to non-detected landmarks!".format(imfile))
meta[ref] = []
continue
li_meta = []
for i in range(ndetections):
im_meta = {}
im_meta["face"] = i
im_meta["landmarks"] = landmarks[i]
im_meta["bb"] = bbs[i]
facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)]
warped_face = warp_and_crop_face(
np.array(img),
facial5points,
reference,
crop_size=(crop_size, crop_size),
)
img_warped = Image.fromarray(warped_face)
image_name = imfile.replace("images", "cropped").replace(
".jpg", "-{:02d}.jpg".format(i)
)
# im_meta['ref'] = "/".join(image_name.split('/')[-5:])
img_warped.save(image_name)
li_meta.append(im_meta)
meta[ref] = li_meta
with open(source_root + "cropped-meta.pkl", "wb") as f:
pickle.dump(meta, f)
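    # --- Illustrative note (not in the original script): the pickle written above maps each
    # --- image path (relative to source_root) to a list of per-face dicts with the keys
    # --- "face", "landmarks" and "bb", so it can be read back later with:
    #
    #     with open(source_root + "cropped-meta.pkl", "rb") as f:
    #         meta = pickle.load(f)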
| [
"PIL.Image.fromarray",
"PIL.Image.open",
"pickle.dump",
"argparse.ArgumentParser",
"src.align.align_trans.get_reference_facial_points",
"pathlib.Path",
"src.align.detector.detect_faces",
"tqdm.tqdm",
"os.getcwd",
"os.chdir",
"numpy.array",
"os.system",
"glob.glob"
] | [((338, 391), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""face alignment"""'}), "(description='face alignment')\n", (361, 391), False, 'import argparse\n'), ((1351, 1362), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1360, 1362), False, 'import os\n'), ((1416, 1437), 'os.chdir', 'os.chdir', (['source_root'], {}), '(source_root)\n', (1424, 1437), False, 'import os\n'), ((1442, 1496), 'os.system', 'os.system', (['"""find . -name \'*.DS_Store\' -type f -delete"""'], {}), '("find . -name \'*.DS_Store\' -type f -delete")\n', (1451, 1496), False, 'import os\n'), ((1501, 1514), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (1509, 1514), False, 'import os\n'), ((1825, 1838), 'tqdm.tqdm', 'tqdm', (['imfiles'], {}), '(imfiles)\n', (1829, 1838), False, 'from tqdm import tqdm\n'), ((1283, 1331), 'src.align.align_trans.get_reference_facial_points', 'get_reference_facial_points', ([], {'default_square': '(True)'}), '(default_square=True)\n', (1310, 1331), False, 'from src.align.align_trans import get_reference_facial_points, warp_and_crop_face\n'), ((1947, 1965), 'PIL.Image.open', 'Image.open', (['imfile'], {}), '(imfile)\n', (1957, 1965), False, 'from PIL import Image\n'), ((3408, 3428), 'pickle.dump', 'pickle.dump', (['meta', 'f'], {}), '(meta, f)\n', (3419, 3428), False, 'import pickle\n'), ((1559, 1611), 'glob.glob', 'glob.glob', (['f"""{source_root}F????/MID*/faces/msceleb*"""'], {}), "(f'{source_root}F????/MID*/faces/msceleb*')\n", (1568, 1611), False, 'import glob\n'), ((2028, 2045), 'src.align.detector.detect_faces', 'detect_faces', (['img'], {}), '(img)\n', (2040, 2045), False, 'from src.align.detector import detect_faces\n'), ((3007, 3035), 'PIL.Image.fromarray', 'Image.fromarray', (['warped_face'], {}), '(warped_face)\n', (3022, 3035), False, 'from PIL import Image\n'), ((2845, 2858), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2853, 2858), True, 'import numpy as np\n'), ((1623, 1630), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (1627, 1630), False, 'from pathlib import Path\n')] |
from srtvoiceext import extract
if __name__ == '__main__':
ext = extract('video.mkv', 'subtitles.srt', 'outdir') | [
"srtvoiceext.extract"
] | [((70, 117), 'srtvoiceext.extract', 'extract', (['"""video.mkv"""', '"""subtitles.srt"""', '"""outdir"""'], {}), "('video.mkv', 'subtitles.srt', 'outdir')\n", (77, 117), False, 'from srtvoiceext import extract\n')] |
# encoding: utf-8
import urwid
import time, os, copy
from rpg_game.utils import log, mod, distance
from rpg_game.constants import *
from urwid import raw_display
SIZE = lambda scr=raw_display.Screen(): scr.get_cols_rows()
MIN_HEADER_HEIGHT = 3
MAX_MENU_WIDTH = 48
FOOTER_HEIGHT = 4
PALETTE = [
("line", 'black', 'white', "standout"),
("top","white","black"),
("frame","white","white"),
("player", "light green", "black"),
("other", "light blue", "black"),
("monster", "dark red", "black"),
("fatigued", "dark red", "white", "standout"),
("reversed", "standout", ""),
("common","white","black"),
("common_line","black","white","standout"),
("uncommon","dark cyan","black"),
("uncommon_line","dark cyan","white","standout"),
("rare","yellow","black"),
("rare_line","yellow","white","standout"),
("unique","light magenta","black"),
("unique_line","light magenta","white","standout"),
("set","light green","black"),
("set_line","light green","white","standout"),
("normal","white","black"),
("positive","light green","black"),
("negative","dark red","black"),
("white","white","black"),
("disabled","dark red","black"),
("red","dark red","black"),
("green","light green","black"),
("yellow","yellow","black"),
("brown","brown","black"),
("white_line","black","white", "standout"),
("red_line","dark red","white", "standout"),
("green_line","light green","white", "standout"),
("yellow_line","yellow","white", "standout"),
("cyan","light cyan","black"),
("cyan_line","light cyan","white", "standout"),
("name","white","black"),
]
class UiFrame(urwid.Frame):
def __init__(self, parent, mind, *args, **kargs):
self.parent = parent
self.mind = mind
urwid.AttrMap(self,"frame")
super().__init__(*args, **kargs)
@property
def player(self):
if self.mind.avatar.uuid in self.mind.master.players:
return self.mind.master.players[self.mind.avatar.uuid]
else:
return None
@property
def connection(self):
if self.mind.avatar.uuid in self.mind.connections:
return self.mind.connections[self.mind.avatar.uuid]
else:
return None
def handle_input(self, _input):
pass
def on_update(self):
pass
def dispatch_event(self, event_type, *args):
self.mind.get_GUI_event(event_type, *args)
def register_event(self, event_type, callback):
self.mind.register_GUI_event(event_type, callback)
def disconnect(self):
pass
def restart(self):
pass
def focus_next(self):
pass
def focus_previous(self):
pass
def update_body(self, title, no_title=False, boxed=False):
self.active_body = self.bodies[title]
if boxed:
if no_title:
self.contents["body"] = (urwid.LineBox(self.active_body), None)
else:
self.contents["body"] = (urwid.LineBox(self.active_body, title=title), None)
else:
self.contents["body"] = (self.active_body, None)
class GUI(UiFrame):
def __init__(self, parent, mind):
self.bodies = {"Intro" : IntroFrame(self, mind)}
self.active_body = self.bodies["Intro"]
super().__init__(parent, mind, self.active_body)
def on_update(self):
self.active_body.on_update()
def handle_input(self, _input):
# print("HANDLING", _input)
self.active_body.handle_input(_input)
# def exit(self):
# self.disconnect()
# self.mind.disconnect()#should use dispatch event
def restart(self):
self.update_body("Intro", no_title=True)
def start_game_frame(self):
self.bodies["Game"] = GameFrame(self, self.mind)
self.update_body("Game", no_title=True)
class IntroFrame(UiFrame):
def __init__(self, parent, mind):
# urwid.Padding(urwid.BigText(('top', "Hack\'n\'SSH"), urwid.HalfBlock5x4Font())),
self.choices = ("Warrior", "Dwarf", "Wizard", "Thief", "Bard")
self.descriptions = {"Warrior": "The mighty warrior\n\nStrength +1, Hit points +4\nCharge and parry",
"Dwarf": "The short dwarf\n\nStrength +1, Constitution +1, Hit points +6\nDemolish and parry",
"Wizard": "The opportune wizard\n\nIntelligence +1\n Fireball, teleport and ice wall",
"Thief": "The sneaky thief\n\nDexterity +1, Intelligence +1, Hit points +2\nSneak attack, hide and trap",
"Bard": "The noisy bard\n\nCharisma +1, Dexterity +1, Intelligence +1, Hit points +2\nSing and summon"}
line = []
for c in self.choices:
btn = attr_button(c, self.select_class)
line.append(btn)
walker = urwid.SimpleFocusListWalker(line)
urwid.connect_signal(walker, "modified", self.update_description)
self.listbox = SelectableListBox(walker)
header = urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices)+1))
super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions["Warrior"])])), header=header, focus_part="header")
def select_class(self, button):
index = min(self.listbox.focus_position, len(self.choices)-1)
choice = self.choices[index]
self.mind.master.new_player(self.mind.avatar.uuid, choice)
self.parent.start_game_frame()
def update_description(self):
index = min(self.listbox.focus_position, len(self.choices)-1)
choice = self.choices[index]
self.contents["body"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None)
class GameFrame(UiFrame):
def __init__(self, parent, mind):
self.mind = mind
_header = urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text("")])), self.header_height))
self._menu_view = True
self.map = MapFrame(self, mind)
self.menu = MenuFrame(self, mind)
super().__init__(parent, mind, urwid.Columns([(self.map_width, self.map), (self.menu_width, self.menu)], focus_column=1), header=_header, footer=None, focus_part="body")
self.menu_view = True
self.update_footer()
self.header_widget = self.header.original_widget.box_widget
self.footer_content_size = 0
@property
def header_height(self):
return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8)
@property
def menu_width(self):
if self.menu_view:
return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7)
return 0
@property
def map_width(self):
if self.menu_view:
return self.mind.screen_size[0] - self.menu_width
return self.mind.screen_size[0]
@property
def body_width(self):
return self.mind.screen_size[0]
@property
def body_height(self):
return self.mind.screen_size[1] - self.header_height - FOOTER_HEIGHT - 2
@property
def menu_view(self):
return self._menu_view
@menu_view.setter
def menu_view(self, value):
self._menu_view = value
_columns = [(self.map_width, self.map), (self.menu_width, self.menu)]
self.contents["body"] = (urwid.Columns(_columns, focus_column=1), None)
@property
def header_list(self):
return sorted([ent for k, ent in self.player.location.entities.items() if distance(self.player.position, ent.position) <= 3 and ent.status], key=lambda ent: distance(self.player.position, ent.position))
def update_footer(self):
_size = 0
inv_btns = []
for i, obj in self.player.inventory.content.items():
if obj:
_size += 1
if obj.is_equipment and obj.is_equipped:
_marker = ["[", (obj.color, f"{obj.marker[0]}"), "]"]
elif obj.is_equipment and not obj.is_equipped:
_marker = ["]", (obj.color, f"{obj.marker[0]}"), "["]
elif obj.is_consumable:
_marker = ["(", (obj.color, f"{obj.marker[0]}"), ")"]
else:
_marker = [f" {obj.marker[0]} "]
else:
_marker = [f" "]
if i < 9:
_num = f"\n {i+1} "
elif i == 9:
_num = "\n 0 "
elif i == 10:
_num = "\n - "
elif i == 11:
_num = "\n = "
if obj and obj is self.player.inventory.selection:
_marker += [("line", _num)]
else:
_marker += [("top", _num)]
btn = urwid.Text(_marker, align="center")
inv_btns.append((5, urwid.LineBox(btn)))
if self.mind.screen_size != (80, 24):
inv_btns.append(urwid.Text("\nSET TERMINAL\nTO 80X24", align="center"))
self.contents["footer"] = (SelectableColumns(inv_btns, dividechars=0), None)
self.footer_content_size = _size
def on_update(self):
self.update_header()
if self.footer_content_size != len(self.player.inventory.all):
self.update_footer()
if self.mind.screen_size != (80, 24):
self.update_footer()
self.map.on_update()
if self.menu_view:
self.menu.on_update()
def handle_input(self, _input):
if _input == "tab":
self.menu_view = not self.menu_view
elif _input == "enter" and self.player.inventory.selection:
self.player.use_quick_item(self.player.inventory.selection)
self.update_footer()
elif _input == "Q" and self.player.inventory.selection:
self.player.actions["drop"].use(self.player, obj=self.player.inventory.selection)
self.update_footer()
elif _input.isnumeric() or _input in ("-", "="):
self.select_item(_input)
self.update_footer()
elif _input == self.mind.key_map["status-menu"] and self.menu_view:
self.menu.update_body("Status")
elif _input == self.mind.key_map["help-menu"] and self.menu_view:
self.menu.update_body("Help")
elif _input == self.mind.key_map["equipment-menu"] and self.menu_view:
self.menu.update_body("Equipment")
elif _input == self.mind.key_map["inventory-menu"] and self.menu_view:
self.menu.update_body("Inventory")
else:
self.map.handle_input(_input)
def select_item(self, _input):
if _input.isnumeric() and int(_input) > 0:
_input = int(_input)-1
elif _input == "0":
            _input = 9  # "0" selects the tenth inventory slot (index 9)
elif _input == "-":
_input = 10
elif _input == "=":
_input = 11
self.player.inventory.selection = self.player.inventory.get(_input)
def update_header(self):
widgets = []
for p in self.header_list:
widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap="clip"), {self.player.id:"player"}), {p.id:"other" for i, p in self.mind.master.players.items()}))
if widgets:
self.header_widget.body[:] = widgets
class MapFrame(UiFrame):
def __init__(self, parent, mind):
map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.map_box = map_box.body
self.layer_view = -1
self.debug_view = False
super().__init__(parent, mind, map_box)
self.on_update()
@property
def visible_range(self):
header_height = self.parent.header_height + 2
tot_rows = self.mind.screen_size[1]
return (tot_rows - header_height - FOOTER_HEIGHT)
def on_update(self):
if self.layer_view == -1:
_map = copy.deepcopy(self.player.location.map)
else:
_map = self.player.location.layer_from_entities(self.layer_view, self.debug_view)
x, y, z = self.player.position
w = max(0, y - self.parent.body_width//3)
visible_map = [line[w:w+self.parent.body_width] for line in _map]
h = max(0, x - self.parent.body_height//2)
if h+self.parent.body_height >= len(visible_map):
visible_map = visible_map[len(visible_map)-self.parent.body_height:]
else:
visible_map = visible_map[h:h+self.parent.body_height]
map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap="clip"), {self.player.id:"player"}), {p.id:"other" for i, p in self.mind.master.players.items()}) for line in visible_map]
self.map_box[:] = map_with_attr
def handle_input(self, _input):
if _input == "ctrl f":
self.debug_view = not self.debug_view
elif _input == "ctrl v":
self.layer_view = self.layer_view + 1
if self.layer_view > 2:
self.layer_view = -1
elif _input in self.mind.key_map:
_action = self.mind.key_map[_input]
self.player.handle_input(_action)
class MenuFrame(UiFrame):
def __init__(self, parent, mind):
_frames = ("Inventory", "Status", "Equipment", "Help")
self.bodies = {b : globals()[f"{b}Frame"](self, mind) for b in _frames}
idx = -1
_title = _frames[idx]
self.active_body = self.bodies[_title]
super().__init__(parent, mind, urwid.LineBox(self.active_body, title=_title))
def on_update(self):
self.active_body.on_update()
def selectable(self):
return False
def update_body(self, _title):
self.active_body = self.bodies[_title]
self.contents["body"] = (urwid.LineBox(self.active_body, title=_title), None)
class InventoryFrame(UiFrame):
def __init__(self, parent, mind):
columns = urwid.Columns([urwid.Text("")])
box = urwid.ListBox(urwid.SimpleListWalker([columns]))
self.box = box.body
self.default_header = urwid.Text("0/9-= to select\n\n", align="center")
self.default_footer = urwid.Text([("green", f"{'Enter:use/eqp':<14s}"), ("yellow", "Q:drop")], align="center")
super().__init__(parent, mind, box, header=self.default_header, footer=self.default_footer)
@property
def selection_data(self):
if not self.player.inventory.selection:
return urwid.Text("")
i = self.player.inventory.selection
_text = []
_text += [i.eq_description, f"\nEncumbrance:{i.encumbrance}\n"]
return urwid.Text(_text)
def update_header(self):
if not self.player.inventory.selection:
self.contents["header"] = (self.default_header, None)
else:
i = self.player.inventory.selection
self.contents["header"] = (urwid.Text([(i.color, f"{i.name}\n"), f"{i.description}\n"], align="center"), None)
def update_footer(self):
if not self.player.inventory.selection:
self.contents["footer"] = (self.default_footer, None)
else:
i = self.player.inventory.selection
_text = []
if not i.requisites(self.player):
_text += [("red", f"{'Cannot equip':<14s}")]
elif not i.is_equipped:
_text += [("green", f"{'Enter:equip':<14s}")]
elif i.is_equipped:
_text += [("green", f"{'Enter:unequip':<14s}")]
elif i.is_consumable:
_text += [("green", f"{'Enter:use':<14s}")]
_text += [("yellow", "Q:drop")]
self.contents["footer"] = (urwid.Text(_text, align="center"), None)
def update_body(self):
side = urwid.Text("║")
width = 8
height = 6
_marker_box = ["╔" +"═"*width+"╗\n"]
for x in range(height):
_marker_box += ["║"]
for y in range(width):
_marker_box += ["."]
_marker_box += ["║\n"]
_marker_box += ["╚" +"═"*width+"╝"]
if self.player.inventory.selection:
i = self.player.inventory.selection
X_OFFSET = 2
Y_OFFSET = 4
for m, pos in zip(i.in_inventory_markers, i.in_inventory_marker_positions):
x, y = pos
_marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET] = (i.color, m)
self.box[:] = [urwid.Columns([(width+2, urwid.Text(_marker_box)), self.selection_data], dividechars=1)]
def on_update(self):
self.update_header()
self.update_body()
self.update_footer()
class StatusFrame(UiFrame):
def __init__(self, parent, mind):
box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.box = box.body
super().__init__(parent, mind, box)
def on_update(self):
player = self.player
x, y, z = player.position
_top = f"{player.name:<12s} {player.game_class.name:<10s}\nLev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\n"
_left = []
for s in CHARACTERISTICS:
c = getattr(player, s)
state = ["normal", "positive", "negative"][-int(c.temp_bonus < 0) + int(c.temp_bonus > 0)]
if self.parent.parent.menu_width > 40:
_name = c.name[0].upper() + c.name[1:]
_left += [f"{_name:<12} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
elif self.parent.parent.menu_width > 36:
_name = c.name[0].upper() + c.name[1:6]
_left += [f"{_name:<6} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
else:
_left += [f"{s:<3} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
_right = []
base = player.STR.mod
weapon = player.equipment["main_hand"]
if not weapon:
min_dmg, max_dmg = (1, 4)
else:
number, value = weapon.dmg
min_dmg, max_dmg = (number * 1, number * value)
min_dmg = max(1, base + min_dmg)
max_dmg = max(1, base + max_dmg)
_right.append(f"Damage {min_dmg:>3d}-{max_dmg:<3d}\n")
_right.append(f"Reduction {player.dmg_reduction:<3d}\n")
_right.append(f"Encumb ")
if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance:
_right.append(("red", f"{player.inventory.encumbrance:>2d}"))
elif player.inventory.encumbrance > player.encumbrance:
_right.append(("yellow", f"{player.inventory.encumbrance:>2d}"))
else:
_right.append(("white", f"{player.inventory.encumbrance:>2d}"))
_right.append(f"/{player.encumbrance:<2d}\n")
_right.append(f"Speed {player.movement_speed}\n")
_right.append(f"Monsterized {player.MP:<2d}\n")
self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars = 1) ]
class EquipmentFrame(UiFrame):
def __init__(self, parent, mind):
box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.box = box.body
super().__init__(parent, mind, box)
def on_update(self):
player = self.player
_equipment = []
for t, obj in player.equipment.items():
_name = t.replace("_", " ")
_name = _name[0].upper() + _name[1:]
if obj:
_equipment += [urwid.Text([f"{_name}: ", (obj.color, f"{obj.name}")])]
else:
_equipment += [urwid.Text([f"{_name}: "])]
_bonus = {}
for eqp in player.equipment_set:
for b in set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())):
val = player.full_eqp_bonus(eqp, b)
if b not in _bonus:
_bonus[b] = val
else:
_bonus[b] += val
_top = ""
for b, val in _bonus.items():
if b == "dmg_reduction":
_top += f"Reduction:{val} "
else:
_top += f"{b}:{val} "
_top += "\n"
self.box[:] = [urwid.Text(_top)] + _equipment
class HelpFrame(UiFrame):
def __init__(self, parent, mind):
self.mind = mind
map_commands = ["Map commands\n\n", f"←→↑↓:move\n", f"shift+←→↑↓:dash\n", f"a:attack\n", f"q:pickup\n"]
class_action_keys = [k for k, act in self.mind.key_map.items() if act.startswith("class_ability")]
for i, act in enumerate(self.player.class_actions):
k = class_action_keys[i]
map_commands.append(f"{k}:{self.player.class_actions[act].description.lower()}\n")
menu_commands = ["Menu commands\n\n", f"tab:open/close\n",f"0/9-=:select item\n", f"ctrl+p:respawn\n", f"ctrl+a:inventory\n", f"ctrl+s:status\n", f"ctrl+d:help\n", f"ctrl+e:equipment\n"]
columns = urwid.Columns([urwid.Text(map_commands, wrap="clip"), urwid.Text(menu_commands, wrap="clip")], dividechars = 1)
super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([columns])))
class SelectableListBox(urwid.ListBox):
def __init__(self, body):
super(SelectableListBox, self).__init__(body)
def focus_next(self):
try:
self.focus_position += 1
except IndexError:
pass
def focus_previous(self):
try:
self.focus_position -= 1
except IndexError:
pass
class SelectableColumns(urwid.Columns):
def __init__(self, widget_list, focus_column=None, dividechars=0):
super().__init__(widget_list, dividechars, focus_column)
def focus_next(self):
try:
self.focus_position += 1
except:
pass
def focus_previous(self):
try:
self.focus_position -= 1
except:
pass
class FrameColumns(urwid.Columns):
def __init__(self, parent, widget_list, dividechars=0):
self.widget_size = len(widget_list)
super(FrameColumns, self).__init__(widget_list, dividechars)
self.parent = parent
def focus_next(self):
try:
self.focus_position += 1
if self.focus_position >= self.widget_size:
self.focus_position -= self.widget_size
new_body = [b for b in self.parent.bodies][self.focus_position]
self.parent.update_body(new_body)
except:
pass
def focus_previous(self):
try:
self.focus_position -= 1
if self.focus_position < 0:
self.focus_position += self.widget_size
new_body = [b for b in self.parent.bodies][self.focus_position]
self.parent.update_body(new_body)
except:
pass
class ButtonLabel(urwid.SelectableIcon):
def set_text(self, label):
'''
set_text is invoked by Button.set_label
'''
self.__super.set_text(label)
self._cursor_position = len(label) + 1
class MyButton(urwid.Button):
'''
- override __init__ to use our ButtonLabel instead of urwid.SelectableIcon
- make button_left and button_right plain strings and variable width -
any string, including an empty string, can be set and displayed
- otherwise, we leave Button behaviour unchanged
'''
button_left = "["
button_right = "]"
def __init__(self, label, on_press=None, user_data=None, borders=True, disabled=False):
self._label = ButtonLabel("")
if borders:
cols = urwid.Columns([
('fixed', len(self.button_left), urwid.Text(self.button_left)),
self._label,
('fixed', len(self.button_right), urwid.Text(self.button_right))],
dividechars=1)
else:
cols = urwid.Columns([self._label],
dividechars=0)
super(urwid.Button, self).__init__(cols)
self.disabled = disabled
if on_press:
urwid.connect_signal(self, 'click', on_press, user_data)
self.set_label(label)
self.lllavel = label
# @property
# def disabled(self):
# return self._disabled
# @disabled.setter
# def disabled(self, value):
# if self._disabled == value:
# return
# if self.disabled:
# urwid.AttrMap(self, "disabled")
# else:
# urwid.AttrMap(self, None, "line")
def selectable(self):
return not self.disabled
def attr_button(label, cmd=None, attr_map=None, focus_map = "line", align = "center", user_args = None, borders=True, disabled=False):
btn = create_button(label, cmd=cmd, align = align, user_args = user_args, borders=borders, disabled=disabled)
return urwid.AttrMap(btn, attr_map, focus_map=focus_map)
def create_button(label, cmd=None, align = "center", user_args = None, borders=True, disabled=False):
btn = MyButton(label, borders=borders, disabled=disabled)
btn._label.align = align
if cmd:
if user_args:
urwid.connect_signal(btn, "click", cmd, user_args = user_args)
else:
urwid.connect_signal(btn, "click", cmd)
return btn
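# --- Usage sketch (added for illustration; the callback and labels are made up) ---
# attr_button() wraps the button in an urwid.AttrMap so it is drawn with the "line"
# palette entry when focused; create_button() returns the bare MyButton instead.
#
#     def on_press(button):
#         pass
#
#     start_btn = attr_button("Start", cmd=on_press)              # renders as [Start]
#     quit_btn = create_button("Quit", cmd=on_press, borders=False)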
| [
"urwid.Columns",
"urwid.LineBox",
"urwid.raw_display.Screen",
"urwid.SimpleFocusListWalker",
"rpg_game.utils.distance",
"urwid.SimpleListWalker",
"urwid.connect_signal",
"copy.deepcopy",
"urwid.AttrMap",
"urwid.Text"
] | [((25258, 25307), 'urwid.AttrMap', 'urwid.AttrMap', (['btn', 'attr_map'], {'focus_map': 'focus_map'}), '(btn, attr_map, focus_map=focus_map)\n', (25271, 25307), False, 'import urwid\n'), ((183, 203), 'urwid.raw_display.Screen', 'raw_display.Screen', ([], {}), '()\n', (201, 203), False, 'from urwid import raw_display\n'), ((2100, 2128), 'urwid.AttrMap', 'urwid.AttrMap', (['self', '"""frame"""'], {}), "(self, 'frame')\n", (2113, 2128), False, 'import urwid\n'), ((5199, 5232), 'urwid.SimpleFocusListWalker', 'urwid.SimpleFocusListWalker', (['line'], {}), '(line)\n', (5226, 5232), False, 'import urwid\n'), ((5241, 5306), 'urwid.connect_signal', 'urwid.connect_signal', (['walker', '"""modified"""', 'self.update_description'], {}), "(walker, 'modified', self.update_description)\n", (5261, 5306), False, 'import urwid\n'), ((14459, 14508), 'urwid.Text', 'urwid.Text', (['"""0/9-= to select\n\n"""'], {'align': '"""center"""'}), "('0/9-= to select\\n\\n', align='center')\n", (14469, 14508), False, 'import urwid\n'), ((14539, 14631), 'urwid.Text', 'urwid.Text', (['[(\'green\', f"{\'Enter:use/eqp\':<14s}"), (\'yellow\', \'Q:drop\')]'], {'align': '"""center"""'}), '([(\'green\', f"{\'Enter:use/eqp\':<14s}"), (\'yellow\', \'Q:drop\')],\n align=\'center\')\n', (14549, 14631), False, 'import urwid\n'), ((15009, 15026), 'urwid.Text', 'urwid.Text', (['_text'], {}), '(_text)\n', (15019, 15026), False, 'import urwid\n'), ((16148, 16163), 'urwid.Text', 'urwid.Text', (['"""║"""'], {}), "('║')\n", (16158, 16163), False, 'import urwid\n'), ((6516, 6609), 'urwid.Columns', 'urwid.Columns', (['[(self.map_width, self.map), (self.menu_width, self.menu)]'], {'focus_column': '(1)'}), '([(self.map_width, self.map), (self.menu_width, self.menu)],\n focus_column=1)\n', (6529, 6609), False, 'import urwid\n'), ((7759, 7798), 'urwid.Columns', 'urwid.Columns', (['_columns'], {'focus_column': '(1)'}), '(_columns, focus_column=1)\n', (7772, 7798), False, 'import urwid\n'), ((9168, 9203), 'urwid.Text', 'urwid.Text', (['_marker'], {'align': '"""center"""'}), "(_marker, align='center')\n", (9178, 9203), False, 'import urwid\n'), ((12312, 12351), 'copy.deepcopy', 'copy.deepcopy', (['self.player.location.map'], {}), '(self.player.location.map)\n', (12325, 12351), False, 'import time, os, copy\n'), ((13886, 13931), 'urwid.LineBox', 'urwid.LineBox', (['self.active_body'], {'title': '_title'}), '(self.active_body, title=_title)\n', (13899, 13931), False, 'import urwid\n'), ((14160, 14205), 'urwid.LineBox', 'urwid.LineBox', (['self.active_body'], {'title': '_title'}), '(self.active_body, title=_title)\n', (14173, 14205), False, 'import urwid\n'), ((14366, 14399), 'urwid.SimpleListWalker', 'urwid.SimpleListWalker', (['[columns]'], {}), '([columns])\n', (14388, 14399), False, 'import urwid\n'), ((14844, 14858), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (14854, 14858), False, 'import urwid\n'), ((19285, 19301), 'urwid.Text', 'urwid.Text', (['_top'], {}), '(_top)\n', (19295, 19301), False, 'import urwid\n'), ((24315, 24358), 'urwid.Columns', 'urwid.Columns', (['[self._label]'], {'dividechars': '(0)'}), '([self._label], dividechars=0)\n', (24328, 24358), False, 'import urwid\n'), ((24492, 24548), 'urwid.connect_signal', 'urwid.connect_signal', (['self', '"""click"""', 'on_press', 'user_data'], {}), "(self, 'click', on_press, user_data)\n", (24512, 24548), False, 'import urwid\n'), ((25549, 25609), 'urwid.connect_signal', 'urwid.connect_signal', (['btn', '"""click"""', 'cmd'], {'user_args': 'user_args'}), "(btn, 'click', cmd, 
user_args=user_args)\n", (25569, 25609), False, 'import urwid\n'), ((25638, 25677), 'urwid.connect_signal', 'urwid.connect_signal', (['btn', '"""click"""', 'cmd'], {}), "(btn, 'click', cmd)\n", (25658, 25677), False, 'import urwid\n'), ((9333, 9389), 'urwid.Text', 'urwid.Text', (['"""\nSET TERMINAL\nTO 80X24"""'], {'align': '"""center"""'}), '("""\nSET TERMINAL\nTO 80X24""", align=\'center\')\n', (9343, 9389), False, 'import urwid\n'), ((14320, 14334), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (14330, 14334), False, 'import urwid\n'), ((15272, 15348), 'urwid.Text', 'urwid.Text', (["[(i.color, f'{i.name}\\n'), f'{i.description}\\n']"], {'align': '"""center"""'}), "([(i.color, f'{i.name}\\n'), f'{i.description}\\n'], align='center')\n", (15282, 15348), False, 'import urwid\n'), ((16063, 16096), 'urwid.Text', 'urwid.Text', (['_text'], {'align': '"""center"""'}), "(_text, align='center')\n", (16073, 16096), False, 'import urwid\n'), ((20593, 20609), 'urwid.Text', 'urwid.Text', (['_top'], {}), '(_top)\n', (20603, 20609), False, 'import urwid\n'), ((21359, 21396), 'urwid.Text', 'urwid.Text', (['map_commands'], {'wrap': '"""clip"""'}), "(map_commands, wrap='clip')\n", (21369, 21396), False, 'import urwid\n'), ((21398, 21436), 'urwid.Text', 'urwid.Text', (['menu_commands'], {'wrap': '"""clip"""'}), "(menu_commands, wrap='clip')\n", (21408, 21436), False, 'import urwid\n'), ((21509, 21542), 'urwid.SimpleListWalker', 'urwid.SimpleListWalker', (['[columns]'], {}), '([columns])\n', (21531, 21542), False, 'import urwid\n'), ((3234, 3265), 'urwid.LineBox', 'urwid.LineBox', (['self.active_body'], {}), '(self.active_body)\n', (3247, 3265), False, 'import urwid\n'), ((3332, 3376), 'urwid.LineBox', 'urwid.LineBox', (['self.active_body'], {'title': 'title'}), '(self.active_body, title=title)\n', (3345, 3376), False, 'import urwid\n'), ((8021, 8065), 'rpg_game.utils.distance', 'distance', (['self.player.position', 'ent.position'], {}), '(self.player.position, ent.position)\n', (8029, 8065), False, 'from rpg_game.utils import log, mod, distance\n'), ((9237, 9255), 'urwid.LineBox', 'urwid.LineBox', (['btn'], {}), '(btn)\n', (9250, 9255), False, 'import urwid\n'), ((11836, 11850), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (11846, 11850), False, 'import urwid\n'), ((12949, 12978), 'urwid.Text', 'urwid.Text', (['line'], {'wrap': '"""clip"""'}), "(line, wrap='clip')\n", (12959, 12978), False, 'import urwid\n'), ((17154, 17168), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (17164, 17168), False, 'import urwid\n'), ((19318, 19335), 'urwid.Text', 'urwid.Text', (['_left'], {}), '(_left)\n', (19328, 19335), False, 'import urwid\n'), ((19337, 19355), 'urwid.Text', 'urwid.Text', (['_right'], {}), '(_right)\n', (19347, 19355), False, 'import urwid\n'), ((19504, 19518), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (19514, 19518), False, 'import urwid\n'), ((19871, 19925), 'urwid.Text', 'urwid.Text', (["[f'{_name}: ', (obj.color, f'{obj.name}')]"], {}), "([f'{_name}: ', (obj.color, f'{obj.name}')])\n", (19881, 19925), False, 'import urwid\n'), ((19976, 20002), 'urwid.Text', 'urwid.Text', (["[f'{_name}: ']"], {}), "([f'{_name}: '])\n", (19986, 20002), False, 'import urwid\n'), ((5518, 5558), 'urwid.Text', 'urwid.Text', (["self.descriptions['Warrior']"], {}), "(self.descriptions['Warrior'])\n", (5528, 5558), False, 'import urwid\n'), ((6062, 6099), 'urwid.Text', 'urwid.Text', (['self.descriptions[choice]'], {}), '(self.descriptions[choice])\n', (6072, 6099), False, 'import 
urwid\n'), ((11516, 11549), 'urwid.Text', 'urwid.Text', (['p.status'], {'wrap': '"""clip"""'}), "(p.status, wrap='clip')\n", (11526, 11549), False, 'import urwid\n'), ((16855, 16878), 'urwid.Text', 'urwid.Text', (['_marker_box'], {}), '(_marker_box)\n', (16865, 16878), False, 'import urwid\n'), ((24108, 24136), 'urwid.Text', 'urwid.Text', (['self.button_left'], {}), '(self.button_left)\n', (24118, 24136), False, 'import urwid\n'), ((24218, 24247), 'urwid.Text', 'urwid.Text', (['self.button_right'], {}), '(self.button_right)\n', (24228, 24247), False, 'import urwid\n'), ((6306, 6320), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (6316, 6320), False, 'import urwid\n'), ((7938, 7982), 'rpg_game.utils.distance', 'distance', (['self.player.position', 'ent.position'], {}), '(self.player.position, ent.position)\n', (7946, 7982), False, 'from rpg_game.utils import log, mod, distance\n')] |
from numpy import inf, nan
from sklearn.linear_model import LinearRegression as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class LinearRegressionImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LinearRegression Ordinary least squares Linear Regression.",
"allOf": [
{
"type": "object",
"required": ["fit_intercept", "normalize", "copy_X", "n_jobs"],
"relevantToOptimizer": ["fit_intercept", "normalize", "copy_X"],
"additionalProperties": False,
"properties": {
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of jobs to use for the computation",
},
},
},
{
"XXX TODO XXX": "Parameter: n_jobs > only provide speedup for n_targets > 1 and sufficient large problems"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target values",
},
"sample_weight": {
"type": "array",
"items": {"type": "number"},
"description": "Individual weights for each sample ",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
set_docstrings(LinearRegressionImpl, _combined_schemas)
LinearRegression = make_operator(LinearRegressionImpl, _combined_schemas)
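# --- Usage sketch (illustrative; this follows lale's usual planned/trained operator
# --- flow and is not taken from this module) ---
#
#     import numpy as np
#     X = np.array([[0.0], [1.0], [2.0]])
#     y = np.array([0.0, 1.0, 2.0])
#     trained = LinearRegression(fit_intercept=True).fit(X, y)
#     predictions = trained.predict(X)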
| [
"lale.operators.make_operator",
"lale.docstrings.set_docstrings",
"sklearn.linear_model.LinearRegression"
] | [((4721, 4776), 'lale.docstrings.set_docstrings', 'set_docstrings', (['LinearRegressionImpl', '_combined_schemas'], {}), '(LinearRegressionImpl, _combined_schemas)\n', (4735, 4776), False, 'from lale.docstrings import set_docstrings\n'), ((4796, 4850), 'lale.operators.make_operator', 'make_operator', (['LinearRegressionImpl', '_combined_schemas'], {}), '(LinearRegressionImpl, _combined_schemas)\n', (4809, 4850), False, 'from lale.operators import make_operator\n'), ((307, 330), 'sklearn.linear_model.LinearRegression', 'Op', ([], {}), '(**self._hyperparams)\n', (309, 330), True, 'from sklearn.linear_model import LinearRegression as Op\n')] |
import numpy as np
from keras.applications.inception_v3 import InceptionV3
from keras.initializers import RandomNormal
from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D,
Cropping2D, Dense, Flatten, GlobalAveragePooling2D,
Input, Lambda, MaxPooling2D, Reshape, UpSampling2D,
ZeroPadding2D, ZeroPadding3D, add, concatenate)
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.models import Model
# Parameterized 2D Block Model
def BlockModel2D(input_shape, filt_num=16, numBlocks=3):
"""Creates a Block CED model for segmentation problems
Args:
        input_shape: a list or tuple of [rows,cols,channels] of input images
        filt_num: the number of filters in the first and last layers.
            This number is linearly increased and decreased throughout the model
        numBlocks: number of processing blocks. The larger the number the deeper the model
    Returns:
        An uninitialized Keras model
    Example usage: SegModel = BlockModel2D([256,256,1],filt_num=8)
Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
the rows/cols must be divisible by 2^numBlocks for skip connections
to match up properly
"""
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_clean_{}'.format(rr))(x)
skip_list.append(x)
# expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
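# --- Usage sketch (illustrative; the optimizer and loss are common choices for a
# --- single-channel sigmoid segmentation output, not settings from the original author) ---
#
#     model = BlockModel2D((256, 256, 1), filt_num=8, numBlocks=3)
#     model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
#     # model.fit(train_images, train_masks, batch_size=8, epochs=10)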
# Parameterized 2D Block Model
def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3):
"""Creates a Block model for pretraining on classification task
Args:
        input_shape: a list or tuple of [rows,cols,channels] of input images
        filt_num: the number of filters in the first and last layers.
            This number is linearly increased and decreased throughout the model
        numBlocks: number of processing blocks. The larger the number the deeper the model
    Returns:
        An uninitialized Keras model
    Example usage: ClassModel = BlockModel_Classifier([256,256,1],filt_num=8)
Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
the rows/cols must be divisible by 2^numBlocks for skip connections
to match up properly
"""
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_skip_{}'.format(rr))(x)
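        # the 'skip' substring in this layer name is what ConvertEncoderToCED below
        # keys on ('skip' in l.name) to recover the encoder's skip-connection outputs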
# average pooling
x = GlobalAveragePooling2D()(x)
# classifier
lay_out = Dense(1, activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
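# Hypothetical usage sketch (shapes and training settings are illustrative,
# not taken from the original source):
#   cls_model = BlockModel_Classifier([256, 256, 1], filt_num=8, numBlocks=3)
#   cls_model.compile(optimizer='adam', loss='binary_crossentropy')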
def ConvertEncoderToCED(model):
    # Returns a model with frozen encoder layers
    # and complementary, unfrozen decoder layers.
    # The returned model must be compiled again after this conversion.
    # get input layer
lay_input = model.input
# get skip connection layer outputs
skip_list = [l.output for l in model.layers if 'skip' in l.name]
numBlocks = len(skip_list)
filt_num = int(skip_list[0].shape[-1])
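    # recover the base filter count from the channel dimension of the first skip tensor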
x = model.layers[-3].output
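    # resume from the last encoder activation, i.e. the layer just before the
    # GlobalAveragePooling2D/Dense classifier head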
# freeze encoder layers
for layer in model.layers:
layer.trainable = False
use_bn = True
# make expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
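# Hypothetical usage sketch (illustrative only): after pretraining a classifier as
# above, freeze its encoder and recompile the resulting encoder-decoder model.
#   seg_model = ConvertEncoderToCED(cls_model)  # cls_model from the earlier sketch
#   seg_model.compile(optimizer='adam', loss='binary_crossentropy')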
def Inception_model(input_shape=(299, 299, 3)):
incep_model = InceptionV3(
include_top=False, weights=None, input_shape=input_shape, pooling='avg')
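    # weights=None builds a randomly initialised InceptionV3; pooling='avg' ends the
    # backbone with global average pooling, so its output is a feature vector per image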
input_layer = incep_model.input
incep_output = incep_model.output
# x = Conv2D(16, (3, 3), activation='relu')(incep_output)
# x = Flatten()(x)
x = Dense(1, activation='sigmoid')(incep_output)
return Model(inputs=input_layer, outputs=x)
| [
"keras.layers.Conv2D",
"numpy.max",
"keras.layers.Dense",
"keras.layers.Input",
"keras.models.Model",
"keras.applications.inception_v3.InceptionV3",
"keras.layers.GlobalAveragePooling2D",
"keras.layers.BatchNormalization",
"numpy.arange"
] | [((1786, 1810), 'numpy.max', 'np.max', (['input_shape[0:2]'], {}), '(input_shape[0:2])\n', (1792, 1810), True, 'import numpy as np\n'), ((2022, 2066), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'name': '"""input_layer"""'}), "(shape=input_shape, name='input_layer')\n", (2027, 2066), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((5990, 6015), 'keras.models.Model', 'Model', (['lay_input', 'lay_out'], {}), '(lay_input, lay_out)\n', (5995, 6015), False, 'from keras.models import Model\n'), ((7306, 7330), 'numpy.max', 'np.max', (['input_shape[0:2]'], {}), '(input_shape[0:2])\n', (7312, 7330), True, 'import numpy as np\n'), ((7542, 7586), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'name': '"""input_layer"""'}), "(shape=input_shape, name='input_layer')\n", (7547, 7586), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((9558, 9583), 'keras.models.Model', 'Model', (['lay_input', 'lay_out'], {}), '(lay_input, lay_out)\n', (9563, 9583), False, 'from keras.models import Model\n'), ((12241, 12266), 'keras.models.Model', 'Model', (['lay_input', 'lay_out'], {}), '(lay_input, lay_out)\n', (12246, 12266), False, 'from keras.models import Model\n'), ((12335, 12423), 'keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'include_top': '(False)', 'weights': 'None', 'input_shape': 'input_shape', 'pooling': '"""avg"""'}), "(include_top=False, weights=None, input_shape=input_shape,\n pooling='avg')\n", (12346, 12423), False, 'from keras.applications.inception_v3 import InceptionV3\n'), ((12652, 12688), 'keras.models.Model', 'Model', ([], {'inputs': 'input_layer', 'outputs': 'x'}), '(inputs=input_layer, outputs=x)\n', (12657, 12688), False, 'from keras.models import Model\n'), ((5914, 5974), 'keras.layers.Conv2D', 'Conv2D', (['(1)', '(1, 1)'], {'activation': '"""sigmoid"""', 'name': '"""output_layer"""'}), "(1, (1, 1), activation='sigmoid', name='output_layer')\n", (5920, 5974), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((9432, 9456), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (9454, 9456), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((9491, 9542), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""output_layer"""'}), "(1, activation='sigmoid', name='output_layer')\n", (9496, 9542), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((12165, 12225), 'keras.layers.Conv2D', 'Conv2D', (['(1)', '(1, 1)'], {'activation': '"""sigmoid"""', 'name': '"""output_layer"""'}), "(1, (1, 1), activation='sigmoid', name='output_layer')\n", 
(12171, 12225), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((12596, 12626), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (12601, 12626), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((2311, 2331), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2329, 2331), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((2530, 2550), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2548, 2550), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((2753, 2773), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2771, 2773), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((2982, 3002), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3000, 3002), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((3272, 3292), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3290, 3292), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((3582, 3602), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3600, 3602), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((3807, 3827), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3825, 3827), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((4312, 4332), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4330, 4332), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((4534, 4554), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4552, 4554), False, 'from keras.layers import 
BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((4760, 4780), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4778, 4780), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((4992, 5012), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5010, 5012), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((5286, 5306), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5304, 5306), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((5580, 5600), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5598, 5600), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((5805, 5825), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5823, 5825), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((7831, 7851), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7849, 7851), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((8050, 8070), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8068, 8070), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((8273, 8293), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8291, 8293), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((8502, 8522), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8520, 8522), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((8792, 8812), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8810, 8812), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, 
GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((9102, 9122), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9120, 9122), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((9327, 9347), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9345, 9347), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((10563, 10583), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (10581, 10583), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((10785, 10805), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (10803, 10805), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((11011, 11031), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11029, 11031), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((11243, 11263), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11261, 11263), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((11537, 11557), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11555, 11557), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((11831, 11851), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11849, 11851), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((12056, 12076), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (12074, 12076), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((1846, 1873), 'numpy.arange', 'np.arange', (['(1)', '(numBlocks + 1)'], {}), '(1, numBlocks + 1)\n', (1855, 1873), True, 'import numpy as np\n'), ((7366, 7393), 'numpy.arange', 'np.arange', (['(1)', '(numBlocks + 1)'], {}), '(1, numBlocks + 1)\n', (7375, 7393), True, 'import numpy 
as np\n')] |
#!/usr/bin/env python
"""Drop and create a new database with schema."""
from sqlalchemy_utils.functions import database_exists, create_database, drop_database
from flunkybot.db import engine, base
from flunkybot.models import * # noqa
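# importing the models registers their tables on base.metadata,
# so drop_all()/create_all() below can see them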
db_url = engine.url
if database_exists(db_url):
drop_database(db_url)
create_database(db_url)
base.metadata.drop_all()
base.metadata.create_all()
| [
"sqlalchemy_utils.functions.create_database",
"sqlalchemy_utils.functions.drop_database",
"sqlalchemy_utils.functions.database_exists",
"flunkybot.db.base.metadata.drop_all",
"flunkybot.db.base.metadata.create_all"
] | [((257, 280), 'sqlalchemy_utils.functions.database_exists', 'database_exists', (['db_url'], {}), '(db_url)\n', (272, 280), False, 'from sqlalchemy_utils.functions import database_exists, create_database, drop_database\n'), ((308, 331), 'sqlalchemy_utils.functions.create_database', 'create_database', (['db_url'], {}), '(db_url)\n', (323, 331), False, 'from sqlalchemy_utils.functions import database_exists, create_database, drop_database\n'), ((334, 358), 'flunkybot.db.base.metadata.drop_all', 'base.metadata.drop_all', ([], {}), '()\n', (356, 358), False, 'from flunkybot.db import engine, base\n'), ((359, 385), 'flunkybot.db.base.metadata.create_all', 'base.metadata.create_all', ([], {}), '()\n', (383, 385), False, 'from flunkybot.db import engine, base\n'), ((286, 307), 'sqlalchemy_utils.functions.drop_database', 'drop_database', (['db_url'], {}), '(db_url)\n', (299, 307), False, 'from sqlalchemy_utils.functions import database_exists, create_database, drop_database\n')] |
"""This script renames the forthcoming section in changelog files with the upcoming version and the current date"""
from __future__ import print_function
import argparse
import datetime
import docutils.core
import os
import re
import sys
from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path
from catkin_pkg.changelog_generator import FORTHCOMING_LABEL
from catkin_pkg.package_version import bump_version
from catkin_pkg.packages import find_packages, verify_equal_package_versions
def get_forthcoming_label(rst):
document = docutils.core.publish_doctree(rst)
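    # parse the changelog RST into a docutils node tree so section titles can be inspected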
forthcoming_label = None
for child in document.children:
title = None
if isinstance(child, docutils.nodes.subtitle):
title = child
elif isinstance(child, docutils.nodes.section):
section = child
if len(section.children) > 0 and isinstance(section.children[0], docutils.nodes.title):
title = section.children[0]
if title and len(title.children) > 0 and isinstance(title.children[0], docutils.nodes.Text):
title_text = title.children[0].rawsource
if FORTHCOMING_LABEL.lower() in title_text.lower():
if forthcoming_label:
raise RuntimeError('Found multiple forthcoming sections')
forthcoming_label = title_text
return forthcoming_label
def rename_section(data, old_label, new_label):
valid_section_characters = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
def replace_section(match):
section_char = match.group(2)[0]
return new_label + '\n' + section_char * len(new_label)
pattern = '^(' + re.escape(old_label) + ')\n([' + re.escape(valid_section_characters) + ']+)$'
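    # matches the old section title line followed by its RST underline (a run of
    # section punctuation characters); the underline is rebuilt to the new label's length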
data, count = re.subn(pattern, replace_section, data, flags=re.MULTILINE)
if count == 0:
raise RuntimeError('Could not find section')
if count > 1:
raise RuntimeError('Found multiple matching sections')
return data
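# Illustrative example (values are made up): rewriting a forthcoming header in place.
#   rst = 'Forthcoming\n-----------\n* fixed a bug\n'
#   rst = rename_section(rst, 'Forthcoming', '0.1.1 (2024-01-01)')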
def main(sysargs=None):
parser = argparse.ArgumentParser(description='Tag the forthcoming section in the changelog files with an upcoming version number')
parser.add_argument('--bump', choices=('major', 'minor', 'patch'), default='patch', help='Which part of the version number to bump? (default: %(default)s)')
args = parser.parse_args(sysargs)
base_path = '.'
# find packages
packages = find_packages(base_path)
if not packages:
raise RuntimeError('No packages found')
print('Found packages: %s' % ', '.join([p.name for p in packages.values()]))
# fetch current version and verify that all packages have same version number
old_version = verify_equal_package_versions(packages.values())
new_version = bump_version(old_version, args.bump)
print('Tag version %s' % new_version)
# check for changelog entries
changelogs = []
missing_forthcoming = []
already_tagged = []
for pkg_path, package in packages.items():
changelog_path = os.path.join(base_path, pkg_path, CHANGELOG_FILENAME)
if not os.path.exists(changelog_path):
missing_forthcoming.append(package.name)
continue
changelog = get_changelog_from_path(changelog_path, package.name)
if not changelog:
missing_forthcoming.append(package.name)
continue
# check that forthcoming section exists
forthcoming_label = get_forthcoming_label(changelog.rst)
if not forthcoming_label:
missing_forthcoming.append(package.name)
continue
# check that new_version section does not exist yet
try:
changelog.get_content_of_version(new_version)
already_tagged.append(package.name)
continue
except KeyError:
pass
changelogs.append((package.name, changelog_path, changelog, forthcoming_label))
if missing_forthcoming:
print('The following packages do not have a forthcoming section in their changelog file: %s' % ', '.join(sorted(missing_forthcoming)), file=sys.stderr)
if already_tagged:
print("The following packages do already have a section '%s' in their changelog file: %s" % (new_version, ', '.join(sorted(already_tagged))), file=sys.stderr)
# rename forthcoming sections to new_version including current date
new_changelog_data = []
new_label = '%s (%s)' % (new_version, datetime.date.today().isoformat())
for (pkg_name, changelog_path, changelog, forthcoming_label) in changelogs:
print("Renaming section '%s' to '%s' in package '%s'..." % (forthcoming_label, new_label, pkg_name))
data = rename_section(changelog.rst, forthcoming_label, new_label)
new_changelog_data.append((changelog_path, data))
print('Writing updated changelog files...')
for (changelog_path, data) in new_changelog_data:
with open(changelog_path, 'wb') as f:
f.write(data.encode('utf-8'))
| [
"os.path.exists",
"re.escape",
"catkin_pkg.packages.find_packages",
"argparse.ArgumentParser",
"os.path.join",
"catkin_pkg.changelog.get_changelog_from_path",
"datetime.date.today",
"catkin_pkg.changelog_generator.FORTHCOMING_LABEL.lower",
"catkin_pkg.package_version.bump_version",
"re.subn"
] | [((1770, 1829), 're.subn', 're.subn', (['pattern', 'replace_section', 'data'], {'flags': 're.MULTILINE'}), '(pattern, replace_section, data, flags=re.MULTILINE)\n', (1777, 1829), False, 'import re\n'), ((2038, 2169), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tag the forthcoming section in the changelog files with an upcoming version number"""'}), "(description=\n 'Tag the forthcoming section in the changelog files with an upcoming version number'\n )\n", (2061, 2169), False, 'import argparse\n'), ((2416, 2440), 'catkin_pkg.packages.find_packages', 'find_packages', (['base_path'], {}), '(base_path)\n', (2429, 2440), False, 'from catkin_pkg.packages import find_packages, verify_equal_package_versions\n'), ((2759, 2795), 'catkin_pkg.package_version.bump_version', 'bump_version', (['old_version', 'args.bump'], {}), '(old_version, args.bump)\n', (2771, 2795), False, 'from catkin_pkg.package_version import bump_version\n'), ((3018, 3071), 'os.path.join', 'os.path.join', (['base_path', 'pkg_path', 'CHANGELOG_FILENAME'], {}), '(base_path, pkg_path, CHANGELOG_FILENAME)\n', (3030, 3071), False, 'import os\n'), ((3213, 3266), 'catkin_pkg.changelog.get_changelog_from_path', 'get_changelog_from_path', (['changelog_path', 'package.name'], {}), '(changelog_path, package.name)\n', (3236, 3266), False, 'from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path\n'), ((1707, 1742), 're.escape', 're.escape', (['valid_section_characters'], {}), '(valid_section_characters)\n', (1716, 1742), False, 'import re\n'), ((3087, 3117), 'os.path.exists', 'os.path.exists', (['changelog_path'], {}), '(changelog_path)\n', (3101, 3117), False, 'import os\n'), ((1156, 1181), 'catkin_pkg.changelog_generator.FORTHCOMING_LABEL.lower', 'FORTHCOMING_LABEL.lower', ([], {}), '()\n', (1179, 1181), False, 'from catkin_pkg.changelog_generator import FORTHCOMING_LABEL\n'), ((1674, 1694), 're.escape', 're.escape', (['old_label'], {}), '(old_label)\n', (1683, 1694), False, 'import re\n'), ((4439, 4460), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (4458, 4460), False, 'import datetime\n')] |
import os
import unittest
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import torch.nn as nn
from machina.optims import DistributedAdamW
def init_processes(rank, world_size,
function, backend='tcp'):
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank,
world_size=world_size)
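    # every spawned process connects to the same master address/port (set above via
    # environment variables) and joins a process group of world_size ranks; note that
    # newer PyTorch releases no longer ship the 'tcp' backend, so 'gloo' is the usual
    # CPU choice there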
function(rank, world_size)
class TestDistributedAdamW(unittest.TestCase):
def test_step(self):
def _run(rank, world_size):
model = nn.Linear(10, 1)
optimizer = DistributedAdamW(
model.parameters())
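            # DistributedAdamW is expected to keep the AdamW-style update consistent
            # across the process group; see machina.optims for the exact semantics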
optimizer.zero_grad()
loss = model(torch.ones(10).float())
loss.backward()
optimizer.step()
processes = []
world_size = 4
for rank in range(world_size):
p = Process(target=init_processes,
args=(rank,
world_size,
_run))
p.start()
processes.append(p)
for p in processes:
p.join()
| [
"torch.multiprocessing.Process",
"torch.distributed.init_process_group",
"torch.nn.Linear",
"torch.ones"
] | [((354, 420), 'torch.distributed.init_process_group', 'dist.init_process_group', (['backend'], {'rank': 'rank', 'world_size': 'world_size'}), '(backend, rank=rank, world_size=world_size)\n', (377, 420), True, 'import torch.distributed as dist\n'), ((612, 628), 'torch.nn.Linear', 'nn.Linear', (['(10)', '(1)'], {}), '(10, 1)\n', (621, 628), True, 'import torch.nn as nn\n'), ((950, 1011), 'torch.multiprocessing.Process', 'Process', ([], {'target': 'init_processes', 'args': '(rank, world_size, _run)'}), '(target=init_processes, args=(rank, world_size, _run))\n', (957, 1011), False, 'from torch.multiprocessing import Process\n'), ((767, 781), 'torch.ones', 'torch.ones', (['(10)'], {}), '(10)\n', (777, 781), False, 'import torch\n')] |