id | repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url
---|---|---|---|---|---|---|---|---|---|---|---
int32 (0-252k) | stringlengths 7-55 | stringlengths 4-127 | stringlengths 1-88 | stringlengths 75-19.8k | stringclasses 1 (value: python) | stringlengths 51-19.8k | sequence | stringlengths 3-17.3k | sequence | stringlengths 40 | stringlengths 87-242
400 | gmr/queries | queries/pool.py | PoolManager.shutdown | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/pool.py#L653-L657 |

    def shutdown(cls):
        """Close all connections in all pools"""
        for pid in list(cls._pools.keys()):
            cls._pools[pid].shutdown()
        LOGGER.info('Shutdown complete, all pooled connections closed')
401 | gmr/queries | queries/pool.py | PoolManager.size | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/pool.py#L660-L669 |

    def size(cls, pid):
        """Return the number of connections in the pool

        :param str pid: The pool id
        :rtype: int

        """
        with cls._lock:
            cls._ensure_pool_exists(pid)
            return len(cls._pools[pid])
402 | gmr/queries | queries/pool.py | PoolManager.report | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/pool.py#L672-L682 |

    def report(cls):
        """Return the state of all of the registered pools.

        :rtype: dict

        """
        return {
            'timestamp': datetime.datetime.utcnow().isoformat(),
            'process': os.getpid(),
            'pools': dict([(i, p.report()) for i, p in cls._pools.items()])
        }
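A minimal usage sketch for `report()` above, assuming the `queries` package is installed and at least one pool has been registered; the JSON pretty-printing is illustrative, not part of the library:

```python
import json

from queries import pool

# PoolManager.report() is a classmethod; the dict it builds
# (timestamp, process id, per-pool stats) is JSON-serializable.
print(json.dumps(pool.PoolManager.report(), indent=2))
```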
403 | gmr/queries | queries/pool.py | PoolManager._maybe_remove_pool | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/pool.py#L697-L704 |

    def _maybe_remove_pool(cls, pid):
        """If the pool has no open connections, remove it

        :param str pid: The pool id to clean

        """
        if not len(cls._pools[pid]):
            del cls._pools[pid]
404 | gmr/queries | queries/session.py | Session.close | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L131-L145 |

    def close(self):
        """Explicitly close the connection and remove it from the connection
        pool if pooling is enabled. Raises an exception if the connection is
        already closed.

        :raises: psycopg2.InterfaceError

        """
        if not self._conn:
            raise psycopg2.InterfaceError('Connection not open')
        LOGGER.info('Closing connection %r in %s', self._conn, self.pid)
        self._pool_manager.free(self.pid, self._conn)
        self._pool_manager.remove_connection(self.pid, self._conn)

        # Un-assign the connection and cursor
        self._conn, self._cursor = None, None
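A short usage sketch for `close()`, assuming the `queries` package and a reachable PostgreSQL server at the illustrative URI below; the second `close()` triggers the documented `psycopg2.InterfaceError`:

```python
import psycopg2
import queries

session = queries.Session('postgresql://postgres@localhost:5432/postgres')
session.close()          # frees and removes the pooled connection
try:
    session.close()      # the connection is gone, so this raises
except psycopg2.InterfaceError as error:
    print('already closed:', error)
```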
405 | gmr/queries | queries/session.py | Session.pid | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L184-L191 |

    def pid(self):
        """Return the pool ID used for connection pooling.

        :rtype: str

        """
        return hashlib.md5(':'.join([self.__class__.__name__,
                                     self._uri]).encode('utf-8')).hexdigest()
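The pool ID is nothing more than an MD5 digest of the class name and connection URI, so it can be reproduced outside the class; a minimal sketch (`derive_pid` and its arguments are illustrative, not library API):

```python
import hashlib

def derive_pid(class_name, uri):
    """Mirror Session.pid: MD5 of 'ClassName:uri', hex-encoded."""
    return hashlib.md5(
        ':'.join([class_name, uri]).encode('utf-8')).hexdigest()

# Two sessions with the same class and URI hash to the same pool id.
print(derive_pid('Session', 'postgresql://postgres@localhost:5432/postgres'))
```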
406 | gmr/queries | queries/session.py | Session.set_encoding | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L221-L229 |

    def set_encoding(self, value=DEFAULT_ENCODING):
        """Set the client encoding for the session if the value specified
        is different from the current client encoding.

        :param str value: The encoding value to use

        """
        if self._conn.encoding != value:
            self._conn.set_client_encoding(value)
407 | gmr/queries | queries/session.py | Session._cleanup | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L258-L271 |

    def _cleanup(self):
        """Remove the connection from the stack, closing out the cursor"""
        if self._cursor:
            LOGGER.debug('Closing the cursor on %s', self.pid)
            self._cursor.close()
            self._cursor = None
        if self._conn:
            LOGGER.debug('Freeing %s in the pool', self.pid)
            try:
                pool.PoolManager.instance().free(self.pid, self._conn)
            except pool.ConnectionNotFoundError:
                pass
            self._conn = None
408 | gmr/queries | queries/session.py | Session._get_cursor | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L309-L324 |

    def _get_cursor(self, connection, name=None):
        """Return a cursor for the given cursor_factory. Specify a name to
        use server-side cursors.

        :param connection: The connection to create a cursor on
        :type connection: psycopg2.extensions.connection
        :param str name: A cursor name for a server side cursor
        :rtype: psycopg2.extensions.cursor

        """
        cursor = connection.cursor(name=name,
                                   cursor_factory=self._cursor_factory)
        if name is not None:
            cursor.scrollable = True
            cursor.withhold = True
        return cursor
409 | gmr/queries | queries/session.py | Session._register_unicode | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L345-L355 |

    def _register_unicode(connection):
        """Register the cursor to be able to receive Unicode strings.

        :type connection: psycopg2.extensions.connection
        :param connection: Where to register things

        """
        psycopg2.extensions.register_type(psycopg2.extensions.UNICODE,
                                          connection)
        psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY,
                                          connection)
410 | gmr/queries | queries/session.py | Session._status | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L368-L382 |

    def _status(self):
        """Return the current connection status as an integer value.

        The status should match one of the following constants:

        - queries.Session.INTRANS: Connection established, in transaction
        - queries.Session.PREPARED: Prepared for second phase of transaction
        - queries.Session.READY: Connected, no active transaction

        :rtype: int

        """
        if self._conn.status == psycopg2.extensions.STATUS_BEGIN:
            return self.READY
        return self._conn.status
411 | gmr/queries | queries/results.py | Results.items | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/results.py#L98-L108 |

    def items(self):
        """Return all of the rows that are in the result set.

        :rtype: list

        """
        if not self.cursor.rowcount:
            return []
        self.cursor.scroll(0, 'absolute')
        return self.cursor.fetchall()
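A usage sketch for `items()`, assuming the `queries` package, a reachable server (the URI is illustrative), and that `Session.query()` returns a `Results` object as in this library:

```python
import pprint
import queries

with queries.Session('postgresql://postgres@localhost:5432/postgres') as session:
    results = session.query('SELECT 1 AS value, 2 AS other')
    # items() rewinds the cursor and fetches every row.
    pprint.pprint(results.items())  # e.g. [{'value': 1, 'other': 2}]
```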
412 | gmr/queries | queries/utils.py | get_current_user | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/utils.py#L57-L69 |

    def get_current_user():
        """Return the current username for the logged-in user

        :rtype: str

        """
        if pwd is None:
            return getpass.getuser()
        else:
            try:
                return pwd.getpwuid(os.getuid())[0]
            except KeyError as error:
                LOGGER.error('Could not get logged-in user: %s', error)
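The `pwd` module is POSIX-only, which is why the function guards on `pwd is None`; a self-contained version of the same fallback pattern (here returning the `getpass` name instead of logging when the uid has no passwd entry):

```python
import getpass
import os

try:
    import pwd  # POSIX only; unavailable on Windows
except ImportError:
    pwd = None

def current_user():
    """Best-effort username lookup, mirroring get_current_user above."""
    if pwd is None:
        return getpass.getuser()
    try:
        return pwd.getpwuid(os.getuid())[0]
    except KeyError:
        return getpass.getuser()  # uid without a passwd entry

print(current_user())
```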
413 | gmr/queries | queries/utils.py | uri | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/utils.py#L82-L98 |

    def uri(host='localhost', port=5432, dbname='postgres', user='postgres',
            password=None):
        """Return a PostgreSQL connection URI for the specified values.

        :param str host: Host to connect to
        :param int port: Port to connect on
        :param str dbname: The database name
        :param str user: User to connect as
        :param str password: The password to use, None for no password
        :return str: The PostgreSQL connection URI

        """
        if port:
            host = '%s:%s' % (host, port)
        if password:
            return 'postgresql://%s:%s@%s/%s' % (user, password, host, dbname)
        return 'postgresql://%s@%s/%s' % (user, host, dbname)
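Expected outputs for `uri()`, following the string formatting above (note that the password branch does plain `%` interpolation, with no URL-escaping):

```python
from queries import utils

print(utils.uri())
# postgresql://postgres@localhost:5432/postgres
print(utils.uri('db1', 5433, 'sales', 'reader'))
# postgresql://reader@db1:5433/sales
print(utils.uri(password='s3cret'))
# postgresql://postgres:s3cret@localhost:5432/postgres
```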
414 | gmr/queries | queries/utils.py | uri_to_kwargs | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/utils.py#L101-L127 |

    def uri_to_kwargs(uri):
        """Return a URI as kwargs for connecting to PostgreSQL with psycopg2,
        applying default values for non-specified areas of the URI.

        :param str uri: The connection URI
        :rtype: dict

        """
        parsed = urlparse(uri)
        default_user = get_current_user()
        password = unquote(parsed.password) if parsed.password else None
        kwargs = {'host': parsed.hostname,
                  'port': parsed.port,
                  'dbname': parsed.path[1:] or default_user,
                  'user': parsed.username or default_user,
                  'password': password}
        values = parse_qs(parsed.query)
        if 'host' in values:
            kwargs['host'] = values['host'][0]
        for k in [k for k in values if k in KEYWORDS]:
            kwargs[k] = values[k][0] if len(values[k]) == 1 else values[k]
            try:
                if kwargs[k].isdigit():
                    kwargs[k] = int(kwargs[k])
            except AttributeError:
                pass
        return kwargs
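A round-trip sketch for `uri_to_kwargs()`; `connect_timeout` is assumed here to be among the module's `KEYWORDS` constant (not shown in this row), in which case its digit string is coerced to an int by the loop above:

```python
from queries import utils

kwargs = utils.uri_to_kwargs(
    'postgresql://reader:s3cret@localhost:5432/sales?connect_timeout=10')
print(kwargs)
# Expected shape, given the source above:
# {'host': 'localhost', 'port': 5432, 'dbname': 'sales',
#  'user': 'reader', 'password': 's3cret', 'connect_timeout': 10}
```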
415 | gmr/queries | queries/tornado_session.py | TornadoSession._ensure_pool_exists | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L165-L169 |

    def _ensure_pool_exists(self):
        """Create the pool in the pool manager if it does not exist."""
        if self.pid not in self._pool_manager:
            self._pool_manager.create(self.pid, self._pool_idle_ttl,
                                      self._pool_max_size, self._ioloop.time)
416 | gmr/queries | queries/tornado_session.py | TornadoSession._create_connection | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L269-L336 |

    def _create_connection(self, future):
        """Create a new PostgreSQL connection

        :param tornado.concurrent.Future future: future for new conn result

        """
        LOGGER.debug('Creating a new connection for %s', self.pid)

        # Create a new PostgreSQL connection
        kwargs = utils.uri_to_kwargs(self._uri)

        try:
            connection = self._psycopg2_connect(kwargs)
        except (psycopg2.Error, OSError, socket.error) as error:
            future.set_exception(error)
            return

        # Add the connection for use in _poll_connection
        fd = connection.fileno()
        self._connections[fd] = connection

        def on_connected(cf):
            """Invoked by the IOLoop when the future is complete for the
            connection

            :param Future cf: The future for the initial connection

            """
            if cf.exception():
                self._cleanup_fd(fd, True)
                future.set_exception(cf.exception())
            else:
                try:
                    # Add the connection to the pool
                    LOGGER.debug('Connection established for %s', self.pid)
                    self._pool_manager.add(self.pid, connection)
                except (ValueError, pool.PoolException) as err:
                    LOGGER.exception('Failed to add %r to the pool', self.pid)
                    self._cleanup_fd(fd)
                    future.set_exception(err)
                    return

                self._pool_manager.lock(self.pid, connection, self)

                # Added in because psycopg2cffi connects and leaves the
                # connection in a weird state: consts.STATUS_DATESTYLE,
                # returning from Connection._setup without setting the state
                # as const.STATUS_OK
                if utils.PYPY:
                    connection.status = extensions.STATUS_READY

                # Register the custom data types
                self._register_unicode(connection)
                self._register_uuid(connection)

                # Set the future result
                future.set_result(connection)

        # Add a future that fires once connected
        self._futures[fd] = concurrent.Future()
        self._ioloop.add_future(self._futures[fd], on_connected)

        # Add the connection to the IOLoop
        self._ioloop.add_handler(connection.fileno(),
                                 self._on_io_events,
                                 ioloop.IOLoop.WRITE)
417 | gmr/queries | queries/tornado_session.py | TornadoSession._exec_cleanup | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L408-L431 |

    def _exec_cleanup(self, cursor, fd):
        """Close the cursor, remove any references to the fd in internal state
        and remove the fd from the ioloop.

        :param psycopg2.extensions.cursor cursor: The cursor to close
        :param int fd: The connection file descriptor

        """
        LOGGER.debug('Closing cursor and cleaning %s', fd)
        try:
            cursor.close()
        except (psycopg2.Error, psycopg2.Warning) as error:
            LOGGER.debug('Error closing the cursor: %s', error)

        self._cleanup_fd(fd)

        # If the cleanup callback exists, remove it
        if self._cleanup_callback:
            self._ioloop.remove_timeout(self._cleanup_callback)

        # Create a new cleanup callback to clean the pool of idle connections
        self._cleanup_callback = self._ioloop.add_timeout(
            self._ioloop.time() + self._pool_idle_ttl + 1,
            self._pool_manager.clean, self.pid)
418 | gmr/queries | queries/tornado_session.py | TornadoSession._cleanup_fd | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L433-L450 |

    def _cleanup_fd(self, fd, close=False):
        """Ensure the socket is removed from the IOLoop, the
        connection stack, and futures stack.

        :param int fd: The fd # to cleanup

        """
        self._ioloop.remove_handler(fd)
        if fd in self._connections:
            try:
                self._pool_manager.free(self.pid, self._connections[fd])
            except pool.ConnectionNotFoundError:
                pass
            if close:
                self._connections[fd].close()
            del self._connections[fd]
        if fd in self._futures:
            del self._futures[fd]
419 | gmr/queries | queries/tornado_session.py | TornadoSession._on_io_events | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L468-L478 |

    def _on_io_events(self, fd=None, _events=None):
        """Invoked by Tornado's IOLoop when there are events for the fd

        :param int fd: The file descriptor for the event
        :param int _events: The events raised

        """
        if fd not in self._connections:
            LOGGER.warning('Received IO event for non-existing connection')
            return
        self._poll_connection(fd)
420 | gmr/queries | queries/tornado_session.py | TornadoSession._poll_connection | python | a68855013dc6aaf9ed7b6909a4701f8da8796a0a | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L480-L510 |

    def _poll_connection(self, fd):
        """Check with psycopg2 to see what action to take. If the state is
        POLL_OK, we should have a pending callback for that fd.

        :param int fd: The socket fd for the postgresql connection

        """
        try:
            state = self._connections[fd].poll()
        except (OSError, socket.error) as error:
            self._ioloop.remove_handler(fd)
            if fd in self._futures and not self._futures[fd].done():
                self._futures[fd].set_exception(
                    psycopg2.OperationalError('Connection error (%s)' % error)
                )
        except (psycopg2.Error, psycopg2.Warning) as error:
            if fd in self._futures and not self._futures[fd].done():
                self._futures[fd].set_exception(error)
        else:
            if state == extensions.POLL_OK:
                if fd in self._futures and not self._futures[fd].done():
                    self._futures[fd].set_result(True)
            elif state == extensions.POLL_WRITE:
                self._ioloop.update_handler(fd, ioloop.IOLoop.WRITE)
            elif state == extensions.POLL_READ:
                self._ioloop.update_handler(fd, ioloop.IOLoop.READ)
            elif state == extensions.POLL_ERROR:
                self._ioloop.remove_handler(fd)
                if fd in self._futures and not self._futures[fd].done():
                    self._futures[fd].set_exception(
                        psycopg2.Error('Poll Error'))
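`_poll_connection` is the IOLoop half of psycopg2's asynchronous protocol; outside Tornado the same state machine can be driven with `select()`, as in this blocking sketch (the connection string is illustrative, and the `async_` keyword needs psycopg2 >= 2.7):

```python
import select

import psycopg2
from psycopg2 import extensions

def wait_ready(connection):
    """Drive connection.poll() until POLL_OK, blocking on the socket."""
    while True:
        state = connection.poll()
        if state == extensions.POLL_OK:
            return
        elif state == extensions.POLL_WRITE:
            select.select([], [connection.fileno()], [])
        elif state == extensions.POLL_READ:
            select.select([connection.fileno()], [], [])
        else:
            raise psycopg2.OperationalError('poll() returned %s' % state)

conn = psycopg2.connect('dbname=postgres user=postgres', async_=1)
wait_ready(conn)                 # finish the connection phase
cur = conn.cursor()
cur.execute('SELECT 1')
wait_ready(conn)                 # finish the query phase
print(cur.fetchone())
```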
421 | jquast/wcwidth | setup.py | main | python | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L271-L307 |

    def main():
        """Setup.py entry point."""
        import codecs
        setuptools.setup(
            name='wcwidth',
            version='0.1.7',
            description=("Measures number of Terminal column cells "
                         "of wide-character codes"),
            long_description=codecs.open(
                os.path.join(HERE, 'README.rst'), 'r', 'utf8').read(),
            author='Jeff Quast',
            author_email='[email protected]',
            license='MIT',
            packages=['wcwidth', 'wcwidth.tests'],
            url='https://github.com/jquast/wcwidth',
            include_package_data=True,
            test_suite='wcwidth.tests',
            zip_safe=True,
            classifiers=[
                'Intended Audience :: Developers',
                'Natural Language :: English',
                'Development Status :: 3 - Alpha',
                'Environment :: Console',
                'License :: OSI Approved :: MIT License',
                'Operating System :: POSIX',
                'Programming Language :: Python :: 2.7',
                'Programming Language :: Python :: 3.4',
                'Programming Language :: Python :: 3.5',
                'Topic :: Software Development :: Libraries',
                'Topic :: Software Development :: Localization',
                'Topic :: Software Development :: Internationalization',
                'Topic :: Terminals'
            ],
            keywords=['terminal', 'emulator', 'wcwidth', 'wcswidth', 'cjk',
                      'combining', 'xterm', 'console', ],
            cmdclass={'update': SetupUpdate},
        )
422 | jquast/wcwidth | setup.py | SetupUpdate._do_readme_update | python | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L78-L112 |

    def _do_readme_update(self):
        """Patch README.rst to reflect the data files used in release."""
        import codecs
        import glob

        # read in,
        data_in = codecs.open(
            os.path.join(HERE, 'README.rst'), 'r', 'utf8').read()

        # search for beginning and end positions,
        pos_begin = data_in.find(self.README_PATCH_FROM)
        assert pos_begin != -1, (pos_begin, self.README_PATCH_FROM)
        pos_begin += len(self.README_PATCH_FROM)
        pos_end = data_in.find(self.README_PATCH_TO)
        assert pos_end != -1, (pos_end, self.README_PATCH_TO)

        glob_pattern = os.path.join(HERE, 'data', '*.txt')
        file_descriptions = [
            self._describe_file_header(fpath)
            for fpath in glob.glob(glob_pattern)]

        # patch,
        data_out = (
            data_in[:pos_begin] +
            '\n\n' +
            '\n'.join(file_descriptions) +
            '\n\n' +
            data_in[pos_end:]
        )

        # write.
        print("patching {} ..".format(self.README_RST))
        codecs.open(
            self.README_RST, 'w', 'utf8').write(data_out)
423 | jquast/wcwidth | setup.py | SetupUpdate._do_east_asian | python | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L114-L122 |

    def _do_east_asian(self):
        """Fetch and update east-asian tables."""
        self._do_retrieve(self.EAW_URL, self.EAW_IN)
        (version, date, values) = self._parse_east_asian(
            fname=self.EAW_IN,
            properties=(u'W', u'F',)
        )
        table = self._make_table(values)
        self._do_write(self.EAW_OUT, 'WIDE_EASTASIAN', version, date, table)
424 | jquast/wcwidth | setup.py | SetupUpdate._do_zero_width | python | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L124-L132 |

    def _do_zero_width(self):
        """Fetch and update zero width tables."""
        self._do_retrieve(self.UCD_URL, self.UCD_IN)
        (version, date, values) = self._parse_category(
            fname=self.UCD_IN,
            categories=('Me', 'Mn',)
        )
        table = self._make_table(values)
        self._do_write(self.ZERO_OUT, 'ZERO_WIDTH', version, date, table)
425 | jquast/wcwidth | setup.py | SetupUpdate._make_table | python | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L135-L150 |

    def _make_table(values):
        """Return a tuple of lookup tables for given values."""
        import collections
        table = collections.deque()
        start, end = values[0], values[0]
        for num, value in enumerate(values):
            if num == 0:
                table.append((value, value,))
                continue
            start, end = table.pop()
            if end == value - 1:
                table.append((start, value,))
            else:
                table.append((start, end,))
                table.append((value, value,))
        return tuple(table)
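`_make_table` collapses a sorted list of code points into inclusive `(start, end)` ranges; a standalone copy of the same logic to show the behaviour on toy input:

```python
import collections

def make_table(values):
    """Collapse sorted ints into inclusive (start, end) ranges,
    mirroring SetupUpdate._make_table above."""
    table = collections.deque()
    for num, value in enumerate(values):
        if num == 0:
            table.append((value, value))
            continue
        start, end = table.pop()
        if end == value - 1:
            table.append((start, value))   # extend the current run
        else:
            table.append((start, end))     # close it and start a new run
            table.append((value, value))
    return tuple(table)

print(make_table([1, 2, 3, 7, 8, 10]))  # ((1, 3), (7, 8), (10, 10))
```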
426 | jquast/wcwidth | setup.py | SetupUpdate._do_retrieve | python | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L153-L167 |

    def _do_retrieve(url, fname):
        """Retrieve given url to target filepath fname."""
        folder = os.path.dirname(fname)
        if not os.path.exists(folder):
            os.makedirs(folder)
            print("{}/ created.".format(folder))
        if not os.path.exists(fname):
            with open(fname, 'wb') as fout:
                print("retrieving {}.".format(url))
                resp = urlopen(url)
                fout.write(resp.read())
            print("{} saved.".format(fname))
        else:
            print("re-using artifact {}".format(fname))
        return fname
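`_do_retrieve` is a download-once cache keyed on the target path; the same pattern, self-contained on Python 3 (the URL and destination are illustrative; setup.py's actual `EAW_URL` and input-path constants are defined elsewhere in the file):

```python
import os
from urllib.request import urlopen

def retrieve_once(url, fname):
    """Download url to fname unless a cached copy already exists."""
    folder = os.path.dirname(fname)
    if folder and not os.path.exists(folder):
        os.makedirs(folder)
    if not os.path.exists(fname):
        with open(fname, 'wb') as fout:
            fout.write(urlopen(url).read())
    return fname

retrieve_once('https://www.unicode.org/Public/UCD/latest/ucd/EastAsianWidth.txt',
              os.path.join('data', 'EastAsianWidth.txt'))
```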
427 | jquast/wcwidth | setup.py | SetupUpdate._parse_east_asian | python | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L180-L201 |

    def _parse_east_asian(fname, properties=(u'W', u'F',)):
        """Parse unicode east-asian width tables."""
        version, date, values = None, None, []
        print("parsing {} ..".format(fname))
        for line in open(fname, 'rb'):
            uline = line.decode('utf-8')
            if version is None:
                version = uline.split(None, 1)[1].rstrip()
                continue
            elif date is None:
                date = uline.split(':', 1)[1].rstrip()
                continue
            if uline.startswith('#') or not uline.lstrip():
                continue
            addrs, details = uline.split(';', 1)
            if any(details.startswith(property)
                   for property in properties):
                start, stop = addrs, addrs
                if '..' in addrs:
                    start, stop = addrs.split('..')
                values.extend(range(int(start, 16), int(stop, 16) + 1))
        return version, date, sorted(values)
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
continue
addrs, details = uline.split(';', 1)
if any(details.startswith(property)
for property in properties):
start, stop = addrs, addrs
if '..' in addrs:
start, stop = addrs.split('..')
values.extend(range(int(start, 16), int(stop, 16) + 1))
return version, date, sorted(values) | [
"def",
"_parse_east_asian",
"(",
"fname",
",",
"properties",
"=",
"(",
"u'W'",
",",
"u'F'",
",",
")",
")",
":",
"version",
",",
"date",
",",
"values",
"=",
"None",
",",
"None",
",",
"[",
"]",
"print",
"(",
"\"parsing {} ..\"",
".",
"format",
"(",
"fname",
")",
")",
"for",
"line",
"in",
"open",
"(",
"fname",
",",
"'rb'",
")",
":",
"uline",
"=",
"line",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"version",
"is",
"None",
":",
"version",
"=",
"uline",
".",
"split",
"(",
"None",
",",
"1",
")",
"[",
"1",
"]",
".",
"rstrip",
"(",
")",
"continue",
"elif",
"date",
"is",
"None",
":",
"date",
"=",
"uline",
".",
"split",
"(",
"':'",
",",
"1",
")",
"[",
"1",
"]",
".",
"rstrip",
"(",
")",
"continue",
"if",
"uline",
".",
"startswith",
"(",
"'#'",
")",
"or",
"not",
"uline",
".",
"lstrip",
"(",
")",
":",
"continue",
"addrs",
",",
"details",
"=",
"uline",
".",
"split",
"(",
"';'",
",",
"1",
")",
"if",
"any",
"(",
"details",
".",
"startswith",
"(",
"property",
")",
"for",
"property",
"in",
"properties",
")",
":",
"start",
",",
"stop",
"=",
"addrs",
",",
"addrs",
"if",
"'..'",
"in",
"addrs",
":",
"start",
",",
"stop",
"=",
"addrs",
".",
"split",
"(",
"'..'",
")",
"values",
".",
"extend",
"(",
"range",
"(",
"int",
"(",
"start",
",",
"16",
")",
",",
"int",
"(",
"stop",
",",
"16",
")",
"+",
"1",
")",
")",
"return",
"version",
",",
"date",
",",
"sorted",
"(",
"values",
")"
] | Parse unicode east-asian width tables. | [
"Parse",
"unicode",
"east",
"-",
"asian",
"width",
"tables",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L180-L201 |
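The EastAsianWidth.txt lines the parser above consumes have the shape "addr;property # comment". A minimal replay of the range-expansion step on two made-up lines (the comments are invented, not quoted from the Unicode file):

    sample = ["1100..115F;W   # made-up Hangul range",
              "2329;W         # made-up single codepoint"]
    values = []
    for uline in sample:
        addrs, details = uline.split(';', 1)
        if any(details.startswith(prop) for prop in (u'W', u'F')):
            # a bare address is treated as a one-codepoint range
            start, stop = addrs.split('..') if '..' in addrs else (addrs, addrs)
            values.extend(range(int(start, 16), int(stop, 16) + 1))
    print(len(values))  # 97: 96 codepoints in 0x1100..0x115F, plus 0x2329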
428 | jquast/wcwidth | setup.py | SetupUpdate._parse_category | def _parse_category(fname, categories):
"""Parse unicode category tables."""
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
continue
addrs, details = uline.split(';', 1)
addrs, details = addrs.rstrip(), details.lstrip()
if any(details.startswith('{} #'.format(value))
for value in categories):
start, stop = addrs, addrs
if '..' in addrs:
start, stop = addrs.split('..')
values.extend(range(int(start, 16), int(stop, 16) + 1))
return version, date, sorted(values) | python | def _parse_category(fname, categories):
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
continue
addrs, details = uline.split(';', 1)
addrs, details = addrs.rstrip(), details.lstrip()
if any(details.startswith('{} #'.format(value))
for value in categories):
start, stop = addrs, addrs
if '..' in addrs:
start, stop = addrs.split('..')
values.extend(range(int(start, 16), int(stop, 16) + 1))
return version, date, sorted(values) | [
"def",
"_parse_category",
"(",
"fname",
",",
"categories",
")",
":",
"version",
",",
"date",
",",
"values",
"=",
"None",
",",
"None",
",",
"[",
"]",
"print",
"(",
"\"parsing {} ..\"",
".",
"format",
"(",
"fname",
")",
")",
"for",
"line",
"in",
"open",
"(",
"fname",
",",
"'rb'",
")",
":",
"uline",
"=",
"line",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"version",
"is",
"None",
":",
"version",
"=",
"uline",
".",
"split",
"(",
"None",
",",
"1",
")",
"[",
"1",
"]",
".",
"rstrip",
"(",
")",
"continue",
"elif",
"date",
"is",
"None",
":",
"date",
"=",
"uline",
".",
"split",
"(",
"':'",
",",
"1",
")",
"[",
"1",
"]",
".",
"rstrip",
"(",
")",
"continue",
"if",
"uline",
".",
"startswith",
"(",
"'#'",
")",
"or",
"not",
"uline",
".",
"lstrip",
"(",
")",
":",
"continue",
"addrs",
",",
"details",
"=",
"uline",
".",
"split",
"(",
"';'",
",",
"1",
")",
"addrs",
",",
"details",
"=",
"addrs",
".",
"rstrip",
"(",
")",
",",
"details",
".",
"lstrip",
"(",
")",
"if",
"any",
"(",
"details",
".",
"startswith",
"(",
"'{} #'",
".",
"format",
"(",
"value",
")",
")",
"for",
"value",
"in",
"categories",
")",
":",
"start",
",",
"stop",
"=",
"addrs",
",",
"addrs",
"if",
"'..'",
"in",
"addrs",
":",
"start",
",",
"stop",
"=",
"addrs",
".",
"split",
"(",
"'..'",
")",
"values",
".",
"extend",
"(",
"range",
"(",
"int",
"(",
"start",
",",
"16",
")",
",",
"int",
"(",
"stop",
",",
"16",
")",
"+",
"1",
")",
")",
"return",
"version",
",",
"date",
",",
"sorted",
"(",
"values",
")"
] | Parse unicode category tables. | [
"Parse",
"unicode",
"category",
"tables",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L204-L226 |
429 | jquast/wcwidth | setup.py | SetupUpdate._do_write | def _do_write(fname, variable, version, date, table):
"""Write combining tables to filesystem as python code."""
# pylint: disable=R0914
# Too many local variables (19/15) (col 4)
print("writing {} ..".format(fname))
import unicodedata
import datetime
import string
utc_now = datetime.datetime.utcnow()
indent = 4
with open(fname, 'w') as fout:
fout.write(
'"""{variable_proper} table. Created by setup.py."""\n'
"# Generated: {iso_utc}\n"
"# Source: {version}\n"
"# Date: {date}\n"
"{variable} = (".format(iso_utc=utc_now.isoformat(),
version=version,
date=date,
variable=variable,
variable_proper=variable.title()))
for start, end in table:
ucs_start, ucs_end = unichr(start), unichr(end)
hex_start, hex_end = ('0x{0:04x}'.format(start),
'0x{0:04x}'.format(end))
try:
name_start = string.capwords(unicodedata.name(ucs_start))
except ValueError:
name_start = u''
try:
name_end = string.capwords(unicodedata.name(ucs_end))
except ValueError:
name_end = u''
fout.write('\n' + (' ' * indent))
fout.write('({0}, {1},),'.format(hex_start, hex_end))
fout.write(' # {0:24s}..{1}'.format(
name_start[:24].rstrip() or '(nil)',
name_end[:24].rstrip()))
fout.write('\n)\n')
print("complete.") | python | def _do_write(fname, variable, version, date, table):
# pylint: disable=R0914
# Too many local variables (19/15) (col 4)
print("writing {} ..".format(fname))
import unicodedata
import datetime
import string
utc_now = datetime.datetime.utcnow()
indent = 4
with open(fname, 'w') as fout:
fout.write(
'"""{variable_proper} table. Created by setup.py."""\n'
"# Generated: {iso_utc}\n"
"# Source: {version}\n"
"# Date: {date}\n"
"{variable} = (".format(iso_utc=utc_now.isoformat(),
version=version,
date=date,
variable=variable,
variable_proper=variable.title()))
for start, end in table:
ucs_start, ucs_end = unichr(start), unichr(end)
hex_start, hex_end = ('0x{0:04x}'.format(start),
'0x{0:04x}'.format(end))
try:
name_start = string.capwords(unicodedata.name(ucs_start))
except ValueError:
name_start = u''
try:
name_end = string.capwords(unicodedata.name(ucs_end))
except ValueError:
name_end = u''
fout.write('\n' + (' ' * indent))
fout.write('({0}, {1},),'.format(hex_start, hex_end))
fout.write(' # {0:24s}..{1}'.format(
name_start[:24].rstrip() or '(nil)',
name_end[:24].rstrip()))
fout.write('\n)\n')
print("complete.") | [
"def",
"_do_write",
"(",
"fname",
",",
"variable",
",",
"version",
",",
"date",
",",
"table",
")",
":",
"# pylint: disable=R0914",
"# Too many local variables (19/15) (col 4)",
"print",
"(",
"\"writing {} ..\"",
".",
"format",
"(",
"fname",
")",
")",
"import",
"unicodedata",
"import",
"datetime",
"import",
"string",
"utc_now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"indent",
"=",
"4",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"fout",
":",
"fout",
".",
"write",
"(",
"'\"\"\"{variable_proper} table. Created by setup.py.\"\"\"\\n'",
"\"# Generated: {iso_utc}\\n\"",
"\"# Source: {version}\\n\"",
"\"# Date: {date}\\n\"",
"\"{variable} = (\"",
".",
"format",
"(",
"iso_utc",
"=",
"utc_now",
".",
"isoformat",
"(",
")",
",",
"version",
"=",
"version",
",",
"date",
"=",
"date",
",",
"variable",
"=",
"variable",
",",
"variable_proper",
"=",
"variable",
".",
"title",
"(",
")",
")",
")",
"for",
"start",
",",
"end",
"in",
"table",
":",
"ucs_start",
",",
"ucs_end",
"=",
"unichr",
"(",
"start",
")",
",",
"unichr",
"(",
"end",
")",
"hex_start",
",",
"hex_end",
"=",
"(",
"'0x{0:04x}'",
".",
"format",
"(",
"start",
")",
",",
"'0x{0:04x}'",
".",
"format",
"(",
"end",
")",
")",
"try",
":",
"name_start",
"=",
"string",
".",
"capwords",
"(",
"unicodedata",
".",
"name",
"(",
"ucs_start",
")",
")",
"except",
"ValueError",
":",
"name_start",
"=",
"u''",
"try",
":",
"name_end",
"=",
"string",
".",
"capwords",
"(",
"unicodedata",
".",
"name",
"(",
"ucs_end",
")",
")",
"except",
"ValueError",
":",
"name_end",
"=",
"u''",
"fout",
".",
"write",
"(",
"'\\n'",
"+",
"(",
"' '",
"*",
"indent",
")",
")",
"fout",
".",
"write",
"(",
"'({0}, {1},),'",
".",
"format",
"(",
"hex_start",
",",
"hex_end",
")",
")",
"fout",
".",
"write",
"(",
"' # {0:24s}..{1}'",
".",
"format",
"(",
"name_start",
"[",
":",
"24",
"]",
".",
"rstrip",
"(",
")",
"or",
"'(nil)'",
",",
"name_end",
"[",
":",
"24",
"]",
".",
"rstrip",
"(",
")",
")",
")",
"fout",
".",
"write",
"(",
"'\\n)\\n'",
")",
"print",
"(",
"\"complete.\"",
")"
] | Write combining tables to filesystem as python code. | [
"Write",
"combining",
"tables",
"to",
"filesystem",
"as",
"python",
"code",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L229-L268 |
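`_do_write` above consumes `table` as inclusive (start, end) pairs, while both parsers return flat sorted codepoint lists; the packing step between them is not included in this slice, so the helper below is an assumed equivalent, not the repo's actual code:

    def pack_ranges(values):
        # collapse a sorted codepoint list into inclusive (start, end) pairs
        table = []
        for cp in values:
            if table and cp == table[-1][1] + 1:
                table[-1] = (table[-1][0], cp)  # extend the current run
            else:
                table.append((cp, cp))          # start a new run
        return table

    print(pack_ranges([0x300, 0x301, 0x302, 0x483]))
    # [(768, 770), (1155, 1155)]

Note also that `_do_write` calls `unichr`, so the record as written targets Python 2; on Python 3 `chr` plays the same role.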
430 | jquast/wcwidth | bin/wcwidth-libc-comparator.py | report_ucs_msg | def report_ucs_msg(ucs, wcwidth_libc, wcwidth_local):
"""
Return string report of combining character differences.
:param ucs: unicode point.
:type ucs: unicode
:param wcwidth_libc: libc-wcwidth's reported character length.
:type wcwidth_libc: int
:param wcwidth_local: wcwidth's reported character length.
:type wcwidth_local: int
:rtype: unicode
"""
ucp = (ucs.encode('unicode_escape')[2:]
.decode('ascii')
.upper()
.lstrip('0'))
url = "http://codepoints.net/U+{}".format(ucp)
name = unicodedata.name(ucs)
return (u"libc,ours={},{} [--o{}o--] name={} val={} {}"
" ".format(wcwidth_libc, wcwidth_local, ucs, name, ord(ucs), url)) | python | def report_ucs_msg(ucs, wcwidth_libc, wcwidth_local):
ucp = (ucs.encode('unicode_escape')[2:]
.decode('ascii')
.upper()
.lstrip('0'))
url = "http://codepoints.net/U+{}".format(ucp)
name = unicodedata.name(ucs)
return (u"libc,ours={},{} [--o{}o--] name={} val={} {}"
" ".format(wcwidth_libc, wcwidth_local, ucs, name, ord(ucs), url)) | [
"def",
"report_ucs_msg",
"(",
"ucs",
",",
"wcwidth_libc",
",",
"wcwidth_local",
")",
":",
"ucp",
"=",
"(",
"ucs",
".",
"encode",
"(",
"'unicode_escape'",
")",
"[",
"2",
":",
"]",
".",
"decode",
"(",
"'ascii'",
")",
".",
"upper",
"(",
")",
".",
"lstrip",
"(",
"'0'",
")",
")",
"url",
"=",
"\"http://codepoints.net/U+{}\"",
".",
"format",
"(",
"ucp",
")",
"name",
"=",
"unicodedata",
".",
"name",
"(",
"ucs",
")",
"return",
"(",
"u\"libc,ours={},{} [--o{}o--] name={} val={} {}\"",
"\" \"",
".",
"format",
"(",
"wcwidth_libc",
",",
"wcwidth_local",
",",
"ucs",
",",
"name",
",",
"ord",
"(",
"ucs",
")",
",",
"url",
")",
")"
] | Return string report of combining character differences.
:param ucs: unicode point.
:type ucs: unicode
:param wcwidth_libc: libc-wcwidth's reported character length.
:type wcwidth_libc: int
:param wcwidth_local: wcwidth's reported character length.
:type wcwidth_local: int
:rtype: unicode | [
"Return",
"string",
"report",
"of",
"combining",
"character",
"differences",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-libc-comparator.py#L44-L63 |
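A quick trace of the codepoint-formatting dance above on one sample character; on Python 3 the same digits fall out of `format(ord(ucs), 'X')`, shown for comparison:

    import unicodedata

    ucs = u'\u0301'  # COMBINING ACUTE ACCENT, an arbitrary sample
    ucp = (ucs.encode('unicode_escape')[2:]  # b'0301' after dropping the \u prefix
           .decode('ascii').upper().lstrip('0'))
    print(ucp)                                # 301
    print("http://codepoints.net/U+{}".format(ucp))
    print(unicodedata.name(ucs))              # COMBINING ACUTE ACCENT
    print(format(ord(ucs), 'X'))              # 301 -- simpler py3 spelling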
431 | jquast/wcwidth | bin/wcwidth-browser.py | validate_args | def validate_args(opts):
"""Validate and return options provided by docopt parsing."""
if opts['--wide'] is None:
opts['--wide'] = 2
else:
assert opts['--wide'] in ("1", "2"), opts['--wide']
if opts['--alignment'] is None:
opts['--alignment'] = 'left'
else:
assert opts['--alignment'] in ('left', 'right'), opts['--alignment']
opts['--wide'] = int(opts['--wide'])
opts['character_factory'] = WcWideCharacterGenerator
if opts['--combining']:
opts['character_factory'] = WcCombinedCharacterGenerator
return opts | python | def validate_args(opts):
if opts['--wide'] is None:
opts['--wide'] = 2
else:
assert opts['--wide'] in ("1", "2"), opts['--wide']
if opts['--alignment'] is None:
opts['--alignment'] = 'left'
else:
assert opts['--alignment'] in ('left', 'right'), opts['--alignment']
opts['--wide'] = int(opts['--wide'])
opts['character_factory'] = WcWideCharacterGenerator
if opts['--combining']:
opts['character_factory'] = WcCombinedCharacterGenerator
return opts | [
"def",
"validate_args",
"(",
"opts",
")",
":",
"if",
"opts",
"[",
"'--wide'",
"]",
"is",
"None",
":",
"opts",
"[",
"'--wide'",
"]",
"=",
"2",
"else",
":",
"assert",
"opts",
"[",
"'--wide'",
"]",
"in",
"(",
"\"1\"",
",",
"\"2\"",
")",
",",
"opts",
"[",
"'--wide'",
"]",
"if",
"opts",
"[",
"'--alignment'",
"]",
"is",
"None",
":",
"opts",
"[",
"'--alignment'",
"]",
"=",
"'left'",
"else",
":",
"assert",
"opts",
"[",
"'--alignment'",
"]",
"in",
"(",
"'left'",
",",
"'right'",
")",
",",
"opts",
"[",
"'--alignment'",
"]",
"opts",
"[",
"'--wide'",
"]",
"=",
"int",
"(",
"opts",
"[",
"'--wide'",
"]",
")",
"opts",
"[",
"'character_factory'",
"]",
"=",
"WcWideCharacterGenerator",
"if",
"opts",
"[",
"'--combining'",
"]",
":",
"opts",
"[",
"'character_factory'",
"]",
"=",
"WcCombinedCharacterGenerator",
"return",
"opts"
] | Validate and return options provided by docopt parsing. | [
"Validate",
"and",
"return",
"options",
"provided",
"by",
"docopt",
"parsing",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L661-L675 |
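A sketch of the docopt-style dict `validate_args` expects, replaying its default-filling by hand; the generator classes are stubbed because they are defined elsewhere in wcwidth-browser.py, and the flag values are invented:

    class WcWideCharacterGenerator(object):      # stub; real class not in this slice
        pass
    class WcCombinedCharacterGenerator(object):  # stub; real class not in this slice
        pass

    opts = {'--wide': None, '--alignment': None, '--combining': True}
    opts['--wide'] = int(opts['--wide'] or 2)            # default wide=2
    opts['--alignment'] = opts['--alignment'] or 'left'  # default alignment
    opts['character_factory'] = (WcCombinedCharacterGenerator
                                 if opts['--combining']
                                 else WcWideCharacterGenerator)
    print(opts['--wide'], opts['--alignment'])           # 2 left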
432 | jquast/wcwidth | bin/wcwidth-browser.py | Screen.hint_width | def hint_width(self):
"""Width of a column segment."""
return sum((len(self.style.delimiter),
self.wide,
len(self.style.delimiter),
len(u' '),
UCS_PRINTLEN + 2,
len(u' '),
self.style.name_len,)) | python | def hint_width(self):
return sum((len(self.style.delimiter),
self.wide,
len(self.style.delimiter),
len(u' '),
UCS_PRINTLEN + 2,
len(u' '),
self.style.name_len,)) | [
"def",
"hint_width",
"(",
"self",
")",
":",
"return",
"sum",
"(",
"(",
"len",
"(",
"self",
".",
"style",
".",
"delimiter",
")",
",",
"self",
".",
"wide",
",",
"len",
"(",
"self",
".",
"style",
".",
"delimiter",
")",
",",
"len",
"(",
"u' '",
")",
",",
"UCS_PRINTLEN",
"+",
"2",
",",
"len",
"(",
"u' '",
")",
",",
"self",
".",
"style",
".",
"name_len",
",",
")",
")"
] | Width of a column segment. | [
"Width",
"of",
"a",
"column",
"segment",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L220-L228 |
433 | jquast/wcwidth | bin/wcwidth-browser.py | Screen.head_item | def head_item(self):
"""Text of a single column heading."""
delimiter = self.style.attr_minor(self.style.delimiter)
hint = self.style.header_hint * self.wide
heading = (u'{delimiter}{hint}{delimiter}'
.format(delimiter=delimiter, hint=hint))
alignment = lambda *args: (
self.term.rjust(*args) if self.style.alignment == 'right' else
self.term.ljust(*args))
txt = alignment(heading, self.hint_width, self.style.header_fill)
return self.style.attr_major(txt) | python | def head_item(self):
delimiter = self.style.attr_minor(self.style.delimiter)
hint = self.style.header_hint * self.wide
heading = (u'{delimiter}{hint}{delimiter}'
.format(delimiter=delimiter, hint=hint))
alignment = lambda *args: (
self.term.rjust(*args) if self.style.alignment == 'right' else
self.term.ljust(*args))
txt = alignment(heading, self.hint_width, self.style.header_fill)
return self.style.attr_major(txt) | [
"def",
"head_item",
"(",
"self",
")",
":",
"delimiter",
"=",
"self",
".",
"style",
".",
"attr_minor",
"(",
"self",
".",
"style",
".",
"delimiter",
")",
"hint",
"=",
"self",
".",
"style",
".",
"header_hint",
"*",
"self",
".",
"wide",
"heading",
"=",
"(",
"u'{delimiter}{hint}{delimiter}'",
".",
"format",
"(",
"delimiter",
"=",
"delimiter",
",",
"hint",
"=",
"hint",
")",
")",
"alignment",
"=",
"lambda",
"*",
"args",
":",
"(",
"self",
".",
"term",
".",
"rjust",
"(",
"*",
"args",
")",
"if",
"self",
".",
"style",
".",
"alignment",
"==",
"'right'",
"else",
"self",
".",
"term",
".",
"ljust",
"(",
"*",
"args",
")",
")",
"txt",
"=",
"alignment",
"(",
"heading",
",",
"self",
".",
"hint_width",
",",
"self",
".",
"style",
".",
"header_fill",
")",
"return",
"self",
".",
"style",
".",
"attr_major",
"(",
"txt",
")"
] | Text of a single column heading. | [
"Text",
"of",
"a",
"single",
"column",
"heading",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L231-L241 |
434 | jquast/wcwidth | bin/wcwidth-browser.py | Screen.msg_intro | def msg_intro(self):
"""Introductory message disabled above heading."""
delim = self.style.attr_minor(self.style.delimiter)
txt = self.intro_msg_fmt.format(delim=delim).rstrip()
return self.term.center(txt) | python | def msg_intro(self):
delim = self.style.attr_minor(self.style.delimiter)
txt = self.intro_msg_fmt.format(delim=delim).rstrip()
return self.term.center(txt) | [
"def",
"msg_intro",
"(",
"self",
")",
":",
"delim",
"=",
"self",
".",
"style",
".",
"attr_minor",
"(",
"self",
".",
"style",
".",
"delimiter",
")",
"txt",
"=",
"self",
".",
"intro_msg_fmt",
".",
"format",
"(",
"delim",
"=",
"delim",
")",
".",
"rstrip",
"(",
")",
"return",
"self",
".",
"term",
".",
"center",
"(",
"txt",
")"
] | Introductory message displayed above heading. | [
"Introductory",
"message",
"disabled",
"above",
"heading",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L244-L248 |
435 | jquast/wcwidth | bin/wcwidth-browser.py | Screen.num_columns | def num_columns(self):
"""Number of columns displayed."""
if self.term.is_a_tty:
return self.term.width // self.hint_width
return 1 | python | def num_columns(self):
if self.term.is_a_tty:
return self.term.width // self.hint_width
return 1 | [
"def",
"num_columns",
"(",
"self",
")",
":",
"if",
"self",
".",
"term",
".",
"is_a_tty",
":",
"return",
"self",
".",
"term",
".",
"width",
"//",
"self",
".",
"hint_width",
"return",
"1"
] | Number of columns displayed. | [
"Number",
"of",
"columns",
"displayed",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L256-L260 |
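Concrete numbers for the two layout properties above. The constants below (1-cell delimiter, wide=2, a 6-digit UCS_PRINTLEN, name_len=10) are assumptions for illustration; none of them are defined in this slice of the script:

    delimiter_len, wide, ucs_printlen, name_len = 1, 2, 6, 10  # assumed values
    hint_width = sum((delimiter_len, wide, delimiter_len, 1,
                      ucs_printlen + 2, 1, name_len))
    print(hint_width)        # 24 cells per column segment
    print(80 // hint_width)  # 3 columns fit on an 80-cell tty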
436 | jquast/wcwidth | bin/wcwidth-browser.py | Pager.on_resize | def on_resize(self, *args):
"""Signal handler callback for SIGWINCH."""
# pylint: disable=W0613
# Unused argument 'args'
self.screen.style.name_len = min(self.screen.style.name_len,
self.term.width - 15)
assert self.term.width >= self.screen.hint_width, (
'Screen too small {}, must be at least {}'.format(
self.term.width, self.screen.hint_width))
self._set_lastpage()
self.dirty = self.STATE_REFRESH | python | def on_resize(self, *args):
# pylint: disable=W0613
# Unused argument 'args'
self.screen.style.name_len = min(self.screen.style.name_len,
self.term.width - 15)
assert self.term.width >= self.screen.hint_width, (
'Screen too small {}, must be at least {}'.format(
self.term.width, self.screen.hint_width))
self._set_lastpage()
self.dirty = self.STATE_REFRESH | [
"def",
"on_resize",
"(",
"self",
",",
"*",
"args",
")",
":",
"# pylint: disable=W0613",
"# Unused argument 'args'",
"self",
".",
"screen",
".",
"style",
".",
"name_len",
"=",
"min",
"(",
"self",
".",
"screen",
".",
"style",
".",
"name_len",
",",
"self",
".",
"term",
".",
"width",
"-",
"15",
")",
"assert",
"self",
".",
"term",
".",
"width",
">=",
"self",
".",
"screen",
".",
"hint_width",
",",
"(",
"'Screen to small {}, must be at least {}'",
".",
"format",
"(",
"self",
".",
"term",
".",
"width",
",",
"self",
".",
"screen",
".",
"hint_width",
")",
")",
"self",
".",
"_set_lastpage",
"(",
")",
"self",
".",
"dirty",
"=",
"self",
".",
"STATE_REFRESH"
] | Signal handler callback for SIGWINCH. | [
"Signal",
"handler",
"callback",
"for",
"SIGWINCH",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L305-L315 |
437 | jquast/wcwidth | bin/wcwidth-browser.py | Pager._set_lastpage | def _set_lastpage(self):
"""Calculate value of class attribute ``last_page``."""
self.last_page = (len(self._page_data) - 1) // self.screen.page_size | python | def _set_lastpage(self):
self.last_page = (len(self._page_data) - 1) // self.screen.page_size | [
"def",
"_set_lastpage",
"(",
"self",
")",
":",
"self",
".",
"last_page",
"=",
"(",
"len",
"(",
"self",
".",
"_page_data",
")",
"-",
"1",
")",
"//",
"self",
".",
"screen",
".",
"page_size"
] | Calculate value of class attribute ``last_page``. | [
"Calculate",
"value",
"of",
"class",
"attribute",
"last_page",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L317-L319 |
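The zero-based last-page arithmetic above with hypothetical sizes:

    page_size = 64                       # hypothetical Screen.page_size
    n_entries = 300                      # hypothetical len(self._page_data)
    print((n_entries - 1) // page_size)  # 4 -> pages are indexed 0..4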
438 | jquast/wcwidth | bin/wcwidth-browser.py | Pager.display_initialize | def display_initialize(self):
"""Display 'please wait' message, and narrow build warning."""
echo(self.term.home + self.term.clear)
echo(self.term.move_y(self.term.height // 2))
echo(self.term.center('Initializing page data ...').rstrip())
flushout()
if LIMIT_UCS == 0x10000:
echo('\n\n')
echo(self.term.blink_red(self.term.center(
'narrow Python build: upperbound value is {n}.'
.format(n=LIMIT_UCS)).rstrip()))
echo('\n\n')
flushout() | python | def display_initialize(self):
echo(self.term.home + self.term.clear)
echo(self.term.move_y(self.term.height // 2))
echo(self.term.center('Initializing page data ...').rstrip())
flushout()
if LIMIT_UCS == 0x10000:
echo('\n\n')
echo(self.term.blink_red(self.term.center(
'narrow Python build: upperbound value is {n}.'
.format(n=LIMIT_UCS)).rstrip()))
echo('\n\n')
flushout() | [
"def",
"display_initialize",
"(",
"self",
")",
":",
"echo",
"(",
"self",
".",
"term",
".",
"home",
"+",
"self",
".",
"term",
".",
"clear",
")",
"echo",
"(",
"self",
".",
"term",
".",
"move_y",
"(",
"self",
".",
"term",
".",
"height",
"//",
"2",
")",
")",
"echo",
"(",
"self",
".",
"term",
".",
"center",
"(",
"'Initializing page data ...'",
")",
".",
"rstrip",
"(",
")",
")",
"flushout",
"(",
")",
"if",
"LIMIT_UCS",
"==",
"0x10000",
":",
"echo",
"(",
"'\\n\\n'",
")",
"echo",
"(",
"self",
".",
"term",
".",
"blink_red",
"(",
"self",
".",
"term",
".",
"center",
"(",
"'narrow Python build: upperbound value is {n}.'",
".",
"format",
"(",
"n",
"=",
"LIMIT_UCS",
")",
")",
".",
"rstrip",
"(",
")",
")",
")",
"echo",
"(",
"'\\n\\n'",
")",
"flushout",
"(",
")"
] | Display 'please wait' message, and narrow build warning. | [
"Display",
"please",
"wait",
"message",
"and",
"narrow",
"build",
"warning",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L321-L334 |
439 | jquast/wcwidth | bin/wcwidth-browser.py | Pager.initialize_page_data | def initialize_page_data(self):
"""Initialize the page data for the given screen."""
if self.term.is_a_tty:
self.display_initialize()
self.character_generator = self.character_factory(self.screen.wide)
page_data = list()
while True:
try:
page_data.append(next(self.character_generator))
except StopIteration:
break
if LIMIT_UCS == 0x10000:
echo(self.term.center('press any key.').rstrip())
flushout()
self.term.inkey(timeout=None)
return page_data | python | def initialize_page_data(self):
if self.term.is_a_tty:
self.display_initialize()
self.character_generator = self.character_factory(self.screen.wide)
page_data = list()
while True:
try:
page_data.append(next(self.character_generator))
except StopIteration:
break
if LIMIT_UCS == 0x10000:
echo(self.term.center('press any key.').rstrip())
flushout()
self.term.inkey(timeout=None)
return page_data | [
"def",
"initialize_page_data",
"(",
"self",
")",
":",
"if",
"self",
".",
"term",
".",
"is_a_tty",
":",
"self",
".",
"display_initialize",
"(",
")",
"self",
".",
"character_generator",
"=",
"self",
".",
"character_factory",
"(",
"self",
".",
"screen",
".",
"wide",
")",
"page_data",
"=",
"list",
"(",
")",
"while",
"True",
":",
"try",
":",
"page_data",
".",
"append",
"(",
"next",
"(",
"self",
".",
"character_generator",
")",
")",
"except",
"StopIteration",
":",
"break",
"if",
"LIMIT_UCS",
"==",
"0x10000",
":",
"echo",
"(",
"self",
".",
"term",
".",
"center",
"(",
"'press any key.'",
")",
".",
"rstrip",
"(",
")",
")",
"flushout",
"(",
")",
"self",
".",
"term",
".",
"inkey",
"(",
"timeout",
"=",
"None",
")",
"return",
"page_data"
] | Initialize the page data for the given screen. | [
"Initialize",
"the",
"page",
"data",
"for",
"the",
"given",
"screen",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L336-L351 |
440 | jquast/wcwidth | bin/wcwidth-browser.py | Pager.page_data | def page_data(self, idx, offset):
"""
Return character data for page of given index and offset.
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: list of tuples in form of ``(ucs, name)``
:rtype: list[(unicode, unicode)]
"""
size = self.screen.page_size
while offset < 0 and idx:
offset += size
idx -= 1
offset = max(0, offset)
while offset >= size:
offset -= size
idx += 1
if idx == self.last_page:
offset = 0
idx = min(max(0, idx), self.last_page)
start = (idx * self.screen.page_size) + offset
end = start + self.screen.page_size
return (idx, offset), self._page_data[start:end] | python | def page_data(self, idx, offset):
size = self.screen.page_size
while offset < 0 and idx:
offset += size
idx -= 1
offset = max(0, offset)
while offset >= size:
offset -= size
idx += 1
if idx == self.last_page:
offset = 0
idx = min(max(0, idx), self.last_page)
start = (idx * self.screen.page_size) + offset
end = start + self.screen.page_size
return (idx, offset), self._page_data[start:end] | [
"def",
"page_data",
"(",
"self",
",",
"idx",
",",
"offset",
")",
":",
"size",
"=",
"self",
".",
"screen",
".",
"page_size",
"while",
"offset",
"<",
"0",
"and",
"idx",
":",
"offset",
"+=",
"size",
"idx",
"-=",
"1",
"offset",
"=",
"max",
"(",
"0",
",",
"offset",
")",
"while",
"offset",
">=",
"size",
":",
"offset",
"-=",
"size",
"idx",
"+=",
"1",
"if",
"idx",
"==",
"self",
".",
"last_page",
":",
"offset",
"=",
"0",
"idx",
"=",
"min",
"(",
"max",
"(",
"0",
",",
"idx",
")",
",",
"self",
".",
"last_page",
")",
"start",
"=",
"(",
"idx",
"*",
"self",
".",
"screen",
".",
"page_size",
")",
"+",
"offset",
"end",
"=",
"start",
"+",
"self",
".",
"screen",
".",
"page_size",
"return",
"(",
"idx",
",",
"offset",
")",
",",
"self",
".",
"_page_data",
"[",
"start",
":",
"end",
"]"
] | Return character data for page of given index and offset.
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: list of tuples in form of ``(ucs, name)``
:rtype: list[(unicode, unicode)] | [
"Return",
"character",
"data",
"for",
"page",
"of",
"given",
"index",
"and",
"offset",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L353-L381 |
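The index/offset normalisation in `page_data` folds an out-of-range offset back into whole pages before slicing. A standalone replay of just that clamping, with invented sizes:

    def normalize(idx, offset, size, last_page):
        # same arithmetic as Pager.page_data, minus the data slice
        while offset < 0 and idx:
            offset += size
            idx -= 1
        offset = max(0, offset)
        while offset >= size:
            offset -= size
            idx += 1
        if idx == last_page:
            offset = 0
        idx = min(max(0, idx), last_page)
        return idx, offset

    print(normalize(2, -70, 64, 4))  # (0, 58): 70 rows back borrows two pages
    print(normalize(1, 130, 64, 4))  # (3, 2): 130 rows forward carries two pages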
441 | jquast/wcwidth | bin/wcwidth-browser.py | Pager._run_notty | def _run_notty(self, writer):
"""Pager run method for terminals that are not a tty."""
page_idx = page_offset = 0
while True:
npage_idx, _ = self.draw(writer, page_idx + 1, page_offset)
if npage_idx == self.last_page:
# page displayed was last page, quit.
break
page_idx = npage_idx
self.dirty = self.STATE_DIRTY
return | python | def _run_notty(self, writer):
page_idx = page_offset = 0
while True:
npage_idx, _ = self.draw(writer, page_idx + 1, page_offset)
if npage_idx == self.last_page:
# page displayed was last page, quit.
break
page_idx = npage_idx
self.dirty = self.STATE_DIRTY
return | [
"def",
"_run_notty",
"(",
"self",
",",
"writer",
")",
":",
"page_idx",
"=",
"page_offset",
"=",
"0",
"while",
"True",
":",
"npage_idx",
",",
"_",
"=",
"self",
".",
"draw",
"(",
"writer",
",",
"page_idx",
"+",
"1",
",",
"page_offset",
")",
"if",
"npage_idx",
"==",
"self",
".",
"last_page",
":",
"# page displayed was last page, quit.",
"break",
"page_idx",
"=",
"npage_idx",
"self",
".",
"dirty",
"=",
"self",
".",
"STATE_DIRTY",
"return"
] | Pager run method for terminals that are not a tty. | [
"Pager",
"run",
"method",
"for",
"terminals",
"that",
"are",
"not",
"a",
"tty",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L383-L393 |
442 | jquast/wcwidth | bin/wcwidth-browser.py | Pager._run_tty | def _run_tty(self, writer, reader):
"""Pager run method for terminals that are a tty."""
# allow window-change signal to reflow screen
signal.signal(signal.SIGWINCH, self.on_resize)
page_idx = page_offset = 0
while True:
if self.dirty:
page_idx, page_offset = self.draw(writer,
page_idx,
page_offset)
self.dirty = self.STATE_CLEAN
inp = reader(timeout=0.25)
if inp is not None:
nxt, noff = self.process_keystroke(inp,
page_idx,
page_offset)
if not self.dirty:
self.dirty = nxt != page_idx or noff != page_offset
page_idx, page_offset = nxt, noff
if page_idx == -1:
return | python | def _run_tty(self, writer, reader):
# allow window-change signal to reflow screen
signal.signal(signal.SIGWINCH, self.on_resize)
page_idx = page_offset = 0
while True:
if self.dirty:
page_idx, page_offset = self.draw(writer,
page_idx,
page_offset)
self.dirty = self.STATE_CLEAN
inp = reader(timeout=0.25)
if inp is not None:
nxt, noff = self.process_keystroke(inp,
page_idx,
page_offset)
if not self.dirty:
self.dirty = nxt != page_idx or noff != page_offset
page_idx, page_offset = nxt, noff
if page_idx == -1:
return | [
"def",
"_run_tty",
"(",
"self",
",",
"writer",
",",
"reader",
")",
":",
"# allow window-change signal to reflow screen",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGWINCH",
",",
"self",
".",
"on_resize",
")",
"page_idx",
"=",
"page_offset",
"=",
"0",
"while",
"True",
":",
"if",
"self",
".",
"dirty",
":",
"page_idx",
",",
"page_offset",
"=",
"self",
".",
"draw",
"(",
"writer",
",",
"page_idx",
",",
"page_offset",
")",
"self",
".",
"dirty",
"=",
"self",
".",
"STATE_CLEAN",
"inp",
"=",
"reader",
"(",
"timeout",
"=",
"0.25",
")",
"if",
"inp",
"is",
"not",
"None",
":",
"nxt",
",",
"noff",
"=",
"self",
".",
"process_keystroke",
"(",
"inp",
",",
"page_idx",
",",
"page_offset",
")",
"if",
"not",
"self",
".",
"dirty",
":",
"self",
".",
"dirty",
"=",
"nxt",
"!=",
"page_idx",
"or",
"noff",
"!=",
"page_offset",
"page_idx",
",",
"page_offset",
"=",
"nxt",
",",
"noff",
"if",
"page_idx",
"==",
"-",
"1",
":",
"return"
] | Pager run method for terminals that are a tty. | [
"Pager",
"run",
"method",
"for",
"terminals",
"that",
"are",
"a",
"tty",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L395-L416 |
443 | jquast/wcwidth | bin/wcwidth-browser.py | Pager.run | def run(self, writer, reader):
"""
Pager entry point.
In interactive mode (terminal is a tty), run until
``process_keystroke()`` detects quit keystroke ('q'). In
non-interactive mode, exit after displaying all unicode points.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param reader: callable reads keystrokes from input stream, sending
instance of blessed.keyboard.Keystroke.
:type reader: callable
"""
self._page_data = self.initialize_page_data()
self._set_lastpage()
if not self.term.is_a_tty:
self._run_notty(writer)
else:
self._run_tty(writer, reader) | python | def run(self, writer, reader):
self._page_data = self.initialize_page_data()
self._set_lastpage()
if not self.term.is_a_tty:
self._run_notty(writer)
else:
self._run_tty(writer, reader) | [
"def",
"run",
"(",
"self",
",",
"writer",
",",
"reader",
")",
":",
"self",
".",
"_page_data",
"=",
"self",
".",
"initialize_page_data",
"(",
")",
"self",
".",
"_set_lastpage",
"(",
")",
"if",
"not",
"self",
".",
"term",
".",
"is_a_tty",
":",
"self",
".",
"_run_notty",
"(",
"writer",
")",
"else",
":",
"self",
".",
"_run_tty",
"(",
"writer",
",",
"reader",
")"
] | Pager entry point.
In interactive mode (terminal is a tty), run until
``process_keystroke()`` detects quit keystroke ('q'). In
non-interactive mode, exit after displaying all unicode points.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param reader: callable reads keystrokes from input stream, sending
instance of blessed.keyboard.Keystroke.
:type reader: callable | [
"Pager",
"entry",
"point",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L418-L437 |
444 | jquast/wcwidth | bin/wcwidth-browser.py | Pager.process_keystroke | def process_keystroke(self, inp, idx, offset):
"""
Process keystroke ``inp``, adjusting screen parameters.
:param inp: return value of Terminal.inkey().
:type inp: blessed.keyboard.Keystroke
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int)
"""
if inp.lower() in (u'q', u'Q'):
# exit
return (-1, -1)
self._process_keystroke_commands(inp)
idx, offset = self._process_keystroke_movement(inp, idx, offset)
return idx, offset | python | def process_keystroke(self, inp, idx, offset):
if inp.lower() in (u'q', u'Q'):
# exit
return (-1, -1)
self._process_keystroke_commands(inp)
idx, offset = self._process_keystroke_movement(inp, idx, offset)
return idx, offset | [
"def",
"process_keystroke",
"(",
"self",
",",
"inp",
",",
"idx",
",",
"offset",
")",
":",
"if",
"inp",
".",
"lower",
"(",
")",
"in",
"(",
"u'q'",
",",
"u'Q'",
")",
":",
"# exit",
"return",
"(",
"-",
"1",
",",
"-",
"1",
")",
"self",
".",
"_process_keystroke_commands",
"(",
"inp",
")",
"idx",
",",
"offset",
"=",
"self",
".",
"_process_keystroke_movement",
"(",
"inp",
",",
"idx",
",",
"offset",
")",
"return",
"idx",
",",
"offset"
] | Process keystroke ``inp``, adjusting screen parameters.
:param inp: return value of Terminal.inkey().
:type inp: blessed.keyboard.Keystroke
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int) | [
"Process",
"keystroke",
"inp",
"adjusting",
"screen",
"parameters",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L439-L457 |
445 | jquast/wcwidth | bin/wcwidth-browser.py | Pager._process_keystroke_movement | def _process_keystroke_movement(self, inp, idx, offset):
"""Process keystrokes that adjust index and offset."""
term = self.term
if inp in (u'y', u'k') or inp.code in (term.KEY_UP,):
# scroll backward 1 line
idx, offset = (idx, offset - self.screen.num_columns)
elif inp in (u'e', u'j') or inp.code in (term.KEY_ENTER,
term.KEY_DOWN,):
# scroll forward 1 line
idx, offset = (idx, offset + self.screen.num_columns)
elif inp in (u'f', u' ') or inp.code in (term.KEY_PGDOWN,):
# scroll forward 1 page
idx, offset = (idx + 1, offset)
elif inp == u'b' or inp.code in (term.KEY_PGUP,):
# scroll backward 1 page
idx, offset = (max(0, idx - 1), offset)
elif inp.code in (term.KEY_SDOWN,):
# scroll forward 10 pages
idx, offset = (max(0, idx + 10), offset)
elif inp.code in (term.KEY_SUP,):
# scroll backward 10 pages
idx, offset = (max(0, idx - 10), offset)
elif inp.code == term.KEY_HOME:
# top
idx, offset = (0, 0)
elif inp.code == term.KEY_END:
# bottom
idx, offset = (self.last_page, 0)
return idx, offset | python | def _process_keystroke_movement(self, inp, idx, offset):
term = self.term
if inp in (u'y', u'k') or inp.code in (term.KEY_UP,):
# scroll backward 1 line
idx, offset = (idx, offset - self.screen.num_columns)
elif inp in (u'e', u'j') or inp.code in (term.KEY_ENTER,
term.KEY_DOWN,):
# scroll forward 1 line
idx, offset = (idx, offset + self.screen.num_columns)
elif inp in (u'f', u' ') or inp.code in (term.KEY_PGDOWN,):
# scroll forward 1 page
idx, offset = (idx + 1, offset)
elif inp == u'b' or inp.code in (term.KEY_PGUP,):
# scroll backward 1 page
idx, offset = (max(0, idx - 1), offset)
elif inp.code in (term.KEY_SDOWN,):
# scroll forward 10 pages
idx, offset = (max(0, idx + 10), offset)
elif inp.code in (term.KEY_SUP,):
# scroll backward 10 pages
idx, offset = (max(0, idx - 10), offset)
elif inp.code == term.KEY_HOME:
# top
idx, offset = (0, 0)
elif inp.code == term.KEY_END:
# bottom
idx, offset = (self.last_page, 0)
return idx, offset | [
"def",
"_process_keystroke_movement",
"(",
"self",
",",
"inp",
",",
"idx",
",",
"offset",
")",
":",
"term",
"=",
"self",
".",
"term",
"if",
"inp",
"in",
"(",
"u'y'",
",",
"u'k'",
")",
"or",
"inp",
".",
"code",
"in",
"(",
"term",
".",
"KEY_UP",
",",
")",
":",
"# scroll backward 1 line",
"idx",
",",
"offset",
"=",
"(",
"idx",
",",
"offset",
"-",
"self",
".",
"screen",
".",
"num_columns",
")",
"elif",
"inp",
"in",
"(",
"u'e'",
",",
"u'j'",
")",
"or",
"inp",
".",
"code",
"in",
"(",
"term",
".",
"KEY_ENTER",
",",
"term",
".",
"KEY_DOWN",
",",
")",
":",
"# scroll forward 1 line",
"idx",
",",
"offset",
"=",
"(",
"idx",
",",
"offset",
"+",
"self",
".",
"screen",
".",
"num_columns",
")",
"elif",
"inp",
"in",
"(",
"u'f'",
",",
"u' '",
")",
"or",
"inp",
".",
"code",
"in",
"(",
"term",
".",
"KEY_PGDOWN",
",",
")",
":",
"# scroll forward 1 page",
"idx",
",",
"offset",
"=",
"(",
"idx",
"+",
"1",
",",
"offset",
")",
"elif",
"inp",
"==",
"u'b'",
"or",
"inp",
".",
"code",
"in",
"(",
"term",
".",
"KEY_PGUP",
",",
")",
":",
"# scroll backward 1 page",
"idx",
",",
"offset",
"=",
"(",
"max",
"(",
"0",
",",
"idx",
"-",
"1",
")",
",",
"offset",
")",
"elif",
"inp",
".",
"code",
"in",
"(",
"term",
".",
"KEY_SDOWN",
",",
")",
":",
"# scroll forward 10 pages",
"idx",
",",
"offset",
"=",
"(",
"max",
"(",
"0",
",",
"idx",
"+",
"10",
")",
",",
"offset",
")",
"elif",
"inp",
".",
"code",
"in",
"(",
"term",
".",
"KEY_SUP",
",",
")",
":",
"# scroll forward 10 pages",
"idx",
",",
"offset",
"=",
"(",
"max",
"(",
"0",
",",
"idx",
"-",
"10",
")",
",",
"offset",
")",
"elif",
"inp",
".",
"code",
"==",
"term",
".",
"KEY_HOME",
":",
"# top",
"idx",
",",
"offset",
"=",
"(",
"0",
",",
"0",
")",
"elif",
"inp",
".",
"code",
"==",
"term",
".",
"KEY_END",
":",
"# bottom",
"idx",
",",
"offset",
"=",
"(",
"self",
".",
"last_page",
",",
"0",
")",
"return",
"idx",
",",
"offset"
] | Process keystrokes that adjust index and offset. | [
"Process",
"keystrokes",
"that",
"adjust",
"index",
"and",
"offset",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L483-L511 |
446 | jquast/wcwidth | bin/wcwidth-browser.py | Pager.draw | def draw(self, writer, idx, offset):
"""
Draw the current page view to ``writer``.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param idx: current page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int)
"""
# as our screen can be resized while we're mid-calculation,
# our self.dirty flag can become re-toggled; because we are
# not re-flowing our pagination, we must begin over again.
while self.dirty:
self.draw_heading(writer)
self.dirty = self.STATE_CLEAN
(idx, offset), data = self.page_data(idx, offset)
for txt in self.page_view(data):
writer(txt)
self.draw_status(writer, idx)
flushout()
return idx, offset | python | def draw(self, writer, idx, offset):
# as our screen can be resized while we're mid-calculation,
# our self.dirty flag can become re-toggled; because we are
# not re-flowing our pagination, we must begin over again.
while self.dirty:
self.draw_heading(writer)
self.dirty = self.STATE_CLEAN
(idx, offset), data = self.page_data(idx, offset)
for txt in self.page_view(data):
writer(txt)
self.draw_status(writer, idx)
flushout()
return idx, offset | [
"def",
"draw",
"(",
"self",
",",
"writer",
",",
"idx",
",",
"offset",
")",
":",
"# as our screen can be resized while we're mid-calculation,",
"# our self.dirty flag can become re-toggled; because we are",
"# not re-flowing our pagination, we must begin over again.",
"while",
"self",
".",
"dirty",
":",
"self",
".",
"draw_heading",
"(",
"writer",
")",
"self",
".",
"dirty",
"=",
"self",
".",
"STATE_CLEAN",
"(",
"idx",
",",
"offset",
")",
",",
"data",
"=",
"self",
".",
"page_data",
"(",
"idx",
",",
"offset",
")",
"for",
"txt",
"in",
"self",
".",
"page_view",
"(",
"data",
")",
":",
"writer",
"(",
"txt",
")",
"self",
".",
"draw_status",
"(",
"writer",
",",
"idx",
")",
"flushout",
"(",
")",
"return",
"idx",
",",
"offset"
] | Draw the current page view to ``writer``.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param idx: current page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int) | [
"Draw",
"the",
"current",
"page",
"view",
"to",
"writer",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L513-L537 |
447 | jquast/wcwidth | bin/wcwidth-browser.py | Pager.draw_heading | def draw_heading(self, writer):
"""
Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``.
"""
if self.dirty == self.STATE_REFRESH:
writer(u''.join(
(self.term.home, self.term.clear,
self.screen.msg_intro, '\n',
self.screen.header, '\n',)))
return True | python | def draw_heading(self, writer):
if self.dirty == self.STATE_REFRESH:
writer(u''.join(
(self.term.home, self.term.clear,
self.screen.msg_intro, '\n',
self.screen.header, '\n',)))
return True | [
"def",
"draw_heading",
"(",
"self",
",",
"writer",
")",
":",
"if",
"self",
".",
"dirty",
"==",
"self",
".",
"STATE_REFRESH",
":",
"writer",
"(",
"u''",
".",
"join",
"(",
"(",
"self",
".",
"term",
".",
"home",
",",
"self",
".",
"term",
".",
"clear",
",",
"self",
".",
"screen",
".",
"msg_intro",
",",
"'\\n'",
",",
"self",
".",
"screen",
".",
"header",
",",
"'\\n'",
",",
")",
")",
")",
"return",
"True"
] | Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``. | [
"Conditionally",
"redraw",
"screen",
"when",
"dirty",
"attribute",
"is",
"valued",
"REFRESH",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L539-L554 |
448 | jquast/wcwidth | bin/wcwidth-browser.py | Pager.draw_status | def draw_status(self, writer, idx):
"""
Conditionally draw status bar when output terminal is a tty.
:param writer: callable writes to output stream, receiving unicode.
:param idx: current page position index.
:type idx: int
"""
if self.term.is_a_tty:
writer(self.term.hide_cursor())
style = self.screen.style
writer(self.term.move(self.term.height - 1))
if idx == self.last_page:
last_end = u'(END)'
else:
last_end = u'/{0}'.format(self.last_page)
txt = (u'Page {idx}{last_end} - '
u'{q} to quit, [keys: {keyset}]'
.format(idx=style.attr_minor(u'{0}'.format(idx)),
last_end=style.attr_major(last_end),
keyset=style.attr_major('kjfb12-='),
q=style.attr_minor(u'q')))
writer(self.term.center(txt).rstrip()) | python | def draw_status(self, writer, idx):
if self.term.is_a_tty:
writer(self.term.hide_cursor())
style = self.screen.style
writer(self.term.move(self.term.height - 1))
if idx == self.last_page:
last_end = u'(END)'
else:
last_end = u'/{0}'.format(self.last_page)
txt = (u'Page {idx}{last_end} - '
u'{q} to quit, [keys: {keyset}]'
.format(idx=style.attr_minor(u'{0}'.format(idx)),
last_end=style.attr_major(last_end),
keyset=style.attr_major('kjfb12-='),
q=style.attr_minor(u'q')))
writer(self.term.center(txt).rstrip()) | [
"def",
"draw_status",
"(",
"self",
",",
"writer",
",",
"idx",
")",
":",
"if",
"self",
".",
"term",
".",
"is_a_tty",
":",
"writer",
"(",
"self",
".",
"term",
".",
"hide_cursor",
"(",
")",
")",
"style",
"=",
"self",
".",
"screen",
".",
"style",
"writer",
"(",
"self",
".",
"term",
".",
"move",
"(",
"self",
".",
"term",
".",
"height",
"-",
"1",
")",
")",
"if",
"idx",
"==",
"self",
".",
"last_page",
":",
"last_end",
"=",
"u'(END)'",
"else",
":",
"last_end",
"=",
"u'/{0}'",
".",
"format",
"(",
"self",
".",
"last_page",
")",
"txt",
"=",
"(",
"u'Page {idx}{last_end} - '",
"u'{q} to quit, [keys: {keyset}]'",
".",
"format",
"(",
"idx",
"=",
"style",
".",
"attr_minor",
"(",
"u'{0}'",
".",
"format",
"(",
"idx",
")",
")",
",",
"last_end",
"=",
"style",
".",
"attr_major",
"(",
"last_end",
")",
",",
"keyset",
"=",
"style",
".",
"attr_major",
"(",
"'kjfb12-='",
")",
",",
"q",
"=",
"style",
".",
"attr_minor",
"(",
"u'q'",
")",
")",
")",
"writer",
"(",
"self",
".",
"term",
".",
"center",
"(",
"txt",
")",
".",
"rstrip",
"(",
")",
")"
] | Conditionally draw status bar when output terminal is a tty.
:param writer: callable writes to output stream, receiving unicode.
:param idx: current page position index.
:type idx: int | [
"Conditionally",
"draw",
"status",
"bar",
"when",
"output",
"terminal",
"is",
"a",
"tty",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L556-L578 |
449 | jquast/wcwidth | bin/wcwidth-browser.py | Pager.page_view | def page_view(self, data):
"""
Generator yields text to be displayed for the current unicode pageview.
:param data: The current page's data as tuple of ``(ucs, name)``.
:rtype: generator
"""
if self.term.is_a_tty:
yield self.term.move(self.screen.row_begins, 0)
# sequence clears to end-of-line
clear_eol = self.term.clear_eol
# sequence clears to end-of-screen
clear_eos = self.term.clear_eos
# track our current column and row, where column is
# the whole segment of unicode value text, and draw
# only self.screen.num_columns before end-of-line.
#
# use clear_eol at end of each row to erase over any
# "ghosted" text, and clear_eos at end of screen to
# clear the same, especially for the final page which
# is often short.
col = 0
for ucs, name in data:
val = self.text_entry(ucs, name)
col += 1
if col == self.screen.num_columns:
col = 0
if self.term.is_a_tty:
val = u''.join((val, clear_eol, u'\n'))
else:
val = u''.join((val.rstrip(), u'\n'))
yield val
if self.term.is_a_tty:
yield u''.join((clear_eol, u'\n', clear_eos)) | python | def page_view(self, data):
if self.term.is_a_tty:
yield self.term.move(self.screen.row_begins, 0)
# sequence clears to end-of-line
clear_eol = self.term.clear_eol
# sequence clears to end-of-screen
clear_eos = self.term.clear_eos
# track our current column and row, where column is
# the whole segment of unicode value text, and draw
# only self.screen.num_columns before end-of-line.
#
# use clear_eol at end of each row to erase over any
# "ghosted" text, and clear_eos at end of screen to
# clear the same, especially for the final page which
# is often short.
col = 0
for ucs, name in data:
val = self.text_entry(ucs, name)
col += 1
if col == self.screen.num_columns:
col = 0
if self.term.is_a_tty:
val = u''.join((val, clear_eol, u'\n'))
else:
val = u''.join((val.rstrip(), u'\n'))
yield val
if self.term.is_a_tty:
yield u''.join((clear_eol, u'\n', clear_eos)) | [
"def",
"page_view",
"(",
"self",
",",
"data",
")",
":",
"if",
"self",
".",
"term",
".",
"is_a_tty",
":",
"yield",
"self",
".",
"term",
".",
"move",
"(",
"self",
".",
"screen",
".",
"row_begins",
",",
"0",
")",
"# sequence clears to end-of-line",
"clear_eol",
"=",
"self",
".",
"term",
".",
"clear_eol",
"# sequence clears to end-of-screen",
"clear_eos",
"=",
"self",
".",
"term",
".",
"clear_eos",
"# track our current column and row, where column is",
"# the whole segment of unicode value text, and draw",
"# only self.screen.num_columns before end-of-line.",
"#",
"# use clear_eol at end of each row to erase over any",
"# \"ghosted\" text, and clear_eos at end of screen to",
"# clear the same, especially for the final page which",
"# is often short.",
"col",
"=",
"0",
"for",
"ucs",
",",
"name",
"in",
"data",
":",
"val",
"=",
"self",
".",
"text_entry",
"(",
"ucs",
",",
"name",
")",
"col",
"+=",
"1",
"if",
"col",
"==",
"self",
".",
"screen",
".",
"num_columns",
":",
"col",
"=",
"0",
"if",
"self",
".",
"term",
".",
"is_a_tty",
":",
"val",
"=",
"u''",
".",
"join",
"(",
"(",
"val",
",",
"clear_eol",
",",
"u'\\n'",
")",
")",
"else",
":",
"val",
"=",
"u''",
".",
"join",
"(",
"(",
"val",
".",
"rstrip",
"(",
")",
",",
"u'\\n'",
")",
")",
"yield",
"val",
"if",
"self",
".",
"term",
".",
"is_a_tty",
":",
"yield",
"u''",
".",
"join",
"(",
"(",
"clear_eol",
",",
"u'\\n'",
",",
"clear_eos",
")",
")"
] | Generator yields text to be displayed for the current unicode pageview.
:param data: The current page's data as tuple of ``(ucs, name)``.
:rtype: generator | [
"Generator",
"yields",
"text",
"to",
"be",
"displayed",
"for",
"the",
"current",
"unicode",
"pageview",
"."
] | 78800b68911880ef4ef95ae83886154710441871 | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L580-L615 |
450 | matthew-brett/delocate | delocate/tools.py | back_tick | def back_tick(cmd, ret_err=False, as_str=True, raise_err=None):
""" Run command `cmd`, return stdout, or stdout, stderr if `ret_err`
Roughly equivalent to ``check_output`` in Python 2.7
Parameters
----------
cmd : sequence
command to execute
ret_err : bool, optional
If True, return stderr in addition to stdout. If False, just return
stdout
as_str : bool, optional
Whether to decode outputs to unicode string on exit.
raise_err : None or bool, optional
If True, raise RuntimeError for non-zero return code. If None, set to
True when `ret_err` is False, False if `ret_err` is True
Returns
-------
out : str or tuple
If `ret_err` is False, return stripped string containing stdout from
`cmd`. If `ret_err` is True, return tuple of (stdout, stderr) where
``stdout`` is the stripped stdout, and ``stderr`` is the stripped
stderr.
Raises
------
Raises RuntimeError if command returns non-zero exit code and `raise_err`
is True
"""
if raise_err is None:
raise_err = False if ret_err else True
cmd_is_seq = isinstance(cmd, (list, tuple))
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=not cmd_is_seq)
out, err = proc.communicate()
retcode = proc.returncode
cmd_str = ' '.join(cmd) if cmd_is_seq else cmd
if retcode is None:
proc.terminate()
raise RuntimeError(cmd_str + ' process did not terminate')
if raise_err and retcode != 0:
raise RuntimeError('{0} returned code {1} with error {2}'.format(
cmd_str, retcode, err.decode('latin-1')))
out = out.strip()
if as_str:
out = out.decode('latin-1')
if not ret_err:
return out
err = err.strip()
if as_str:
err = err.decode('latin-1')
return out, err | python | def back_tick(cmd, ret_err=False, as_str=True, raise_err=None):
if raise_err is None:
raise_err = False if ret_err else True
cmd_is_seq = isinstance(cmd, (list, tuple))
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=not cmd_is_seq)
out, err = proc.communicate()
retcode = proc.returncode
cmd_str = ' '.join(cmd) if cmd_is_seq else cmd
if retcode is None:
proc.terminate()
raise RuntimeError(cmd_str + ' process did not terminate')
if raise_err and retcode != 0:
raise RuntimeError('{0} returned code {1} with error {2}'.format(
cmd_str, retcode, err.decode('latin-1')))
out = out.strip()
if as_str:
out = out.decode('latin-1')
if not ret_err:
return out
err = err.strip()
if as_str:
err = err.decode('latin-1')
return out, err | [
"def",
"back_tick",
"(",
"cmd",
",",
"ret_err",
"=",
"False",
",",
"as_str",
"=",
"True",
",",
"raise_err",
"=",
"None",
")",
":",
"if",
"raise_err",
"is",
"None",
":",
"raise_err",
"=",
"False",
"if",
"ret_err",
"else",
"True",
"cmd_is_seq",
"=",
"isinstance",
"(",
"cmd",
",",
"(",
"list",
",",
"tuple",
")",
")",
"proc",
"=",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
",",
"shell",
"=",
"not",
"cmd_is_seq",
")",
"out",
",",
"err",
"=",
"proc",
".",
"communicate",
"(",
")",
"retcode",
"=",
"proc",
".",
"returncode",
"cmd_str",
"=",
"' '",
".",
"join",
"(",
"cmd",
")",
"if",
"cmd_is_seq",
"else",
"cmd",
"if",
"retcode",
"is",
"None",
":",
"proc",
".",
"terminate",
"(",
")",
"raise",
"RuntimeError",
"(",
"cmd_str",
"+",
"' process did not terminate'",
")",
"if",
"raise_err",
"and",
"retcode",
"!=",
"0",
":",
"raise",
"RuntimeError",
"(",
"'{0} returned code {1} with error {2}'",
".",
"format",
"(",
"cmd_str",
",",
"retcode",
",",
"err",
".",
"decode",
"(",
"'latin-1'",
")",
")",
")",
"out",
"=",
"out",
".",
"strip",
"(",
")",
"if",
"as_str",
":",
"out",
"=",
"out",
".",
"decode",
"(",
"'latin-1'",
")",
"if",
"not",
"ret_err",
":",
"return",
"out",
"err",
"=",
"err",
".",
"strip",
"(",
")",
"if",
"as_str",
":",
"err",
"=",
"err",
".",
"decode",
"(",
"'latin-1'",
")",
"return",
"out",
",",
"err"
] | Run command `cmd`, return stdout, or stdout, stderr if `ret_err`
Roughly equivalent to ``check_output`` in Python 2.7
Parameters
----------
cmd : sequence
command to execute
ret_err : bool, optional
If True, return stderr in addition to stdout. If False, just return
stdout
as_str : bool, optional
Whether to decode outputs to unicode string on exit.
raise_err : None or bool, optional
If True, raise RuntimeError for non-zero return code. If None, set to
True when `ret_err` is False, False if `ret_err` is True
Returns
-------
out : str or tuple
If `ret_err` is False, return stripped string containing stdout from
`cmd`. If `ret_err` is True, return tuple of (stdout, stderr) where
``stdout`` is the stripped stdout, and ``stderr`` is the stripped
stderr.
Raises
------
Raises RuntimeError if command returns non-zero exit code and `raise_err`
is True | [
"Run",
"command",
"cmd",
"return",
"stdout",
"or",
"stdout",
"stderr",
"if",
"ret_err"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L17-L69 |
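Hedged usage of `back_tick`; 'echo' is assumed to be on PATH, and the import path follows the record's repo layout:

    from delocate.tools import back_tick  # import path taken from the record

    out = back_tick(['echo', 'hello'])          # stdout only; raises on failure
    out, err = back_tick(['echo', 'hello'], ret_err=True)  # no raise by default
    raw = back_tick(['echo', 'hello'], as_str=False)        # undecoded bytes
    print(out, err == '', raw)                  # hello True b'hello'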
451 | matthew-brett/delocate | delocate/tools.py | unique_by_index | def unique_by_index(sequence):
""" unique elements in `sequence` in the order in which they occur
Parameters
----------
sequence : iterable
Returns
-------
uniques : list
unique elements of sequence, ordered by the order in which the element
occurs in `sequence`
"""
uniques = []
for element in sequence:
if element not in uniques:
uniques.append(element)
return uniques | python | def unique_by_index(sequence):
uniques = []
for element in sequence:
if element not in uniques:
uniques.append(element)
return uniques | [
"def",
"unique_by_index",
"(",
"sequence",
")",
":",
"uniques",
"=",
"[",
"]",
"for",
"element",
"in",
"sequence",
":",
"if",
"element",
"not",
"in",
"uniques",
":",
"uniques",
".",
"append",
"(",
"element",
")",
"return",
"uniques"
] | unique elements in `sequence` in the order in which they occur
Parameters
----------
sequence : iterable
Returns
-------
uniques : list
unique elements of sequence, ordered by the order in which the element
occurs in `sequence` | [
"unique",
"elements",
"in",
"sequence",
"in",
"the",
"order",
"in",
"which",
"they",
"occur"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L72-L89 |
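For contrast with set(), which loses ordering, `unique_by_index` keeps first occurrences; note the `element not in uniques` scan makes it O(n**2), which is fine for the short install-name lists it serves:

    from delocate.tools import unique_by_index  # import path from the record

    print(unique_by_index([3, 1, 3, 2, 1]))     # [3, 1, 2] -- first-seen order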
452 | matthew-brett/delocate | delocate/tools.py | ensure_permissions | def ensure_permissions(mode_flags=stat.S_IWUSR):
"""decorator to ensure a filename has given permissions.
If changed, original permissions are restored after the decorated
modification.
"""
def decorator(f):
def modify(filename, *args, **kwargs):
m = chmod_perms(filename) if exists(filename) else mode_flags
if not m & mode_flags:
os.chmod(filename, m | mode_flags)
try:
return f(filename, *args, **kwargs)
finally:
# restore original permissions
if not m & mode_flags:
os.chmod(filename, m)
return modify
return decorator | python | def ensure_permissions(mode_flags=stat.S_IWUSR):
def decorator(f):
def modify(filename, *args, **kwargs):
m = chmod_perms(filename) if exists(filename) else mode_flags
if not m & mode_flags:
os.chmod(filename, m | mode_flags)
try:
return f(filename, *args, **kwargs)
finally:
# restore original permissions
if not m & mode_flags:
os.chmod(filename, m)
return modify
return decorator | [
"def",
"ensure_permissions",
"(",
"mode_flags",
"=",
"stat",
".",
"S_IWUSR",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"def",
"modify",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"m",
"=",
"chmod_perms",
"(",
"filename",
")",
"if",
"exists",
"(",
"filename",
")",
"else",
"mode_flags",
"if",
"not",
"m",
"&",
"mode_flags",
":",
"os",
".",
"chmod",
"(",
"filename",
",",
"m",
"|",
"mode_flags",
")",
"try",
":",
"return",
"f",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"# restore original permissions",
"if",
"not",
"m",
"&",
"mode_flags",
":",
"os",
".",
"chmod",
"(",
"filename",
",",
"m",
")",
"return",
"modify",
"return",
"decorator"
] | decorator to ensure a filename has given permissions.
If changed, original permissions are restored after the decorated
modification. | [
"decorator",
"to",
"ensure",
"a",
"filename",
"has",
"given",
"permissions",
"."
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L97-L117 |
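A sketch of how the `ensure_permissions` decorator might be applied; the decorated function must take the filename as its first argument, and the target path here is hypothetical:

```python
import stat
from delocate.tools import ensure_permissions

@ensure_permissions(stat.S_IWUSR)  # guarantee user-write during the call
def append_marker(filename):
    with open(filename, 'a') as fobj:
        fobj.write('# patched\n')

append_marker('some_read_only_file.txt')  # original mode restored afterwards
```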
453 | matthew-brett/delocate | delocate/tools.py | get_install_names | def get_install_names(filename):
""" Return install names from library named in `filename`
Returns tuple of install names
tuple will be empty if no install names, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_names : tuple
tuple of install names for library `filename`
"""
lines = _cmd_out_err(['otool', '-L', filename])
if not _line0_says_object(lines[0], filename):
return ()
names = tuple(parse_install_name(line)[0] for line in lines[1:])
install_id = get_install_id(filename)
if not install_id is None:
assert names[0] == install_id
return names[1:]
return names | python | def get_install_names(filename):
lines = _cmd_out_err(['otool', '-L', filename])
if not _line0_says_object(lines[0], filename):
return ()
names = tuple(parse_install_name(line)[0] for line in lines[1:])
install_id = get_install_id(filename)
if not install_id is None:
assert names[0] == install_id
return names[1:]
return names | [
"def",
"get_install_names",
"(",
"filename",
")",
":",
"lines",
"=",
"_cmd_out_err",
"(",
"[",
"'otool'",
",",
"'-L'",
",",
"filename",
"]",
")",
"if",
"not",
"_line0_says_object",
"(",
"lines",
"[",
"0",
"]",
",",
"filename",
")",
":",
"return",
"(",
")",
"names",
"=",
"tuple",
"(",
"parse_install_name",
"(",
"line",
")",
"[",
"0",
"]",
"for",
"line",
"in",
"lines",
"[",
"1",
":",
"]",
")",
"install_id",
"=",
"get_install_id",
"(",
"filename",
")",
"if",
"not",
"install_id",
"is",
"None",
":",
"assert",
"names",
"[",
"0",
"]",
"==",
"install_id",
"return",
"names",
"[",
"1",
":",
"]",
"return",
"names"
] | Return install names from library named in `filename`
Returns tuple of install names
tuple will be empty if no install names, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_names : tuple
tuple of install names for library `filename` | [
"Return",
"install",
"names",
"from",
"library",
"named",
"in",
"filename"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L197-L222 |
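A usage sketch for `get_install_names` (macOS only, since it shells out to `otool -L`; the library name is hypothetical):

```python
from delocate.tools import get_install_names

for name in get_install_names('libexample.dylib'):
    print(name)  # e.g. /usr/lib/libSystem.B.dylib
```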
454 | matthew-brett/delocate | delocate/tools.py | get_install_id | def get_install_id(filename):
""" Return install id from library named in `filename`
Returns None if no install id, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_id : str
install id of library `filename`, or None if no install id
"""
lines = _cmd_out_err(['otool', '-D', filename])
if not _line0_says_object(lines[0], filename):
return None
if len(lines) == 1:
return None
if len(lines) != 2:
raise InstallNameError('Unexpected otool output ' + '\n'.join(lines))
return lines[1].strip() | python | def get_install_id(filename):
lines = _cmd_out_err(['otool', '-D', filename])
if not _line0_says_object(lines[0], filename):
return None
if len(lines) == 1:
return None
if len(lines) != 2:
raise InstallNameError('Unexpected otool output ' + '\n'.join(lines))
return lines[1].strip() | [
"def",
"get_install_id",
"(",
"filename",
")",
":",
"lines",
"=",
"_cmd_out_err",
"(",
"[",
"'otool'",
",",
"'-D'",
",",
"filename",
"]",
")",
"if",
"not",
"_line0_says_object",
"(",
"lines",
"[",
"0",
"]",
",",
"filename",
")",
":",
"return",
"None",
"if",
"len",
"(",
"lines",
")",
"==",
"1",
":",
"return",
"None",
"if",
"len",
"(",
"lines",
")",
"!=",
"2",
":",
"raise",
"InstallNameError",
"(",
"'Unexpected otool output '",
"+",
"'\\n'",
".",
"join",
"(",
"lines",
")",
")",
"return",
"lines",
"[",
"1",
"]",
".",
"strip",
"(",
")"
] | Return install id from library named in `filename`
Returns None if no install id, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_id : str
install id of library `filename`, or None if no install id | [
"Return",
"install",
"id",
"from",
"library",
"named",
"in",
"filename"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L225-L247 |
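`get_install_id` pairs with the above; a sketch with a hypothetical library:

```python
from delocate.tools import get_install_id

install_id = get_install_id('libexample.dylib')
if install_id is None:
    print('no install id (or not an object file)')
```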
455 | matthew-brett/delocate | delocate/tools.py | set_install_name | def set_install_name(filename, oldname, newname):
""" Set install name `oldname` to `newname` in library filename
Parameters
----------
filename : str
filename of library
oldname : str
current install name in library
newname : str
replacement name for `oldname`
"""
names = get_install_names(filename)
if oldname not in names:
raise InstallNameError('{0} not in install names for {1}'.format(
oldname, filename))
back_tick(['install_name_tool', '-change', oldname, newname, filename]) | python | def set_install_name(filename, oldname, newname):
names = get_install_names(filename)
if oldname not in names:
raise InstallNameError('{0} not in install names for {1}'.format(
oldname, filename))
back_tick(['install_name_tool', '-change', oldname, newname, filename]) | [
"def",
"set_install_name",
"(",
"filename",
",",
"oldname",
",",
"newname",
")",
":",
"names",
"=",
"get_install_names",
"(",
"filename",
")",
"if",
"oldname",
"not",
"in",
"names",
":",
"raise",
"InstallNameError",
"(",
"'{0} not in install names for {1}'",
".",
"format",
"(",
"oldname",
",",
"filename",
")",
")",
"back_tick",
"(",
"[",
"'install_name_tool'",
",",
"'-change'",
",",
"oldname",
",",
"newname",
",",
"filename",
"]",
")"
] | Set install name `oldname` to `newname` in library filename
Parameters
----------
filename : str
filename of library
oldname : str
current install name in library
newname : str
replacement name for `oldname` | [
"Set",
"install",
"name",
"oldname",
"to",
"newname",
"in",
"library",
"filename"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L251-L267 |
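A sketch of retargeting a dependency with `set_install_name` (hypothetical paths; requires Apple's `install_name_tool`):

```python
from delocate.tools import set_install_name

# point the extension module at a relocatable copy of its dependency
set_install_name('mymodule.so',
                 '/usr/local/lib/libfoo.dylib',
                 '@loader_path/.dylibs/libfoo.dylib')
```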
456 | matthew-brett/delocate | delocate/tools.py | set_install_id | def set_install_id(filename, install_id):
""" Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id
"""
if get_install_id(filename) is None:
raise InstallNameError('{0} has no install id'.format(filename))
back_tick(['install_name_tool', '-id', install_id, filename]) | python | def set_install_id(filename, install_id):
if get_install_id(filename) is None:
raise InstallNameError('{0} has no install id'.format(filename))
back_tick(['install_name_tool', '-id', install_id, filename]) | [
"def",
"set_install_id",
"(",
"filename",
",",
"install_id",
")",
":",
"if",
"get_install_id",
"(",
"filename",
")",
"is",
"None",
":",
"raise",
"InstallNameError",
"(",
"'{0} has no install id'",
".",
"format",
"(",
"filename",
")",
")",
"back_tick",
"(",
"[",
"'install_name_tool'",
",",
"'-id'",
",",
"install_id",
",",
"filename",
"]",
")"
] | Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id | [
"Set",
"install",
"id",
"for",
"library",
"named",
"in",
"filename"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L271-L287 |
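The matching sketch for `set_install_id` (hypothetical library; per the code above, a file without an install id raises `InstallNameError`):

```python
from delocate.tools import set_install_id

set_install_id('libexample.dylib', '@rpath/libexample.dylib')
```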
457 | matthew-brett/delocate | delocate/tools.py | get_rpaths | def get_rpaths(filename):
""" Return a tuple of rpaths from the library `filename`
If `filename` is not a library then the returned tuple will be empty.
Parameters
----------
filename : str
filename of library
Returns
-------
rpath : tuple
rpath paths in `filename`
"""
try:
lines = _cmd_out_err(['otool', '-l', filename])
except RuntimeError:
return ()
if not _line0_says_object(lines[0], filename):
return ()
lines = [line.strip() for line in lines]
paths = []
line_no = 1
while line_no < len(lines):
line = lines[line_no]
line_no += 1
if line != 'cmd LC_RPATH':
continue
cmdsize, path = lines[line_no:line_no+2]
assert cmdsize.startswith('cmdsize ')
paths.append(RPATH_RE.match(path).groups()[0])
line_no += 2
return tuple(paths) | python | def get_rpaths(filename):
try:
lines = _cmd_out_err(['otool', '-l', filename])
except RuntimeError:
return ()
if not _line0_says_object(lines[0], filename):
return ()
lines = [line.strip() for line in lines]
paths = []
line_no = 1
while line_no < len(lines):
line = lines[line_no]
line_no += 1
if line != 'cmd LC_RPATH':
continue
cmdsize, path = lines[line_no:line_no+2]
assert cmdsize.startswith('cmdsize ')
paths.append(RPATH_RE.match(path).groups()[0])
line_no += 2
return tuple(paths) | [
"def",
"get_rpaths",
"(",
"filename",
")",
":",
"try",
":",
"lines",
"=",
"_cmd_out_err",
"(",
"[",
"'otool'",
",",
"'-l'",
",",
"filename",
"]",
")",
"except",
"RuntimeError",
":",
"return",
"(",
")",
"if",
"not",
"_line0_says_object",
"(",
"lines",
"[",
"0",
"]",
",",
"filename",
")",
":",
"return",
"(",
")",
"lines",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"lines",
"]",
"paths",
"=",
"[",
"]",
"line_no",
"=",
"1",
"while",
"line_no",
"<",
"len",
"(",
"lines",
")",
":",
"line",
"=",
"lines",
"[",
"line_no",
"]",
"line_no",
"+=",
"1",
"if",
"line",
"!=",
"'cmd LC_RPATH'",
":",
"continue",
"cmdsize",
",",
"path",
"=",
"lines",
"[",
"line_no",
":",
"line_no",
"+",
"2",
"]",
"assert",
"cmdsize",
".",
"startswith",
"(",
"'cmdsize '",
")",
"paths",
".",
"append",
"(",
"RPATH_RE",
".",
"match",
"(",
"path",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"line_no",
"+=",
"2",
"return",
"tuple",
"(",
"paths",
")"
] | Return a tuple of rpaths from the library `filename`
If `filename` is not a library then the returned tuple will be empty.
Parameters
----------
filename : str
filename of library
Returns
-------
rpath : tuple
rpath paths in `filename` | [
"Return",
"a",
"tuple",
"of",
"rpaths",
"from",
"the",
"library",
"filename"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L292-L325 |
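A sketch for `get_rpaths`; note the function returns an empty tuple rather than raising for non-library files (binary name hypothetical):

```python
from delocate.tools import get_rpaths

for rpath in get_rpaths('mymodule.so'):
    print(rpath)  # e.g. @loader_path/../lib
```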
458 | matthew-brett/delocate | delocate/tools.py | dir2zip | def dir2zip(in_dir, zip_fname):
""" Make a zip file `zip_fname` with contents of directory `in_dir`
The recorded filenames are relative to `in_dir`, so doing a standard zip
unpack of the resulting `zip_fname` in an empty directory will result in
the original directory contents.
Parameters
----------
in_dir : str
Directory path containing files to go in the zip archive
zip_fname : str
Filename of zip archive to write
"""
z = zipfile.ZipFile(zip_fname, 'w',
compression=zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(in_dir):
for file in files:
in_fname = pjoin(root, file)
in_stat = os.stat(in_fname)
# Preserve file permissions, but allow copy
info = zipfile.ZipInfo(in_fname)
info.filename = relpath(in_fname, in_dir)
if os.path.sep == '\\':
# Make the path unix friendly on windows.
# PyPI won't accept wheels with windows path separators
info.filename = relpath(in_fname, in_dir).replace('\\', '/')
# Set time from modification time
info.date_time = time.localtime(in_stat.st_mtime)
# See https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/48435482#48435482
# Also set regular file permissions
perms = stat.S_IMODE(in_stat.st_mode) | stat.S_IFREG
info.external_attr = perms << 16
with open_readable(in_fname, 'rb') as fobj:
contents = fobj.read()
z.writestr(info, contents, zipfile.ZIP_DEFLATED)
z.close() | python | def dir2zip(in_dir, zip_fname):
z = zipfile.ZipFile(zip_fname, 'w',
compression=zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(in_dir):
for file in files:
in_fname = pjoin(root, file)
in_stat = os.stat(in_fname)
# Preserve file permissions, but allow copy
info = zipfile.ZipInfo(in_fname)
info.filename = relpath(in_fname, in_dir)
if os.path.sep == '\\':
# Make the path unix friendly on windows.
# PyPI won't accept wheels with windows path separators
info.filename = relpath(in_fname, in_dir).replace('\\', '/')
# Set time from modification time
info.date_time = time.localtime(in_stat.st_mtime)
# See https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/48435482#48435482
# Also set regular file permissions
perms = stat.S_IMODE(in_stat.st_mode) | stat.S_IFREG
info.external_attr = perms << 16
with open_readable(in_fname, 'rb') as fobj:
contents = fobj.read()
z.writestr(info, contents, zipfile.ZIP_DEFLATED)
z.close() | [
"def",
"dir2zip",
"(",
"in_dir",
",",
"zip_fname",
")",
":",
"z",
"=",
"zipfile",
".",
"ZipFile",
"(",
"zip_fname",
",",
"'w'",
",",
"compression",
"=",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"in_dir",
")",
":",
"for",
"file",
"in",
"files",
":",
"in_fname",
"=",
"pjoin",
"(",
"root",
",",
"file",
")",
"in_stat",
"=",
"os",
".",
"stat",
"(",
"in_fname",
")",
"# Preserve file permissions, but allow copy",
"info",
"=",
"zipfile",
".",
"ZipInfo",
"(",
"in_fname",
")",
"info",
".",
"filename",
"=",
"relpath",
"(",
"in_fname",
",",
"in_dir",
")",
"if",
"os",
".",
"path",
".",
"sep",
"==",
"'\\\\'",
":",
"# Make the path unix friendly on windows.",
"# PyPI won't accept wheels with windows path separators",
"info",
".",
"filename",
"=",
"relpath",
"(",
"in_fname",
",",
"in_dir",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"# Set time from modification time",
"info",
".",
"date_time",
"=",
"time",
".",
"localtime",
"(",
"in_stat",
".",
"st_mtime",
")",
"# See https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/48435482#48435482",
"# Also set regular file permissions",
"perms",
"=",
"stat",
".",
"S_IMODE",
"(",
"in_stat",
".",
"st_mode",
")",
"|",
"stat",
".",
"S_IFREG",
"info",
".",
"external_attr",
"=",
"perms",
"<<",
"16",
"with",
"open_readable",
"(",
"in_fname",
",",
"'rb'",
")",
"as",
"fobj",
":",
"contents",
"=",
"fobj",
".",
"read",
"(",
")",
"z",
".",
"writestr",
"(",
"info",
",",
"contents",
",",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"z",
".",
"close",
"(",
")"
] | Make a zip file `zip_fname` with contents of directory `in_dir`
The recorded filenames are relative to `in_dir`, so doing a standard zip
unpack of the resulting `zip_fname` in an empty directory will result in
the original directory contents.
Parameters
----------
in_dir : str
Directory path containing files to go in the zip archive
zip_fname : str
Filename of zip archive to write | [
"Make",
"a",
"zip",
"file",
"zip_fname",
"with",
"contents",
"of",
"directory",
"in_dir"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L357-L393 |
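A sketch for `dir2zip`; member names are stored relative to the input directory, so a standard unpack reproduces the tree (paths hypothetical):

```python
from delocate.tools import dir2zip

dir2zip('unpacked_wheel', 'example-1.0-py3-none-any.whl')
```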
459 | matthew-brett/delocate | delocate/tools.py | find_package_dirs | def find_package_dirs(root_path):
""" Find python package directories in directory `root_path`
Parameters
----------
root_path : str
Directory to search for package subdirectories
Returns
-------
package_sdirs : set
Set of strings where each is a subdirectory of `root_path`, containing
an ``__init__.py`` file. Paths prefixed by `root_path`
"""
package_sdirs = set()
for entry in os.listdir(root_path):
fname = entry if root_path == '.' else pjoin(root_path, entry)
if isdir(fname) and exists(pjoin(fname, '__init__.py')):
package_sdirs.add(fname)
return package_sdirs | python | def find_package_dirs(root_path):
package_sdirs = set()
for entry in os.listdir(root_path):
fname = entry if root_path == '.' else pjoin(root_path, entry)
if isdir(fname) and exists(pjoin(fname, '__init__.py')):
package_sdirs.add(fname)
return package_sdirs | [
"def",
"find_package_dirs",
"(",
"root_path",
")",
":",
"package_sdirs",
"=",
"set",
"(",
")",
"for",
"entry",
"in",
"os",
".",
"listdir",
"(",
"root_path",
")",
":",
"fname",
"=",
"entry",
"if",
"root_path",
"==",
"'.'",
"else",
"pjoin",
"(",
"root_path",
",",
"entry",
")",
"if",
"isdir",
"(",
"fname",
")",
"and",
"exists",
"(",
"pjoin",
"(",
"fname",
",",
"'__init__.py'",
")",
")",
":",
"package_sdirs",
".",
"add",
"(",
"fname",
")",
"return",
"package_sdirs"
] | Find python package directories in directory `root_path`
Parameters
----------
root_path : str
Directory to search for package subdirectories
Returns
-------
package_sdirs : set
Set of strings where each is a subdirectory of `root_path`, containing
an ``__init__.py`` file. Paths prefixed by `root_path` | [
"Find",
"python",
"package",
"directories",
"in",
"directory",
"root_path"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L396-L415 |
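A sketch for `find_package_dirs`:

```python
from delocate.tools import find_package_dirs

for pkg_dir in sorted(find_package_dirs('.')):
    print(pkg_dir)  # each listed directory contains an __init__.py
```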
460 | matthew-brett/delocate | delocate/tools.py | cmp_contents | def cmp_contents(filename1, filename2):
""" Returns True if contents of the files are the same
Parameters
----------
filename1 : str
filename of first file to compare
filename2 : str
filename of second file to compare
Returns
-------
tf : bool
True if binary contents of `filename1` is same as binary contents of
`filename2`, False otherwise.
"""
with open_readable(filename1, 'rb') as fobj:
contents1 = fobj.read()
with open_readable(filename2, 'rb') as fobj:
contents2 = fobj.read()
return contents1 == contents2 | python | def cmp_contents(filename1, filename2):
with open_readable(filename1, 'rb') as fobj:
contents1 = fobj.read()
with open_readable(filename2, 'rb') as fobj:
contents2 = fobj.read()
return contents1 == contents2 | [
"def",
"cmp_contents",
"(",
"filename1",
",",
"filename2",
")",
":",
"with",
"open_readable",
"(",
"filename1",
",",
"'rb'",
")",
"as",
"fobj",
":",
"contents1",
"=",
"fobj",
".",
"read",
"(",
")",
"with",
"open_readable",
"(",
"filename2",
",",
"'rb'",
")",
"as",
"fobj",
":",
"contents2",
"=",
"fobj",
".",
"read",
"(",
")",
"return",
"contents1",
"==",
"contents2"
] | Returns True if contents of the files are the same
Parameters
----------
filename1 : str
filename of first file to compare
filename2 : str
filename of second file to compare
Returns
-------
tf : bool
True if binary contents of `filename1` is same as binary contents of
`filename2`, False otherwise. | [
"Returns",
"True",
"if",
"contents",
"of",
"the",
"files",
"are",
"the",
"same"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L418-L438 |
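A sketch for `cmp_contents` (hypothetical paths):

```python
from delocate.tools import cmp_contents

if not cmp_contents('build/libfoo.dylib', 'dist/libfoo.dylib'):
    print('binaries differ')
```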
461 | matthew-brett/delocate | delocate/tools.py | get_archs | def get_archs(libname):
""" Return architecture types from library `libname`
Parameters
----------
libname : str
filename of binary for which to return arch codes
Returns
-------
arch_names : frozenset
Empty (frozen)set if no arch codes. If not empty, contains one or more
of 'ppc', 'ppc64', 'i386', 'x86_64'
"""
if not exists(libname):
raise RuntimeError(libname + " is not a file")
try:
stdout = back_tick(['lipo', '-info', libname])
except RuntimeError:
return frozenset()
lines = [line.strip() for line in stdout.split('\n') if line.strip()]
# For some reason, output from lipo -info on a .a file generates this line
if lines[0] == "input file {0} is not a fat file".format(libname):
line = lines[1]
else:
assert len(lines) == 1
line = lines[0]
for reggie in (
'Non-fat file: {0} is architecture: (.*)'.format(libname),
'Architectures in the fat file: {0} are: (.*)'.format(libname)):
reggie = re.compile(reggie)
match = reggie.match(line)
if not match is None:
return frozenset(match.groups()[0].split(' '))
raise ValueError("Unexpected output: '{0}' for {1}".format(
stdout, libname)) | python | def get_archs(libname):
if not exists(libname):
raise RuntimeError(libname + " is not a file")
try:
stdout = back_tick(['lipo', '-info', libname])
except RuntimeError:
return frozenset()
lines = [line.strip() for line in stdout.split('\n') if line.strip()]
# For some reason, output from lipo -info on a .a file generates this line
if lines[0] == "input file {0} is not a fat file".format(libname):
line = lines[1]
else:
assert len(lines) == 1
line = lines[0]
for reggie in (
'Non-fat file: {0} is architecture: (.*)'.format(libname),
'Architectures in the fat file: {0} are: (.*)'.format(libname)):
reggie = re.compile(reggie)
match = reggie.match(line)
if not match is None:
return frozenset(match.groups()[0].split(' '))
raise ValueError("Unexpected output: '{0}' for {1}".format(
stdout, libname)) | [
"def",
"get_archs",
"(",
"libname",
")",
":",
"if",
"not",
"exists",
"(",
"libname",
")",
":",
"raise",
"RuntimeError",
"(",
"libname",
"+",
"\" is not a file\"",
")",
"try",
":",
"stdout",
"=",
"back_tick",
"(",
"[",
"'lipo'",
",",
"'-info'",
",",
"libname",
"]",
")",
"except",
"RuntimeError",
":",
"return",
"frozenset",
"(",
")",
"lines",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"stdout",
".",
"split",
"(",
"'\\n'",
")",
"if",
"line",
".",
"strip",
"(",
")",
"]",
"# For some reason, output from lipo -info on .a file generates this line",
"if",
"lines",
"[",
"0",
"]",
"==",
"\"input file {0} is not a fat file\"",
".",
"format",
"(",
"libname",
")",
":",
"line",
"=",
"lines",
"[",
"1",
"]",
"else",
":",
"assert",
"len",
"(",
"lines",
")",
"==",
"1",
"line",
"=",
"lines",
"[",
"0",
"]",
"for",
"reggie",
"in",
"(",
"'Non-fat file: {0} is architecture: (.*)'",
".",
"format",
"(",
"libname",
")",
",",
"'Architectures in the fat file: {0} are: (.*)'",
".",
"format",
"(",
"libname",
")",
")",
":",
"reggie",
"=",
"re",
".",
"compile",
"(",
"reggie",
")",
"match",
"=",
"reggie",
".",
"match",
"(",
"line",
")",
"if",
"not",
"match",
"is",
"None",
":",
"return",
"frozenset",
"(",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"' '",
")",
")",
"raise",
"ValueError",
"(",
"\"Unexpected output: '{0}' for {1}\"",
".",
"format",
"(",
"stdout",
",",
"libname",
")",
")"
] | Return architecture types from library `libname`
Parameters
----------
libname : str
filename of binary for which to return arch codes
Returns
-------
arch_names : frozenset
Empty (frozen)set if no arch codes. If not empty, contains one or more
of 'ppc', 'ppc64', 'i386', 'x86_64' | [
"Return",
"architecture",
"types",
"from",
"library",
"libname"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L441-L476 |
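A sketch for `get_archs` (macOS only, shells out to `lipo -info`; library name hypothetical). The frozenset result supports ordinary set operators:

```python
from delocate.tools import get_archs

archs = get_archs('libexample.dylib')
if archs >= {'i386', 'x86_64'}:
    print('universal (fat) binary')
```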
462 | matthew-brett/delocate | delocate/tools.py | validate_signature | def validate_signature(filename):
""" Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file
"""
out, err = back_tick(['codesign', '--verify', filename],
ret_err=True, as_str=True, raise_err=False)
if not err:
return # The existing signature is valid
if 'code object is not signed at all' in err:
return # File has no signature, and adding a new one isn't necessary
# This file's signature is invalid and needs to be replaced
replace_signature(filename, '-') | python | def validate_signature(filename):
out, err = back_tick(['codesign', '--verify', filename],
ret_err=True, as_str=True, raise_err=False)
if not err:
return # The existing signature is valid
if 'code object is not signed at all' in err:
return # File has no signature, and adding a new one isn't necessary
# This file's signature is invalid and needs to be replaced
replace_signature(filename, '-') | [
"def",
"validate_signature",
"(",
"filename",
")",
":",
"out",
",",
"err",
"=",
"back_tick",
"(",
"[",
"'codesign'",
",",
"'--verify'",
",",
"filename",
"]",
",",
"ret_err",
"=",
"True",
",",
"as_str",
"=",
"True",
",",
"raise_err",
"=",
"False",
")",
"if",
"not",
"err",
":",
"return",
"# The existing signature is valid",
"if",
"'code object is not signed at all'",
"in",
"err",
":",
"return",
"# File has no signature, and adding a new one isn't necessary",
"# This file's signature is invalid and needs to be replaced",
"replace_signature",
"(",
"filename",
",",
"'-'",
")"
] | Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file | [
"Remove",
"invalid",
"signatures",
"from",
"a",
"binary",
"file"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L513-L534 |
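A sketch for `validate_signature` (macOS only, needs `codesign`; file name hypothetical):

```python
from delocate.tools import validate_signature

# no-op for unsigned or validly signed files; re-signs ad-hoc otherwise
validate_signature('libexample.dylib')
```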
463 | matthew-brett/delocate | delocate/fuse.py | fuse_trees | def fuse_trees(to_tree, from_tree, lib_exts=('.so', '.dylib', '.a')):
""" Fuse path `from_tree` into path `to_tree`
For each file in `from_tree` - check for library file extension (in
`lib_exts`) - if present, check if there is a file with matching relative
path in `to_tree`, if so, use :func:`delocate.tools.lipo_fuse` to fuse the
two libraries together and write into `to_tree`. If any of these
conditions are not met, just copy the file from `from_tree` to `to_tree`.
Parameters
----------
to_tree : str
path of tree to fuse into (update into)
from_tree : str
path of tree to fuse from (update from)
lib_exts : sequence, optional
filename extensions for libraries
"""
for from_dirpath, dirnames, filenames in os.walk(from_tree):
to_dirpath = pjoin(to_tree, relpath(from_dirpath, from_tree))
# Copy any missing directories in to_path
for dirname in tuple(dirnames):
to_path = pjoin(to_dirpath, dirname)
if not exists(to_path):
from_path = pjoin(from_dirpath, dirname)
shutil.copytree(from_path, to_path)
# If copying, don't further analyze this directory
dirnames.remove(dirname)
for fname in filenames:
root, ext = splitext(fname)
from_path = pjoin(from_dirpath, fname)
to_path = pjoin(to_dirpath, fname)
if not exists(to_path):
_copyfile(from_path, to_path)
elif cmp_contents(from_path, to_path):
pass
elif ext in lib_exts:
# existing lib that needs fuse
lipo_fuse(from_path, to_path, to_path)
else:
# existing not-lib file not identical to source
_copyfile(from_path, to_path) | python | def fuse_trees(to_tree, from_tree, lib_exts=('.so', '.dylib', '.a')):
for from_dirpath, dirnames, filenames in os.walk(from_tree):
to_dirpath = pjoin(to_tree, relpath(from_dirpath, from_tree))
# Copy any missing directories in to_path
for dirname in tuple(dirnames):
to_path = pjoin(to_dirpath, dirname)
if not exists(to_path):
from_path = pjoin(from_dirpath, dirname)
shutil.copytree(from_path, to_path)
# If copying, don't further analyze this directory
dirnames.remove(dirname)
for fname in filenames:
root, ext = splitext(fname)
from_path = pjoin(from_dirpath, fname)
to_path = pjoin(to_dirpath, fname)
if not exists(to_path):
_copyfile(from_path, to_path)
elif cmp_contents(from_path, to_path):
pass
elif ext in lib_exts:
# existing lib that needs fuse
lipo_fuse(from_path, to_path, to_path)
else:
# existing not-lib file not identical to source
_copyfile(from_path, to_path) | [
"def",
"fuse_trees",
"(",
"to_tree",
",",
"from_tree",
",",
"lib_exts",
"=",
"(",
"'.so'",
",",
"'.dylib'",
",",
"'.a'",
")",
")",
":",
"for",
"from_dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"from_tree",
")",
":",
"to_dirpath",
"=",
"pjoin",
"(",
"to_tree",
",",
"relpath",
"(",
"from_dirpath",
",",
"from_tree",
")",
")",
"# Copy any missing directories in to_path",
"for",
"dirname",
"in",
"tuple",
"(",
"dirnames",
")",
":",
"to_path",
"=",
"pjoin",
"(",
"to_dirpath",
",",
"dirname",
")",
"if",
"not",
"exists",
"(",
"to_path",
")",
":",
"from_path",
"=",
"pjoin",
"(",
"from_dirpath",
",",
"dirname",
")",
"shutil",
".",
"copytree",
"(",
"from_path",
",",
"to_path",
")",
"# If copying, don't further analyze this directory",
"dirnames",
".",
"remove",
"(",
"dirname",
")",
"for",
"fname",
"in",
"filenames",
":",
"root",
",",
"ext",
"=",
"splitext",
"(",
"fname",
")",
"from_path",
"=",
"pjoin",
"(",
"from_dirpath",
",",
"fname",
")",
"to_path",
"=",
"pjoin",
"(",
"to_dirpath",
",",
"fname",
")",
"if",
"not",
"exists",
"(",
"to_path",
")",
":",
"_copyfile",
"(",
"from_path",
",",
"to_path",
")",
"elif",
"cmp_contents",
"(",
"from_path",
",",
"to_path",
")",
":",
"pass",
"elif",
"ext",
"in",
"lib_exts",
":",
"# existing lib that needs fuse",
"lipo_fuse",
"(",
"from_path",
",",
"to_path",
",",
"to_path",
")",
"else",
":",
"# existing not-lib file not identical to source",
"_copyfile",
"(",
"from_path",
",",
"to_path",
")"
] | Fuse path `from_tree` into path `to_tree`
For each file in `from_tree` - check for library file extension (in
`lib_exts`) - if present, check if there is a file with matching relative
path in `to_tree`, if so, use :func:`delocate.tools.lipo_fuse` to fuse the
two libraries together and write into `to_tree`. If any of these
conditions are not met, just copy the file from `from_tree` to `to_tree`.
Parameters
----------
to_tree : str
path of tree to fuse into (update into)
from_tree : str
path of tree to fuse from (update from)
lib_exts : sequence, optional
filename extensions for libraries | [
"Fuse",
"path",
"from_tree",
"into",
"path",
"to_tree"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/fuse.py#L36-L77 |
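A sketch for `fuse_trees`, merging one unpacked wheel tree into another in place (directory names hypothetical):

```python
from delocate.fuse import fuse_trees

# library files that differ between the trees are lipo-fused;
# identical files are skipped and everything else is copied across
fuse_trees('wheel_x86_64', 'wheel_arm64')
```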
464 | matthew-brett/delocate | delocate/fuse.py | fuse_wheels | def fuse_wheels(to_wheel, from_wheel, out_wheel):
""" Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
----------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel`
"""
to_wheel, from_wheel, out_wheel = [
abspath(w) for w in (to_wheel, from_wheel, out_wheel)]
with InTemporaryDirectory():
zip2dir(to_wheel, 'to_wheel')
zip2dir(from_wheel, 'from_wheel')
fuse_trees('to_wheel', 'from_wheel')
rewrite_record('to_wheel')
dir2zip('to_wheel', out_wheel) | python | def fuse_wheels(to_wheel, from_wheel, out_wheel):
to_wheel, from_wheel, out_wheel = [
abspath(w) for w in (to_wheel, from_wheel, out_wheel)]
with InTemporaryDirectory():
zip2dir(to_wheel, 'to_wheel')
zip2dir(from_wheel, 'from_wheel')
fuse_trees('to_wheel', 'from_wheel')
rewrite_record('to_wheel')
dir2zip('to_wheel', out_wheel) | [
"def",
"fuse_wheels",
"(",
"to_wheel",
",",
"from_wheel",
",",
"out_wheel",
")",
":",
"to_wheel",
",",
"from_wheel",
",",
"out_wheel",
"=",
"[",
"abspath",
"(",
"w",
")",
"for",
"w",
"in",
"(",
"to_wheel",
",",
"from_wheel",
",",
"out_wheel",
")",
"]",
"with",
"InTemporaryDirectory",
"(",
")",
":",
"zip2dir",
"(",
"to_wheel",
",",
"'to_wheel'",
")",
"zip2dir",
"(",
"from_wheel",
",",
"'from_wheel'",
")",
"fuse_trees",
"(",
"'to_wheel'",
",",
"'from_wheel'",
")",
"rewrite_record",
"(",
"'to_wheel'",
")",
"dir2zip",
"(",
"'to_wheel'",
",",
"out_wheel",
")"
] | Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
----------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel` | [
"Fuse",
"from_wheel",
"into",
"to_wheel",
"write",
"to",
"out_wheel"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/fuse.py#L80-L99 |
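A sketch for `fuse_wheels`, combining two single-architecture wheels (filenames hypothetical):

```python
from delocate.fuse import fuse_wheels

fuse_wheels('pkg-1.0-cp39-macosx_10_9_x86_64.whl',
            'pkg-1.0-cp39-macosx_11_0_arm64.whl',
            'pkg-1.0-cp39-macosx_10_9_universal2.whl')
```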
465 | matthew-brett/delocate | delocate/delocating.py | delocate_tree_libs | def delocate_tree_libs(lib_dict, lib_path, root_path):
""" Move needed libraries in `lib_dict` into `lib_path`
`lib_dict` has keys naming libraries required by the files in the
corresponding value. Call the keys, "required libs". Call the values
"requiring objects".
Copy all the required libs to `lib_path`. Fix up the rpaths and install
names in the requiring objects to point to these new copies.
Exception: required libs within the directory tree pointed to by
`root_path` stay where they are, but we modify requiring objects to use
relative paths to these libraries.
Parameters
----------
lib_dict : dict
Dictionary with (key, value) pairs of (``depended_lib_path``,
``dependings_dict``) (see :func:`libsana.tree_libs`)
lib_path : str
Path in which to store copies of libs referred to in keys of
`lib_dict`. Assumed to exist
root_path : str, optional
Root directory of tree analyzed in `lib_dict`. Any required
library within the subtrees of `root_path` does not get copied, but
libraries linking to it have links adjusted to use relative path to
this library.
Returns
-------
copied_libs : dict
Filtered `lib_dict` dict containing only the (key, value) pairs from
`lib_dict` where the keys are the libraries copied to `lib_path`.
"""
copied_libs = {}
delocated_libs = set()
copied_basenames = set()
rp_root_path = realpath(root_path)
rp_lib_path = realpath(lib_path)
# Test for errors first to avoid getting half-way through changing the tree
for required, requirings in lib_dict.items():
if required.startswith('@'): # assume @rpath etc are correct
# But warn, because likely they are not
warnings.warn('Not processing required path {0} because it '
'begins with @'.format(required))
continue
r_ed_base = basename(required)
if relpath(required, rp_root_path).startswith('..'):
# Not local, plan to copy
if r_ed_base in copied_basenames:
raise DelocationError('Already planning to copy library with '
'same basename as: ' + r_ed_base)
if not exists(required):
raise DelocationError('library "{0}" does not exist'.format(
required))
copied_libs[required] = requirings
copied_basenames.add(r_ed_base)
else: # Is local, plan to set relative loader_path
delocated_libs.add(required)
# Modify in place now that we've checked for errors
for required in copied_libs:
shutil.copy(required, lib_path)
# Set rpath and install names for this copied library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(rp_lib_path, dirname(requiring))
set_install_name(requiring, orig_install_name,
'@loader_path/{0}/{1}'.format(
req_rel, basename(required)))
for required in delocated_libs:
# Set relative path for local library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(required, dirname(requiring))
set_install_name(requiring, orig_install_name,
'@loader_path/' + req_rel)
return copied_libs | python | def delocate_tree_libs(lib_dict, lib_path, root_path):
copied_libs = {}
delocated_libs = set()
copied_basenames = set()
rp_root_path = realpath(root_path)
rp_lib_path = realpath(lib_path)
# Test for errors first to avoid getting half-way through changing the tree
for required, requirings in lib_dict.items():
if required.startswith('@'): # assume @rpath etc are correct
# But warn, because likely they are not
warnings.warn('Not processing required path {0} because it '
'begins with @'.format(required))
continue
r_ed_base = basename(required)
if relpath(required, rp_root_path).startswith('..'):
# Not local, plan to copy
if r_ed_base in copied_basenames:
raise DelocationError('Already planning to copy library with '
'same basename as: ' + r_ed_base)
if not exists(required):
raise DelocationError('library "{0}" does not exist'.format(
required))
copied_libs[required] = requirings
copied_basenames.add(r_ed_base)
else: # Is local, plan to set relative loader_path
delocated_libs.add(required)
# Modify in place now that we've checked for errors
for required in copied_libs:
shutil.copy(required, lib_path)
# Set rpath and install names for this copied library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(rp_lib_path, dirname(requiring))
set_install_name(requiring, orig_install_name,
'@loader_path/{0}/{1}'.format(
req_rel, basename(required)))
for required in delocated_libs:
# Set relative path for local library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(required, dirname(requiring))
set_install_name(requiring, orig_install_name,
'@loader_path/' + req_rel)
return copied_libs | [
"def",
"delocate_tree_libs",
"(",
"lib_dict",
",",
"lib_path",
",",
"root_path",
")",
":",
"copied_libs",
"=",
"{",
"}",
"delocated_libs",
"=",
"set",
"(",
")",
"copied_basenames",
"=",
"set",
"(",
")",
"rp_root_path",
"=",
"realpath",
"(",
"root_path",
")",
"rp_lib_path",
"=",
"realpath",
"(",
"lib_path",
")",
"# Test for errors first to avoid getting half-way through changing the tree",
"for",
"required",
",",
"requirings",
"in",
"lib_dict",
".",
"items",
"(",
")",
":",
"if",
"required",
".",
"startswith",
"(",
"'@'",
")",
":",
"# assume @rpath etc are correct",
"# But warn, because likely they are not",
"warnings",
".",
"warn",
"(",
"'Not processing required path {0} because it '",
"'begins with @'",
".",
"format",
"(",
"required",
")",
")",
"continue",
"r_ed_base",
"=",
"basename",
"(",
"required",
")",
"if",
"relpath",
"(",
"required",
",",
"rp_root_path",
")",
".",
"startswith",
"(",
"'..'",
")",
":",
"# Not local, plan to copy",
"if",
"r_ed_base",
"in",
"copied_basenames",
":",
"raise",
"DelocationError",
"(",
"'Already planning to copy library with '",
"'same basename as: '",
"+",
"r_ed_base",
")",
"if",
"not",
"exists",
"(",
"required",
")",
":",
"raise",
"DelocationError",
"(",
"'library \"{0}\" does not exist'",
".",
"format",
"(",
"required",
")",
")",
"copied_libs",
"[",
"required",
"]",
"=",
"requirings",
"copied_basenames",
".",
"add",
"(",
"r_ed_base",
")",
"else",
":",
"# Is local, plan to set relative loader_path",
"delocated_libs",
".",
"add",
"(",
"required",
")",
"# Modify in place now that we've checked for errors",
"for",
"required",
"in",
"copied_libs",
":",
"shutil",
".",
"copy",
"(",
"required",
",",
"lib_path",
")",
"# Set rpath and install names for this copied library",
"for",
"requiring",
",",
"orig_install_name",
"in",
"lib_dict",
"[",
"required",
"]",
".",
"items",
"(",
")",
":",
"req_rel",
"=",
"relpath",
"(",
"rp_lib_path",
",",
"dirname",
"(",
"requiring",
")",
")",
"set_install_name",
"(",
"requiring",
",",
"orig_install_name",
",",
"'@loader_path/{0}/{1}'",
".",
"format",
"(",
"req_rel",
",",
"basename",
"(",
"required",
")",
")",
")",
"for",
"required",
"in",
"delocated_libs",
":",
"# Set relative path for local library",
"for",
"requiring",
",",
"orig_install_name",
"in",
"lib_dict",
"[",
"required",
"]",
".",
"items",
"(",
")",
":",
"req_rel",
"=",
"relpath",
"(",
"required",
",",
"dirname",
"(",
"requiring",
")",
")",
"set_install_name",
"(",
"requiring",
",",
"orig_install_name",
",",
"'@loader_path/'",
"+",
"req_rel",
")",
"return",
"copied_libs"
] | Move needed libraries in `lib_dict` into `lib_path`
`lib_dict` has keys naming libraries required by the files in the
corresponding value. Call the keys, "required libs". Call the values
"requiring objects".
Copy all the required libs to `lib_path`. Fix up the rpaths and install
names in the requiring objects to point to these new copies.
Exception: required libs within the directory tree pointed to by
`root_path` stay where they are, but we modify requiring objects to use
relative paths to these libraries.
Parameters
----------
lib_dict : dict
Dictionary with (key, value) pairs of (``depended_lib_path``,
``dependings_dict``) (see :func:`libsana.tree_libs`)
lib_path : str
Path in which to store copies of libs referred to in keys of
`lib_dict`. Assumed to exist
root_path : str, optional
Root directory of tree analyzed in `lib_dict`. Any required
library within the subtrees of `root_path` does not get copied, but
libraries linking to it have links adjusted to use relative path to
this library.
Returns
-------
copied_libs : dict
Filtered `lib_dict` dict containing only the (key, value) pairs from
`lib_dict` where the keys are the libraries copied to `lib_path`. | [
"Move",
"needed",
"libraries",
"in",
"lib_dict",
"into",
"lib_path"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L27-L101 |
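A sketch of driving `delocate_tree_libs` by hand with `libsana.tree_libs` (referenced in the docstring above); paths are hypothetical, and `lib_path` must already exist:

```python
import os
from delocate.libsana import tree_libs
from delocate.delocating import delocate_tree_libs

lib_path = 'unpacked_wheel/package/.dylibs'
os.makedirs(lib_path, exist_ok=True)
lib_dict = tree_libs('unpacked_wheel')  # required lib -> requiring objects
copied = delocate_tree_libs(lib_dict, lib_path, 'unpacked_wheel')
```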
466 | matthew-brett/delocate | delocate/delocating.py | copy_recurse | def copy_recurse(lib_path, copy_filt_func = None, copied_libs = None):
""" Analyze `lib_path` for library dependencies and copy libraries
`lib_path` is a directory containing libraries. The libraries might
themselves have dependencies. This function analyzes the dependencies and
copies library dependencies that match the filter `copy_filt_func`. It also
adjusts the depending libraries to use the copy. It keeps iterating over
`lib_path` until all matching dependencies (of dependencies of dependencies
...) have been copied.
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that the found libraries depend on. If callable,
called on each depended library name; copy where
``copy_filt_func(libname)`` is True, don't copy otherwise
copied_libs : dict
Dict with (key, value) pairs of (``copied_lib_path``,
``dependings_dict``) where ``copied_lib_path`` is the canonical path of
a library that has been copied to `lib_path`, and ``dependings_dict``
is a dictionary with (key, value) pairs of (``depending_lib_path``,
``install_name``). ``depending_lib_path`` is the canonical path of the
library depending on ``copied_lib_path``, ``install_name`` is the name
that ``depending_lib_path`` uses to refer to ``copied_lib_path`` (in
its install names).
Returns
-------
copied_libs : dict
Input `copied_libs` dict with any extra libraries and / or dependencies
added.
"""
if copied_libs is None:
copied_libs = {}
else:
copied_libs = dict(copied_libs)
done = False
while not done:
in_len = len(copied_libs)
_copy_required(lib_path, copy_filt_func, copied_libs)
done = len(copied_libs) == in_len
return copied_libs | python | def copy_recurse(lib_path, copy_filt_func = None, copied_libs = None):
if copied_libs is None:
copied_libs = {}
else:
copied_libs = dict(copied_libs)
done = False
while not done:
in_len = len(copied_libs)
_copy_required(lib_path, copy_filt_func, copied_libs)
done = len(copied_libs) == in_len
return copied_libs | [
"def",
"copy_recurse",
"(",
"lib_path",
",",
"copy_filt_func",
"=",
"None",
",",
"copied_libs",
"=",
"None",
")",
":",
"if",
"copied_libs",
"is",
"None",
":",
"copied_libs",
"=",
"{",
"}",
"else",
":",
"copied_libs",
"=",
"dict",
"(",
"copied_libs",
")",
"done",
"=",
"False",
"while",
"not",
"done",
":",
"in_len",
"=",
"len",
"(",
"copied_libs",
")",
"_copy_required",
"(",
"lib_path",
",",
"copy_filt_func",
",",
"copied_libs",
")",
"done",
"=",
"len",
"(",
"copied_libs",
")",
"==",
"in_len",
"return",
"copied_libs"
] | Analyze `lib_path` for library dependencies and copy libraries
`lib_path` is a directory containing libraries. The libraries might
themselves have dependencies. This function analyzes the dependencies and
copies library dependencies that match the filter `copy_filt_func`. It also
adjusts the depending libraries to use the copy. It keeps iterating over
`lib_path` until all matching dependencies (of dependencies of dependencies
...) have been copied.
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that the found libraries depend on. If callable,
called on each depended library name; copy where
``copy_filt_func(libname)`` is True, don't copy otherwise
copied_libs : dict
Dict with (key, value) pairs of (``copied_lib_path``,
``dependings_dict``) where ``copied_lib_path`` is the canonical path of
a library that has been copied to `lib_path`, and ``dependings_dict``
is a dictionary with (key, value) pairs of (``depending_lib_path``,
``install_name``). ``depending_lib_path`` is the canonical path of the
library depending on ``copied_lib_path``, ``install_name`` is the name
that ``depending_lib_path`` uses to refer to ``copied_lib_path`` (in
its install names).
Returns
-------
copied_libs : dict
Input `copied_libs` dict with any extra libraries and / or dependencies
added. | [
"Analyze",
"lib_path",
"for",
"library",
"dependencies",
"and",
"copy",
"libraries"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L104-L147 |
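A sketch for `copy_recurse`, reusing the `filter_system_libs` filter that appears in `delocate_path` below (directory hypothetical):

```python
from delocate.delocating import copy_recurse, filter_system_libs

# keeps copying transitive dependencies until the copied set stops growing
copied = copy_recurse('unpacked_wheel/package/.dylibs', filter_system_libs)
```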
467 | matthew-brett/delocate | delocate/delocating.py | _copy_required | def _copy_required(lib_path, copy_filt_func, copied_libs):
""" Copy libraries required for files in `lib_path` to `lib_path`
Augment `copied_libs` dictionary with any newly copied libraries, modifying
`copied_libs` in-place - see Notes.
This is one pass of ``copy_recurse``
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that the found libraries depend on. If callable,
called on each library name; copy where ``copy_filt_func(libname)`` is
True, don't copy otherwise
copied_libs : dict
See :func:`copy_recurse` for definition.
Notes
-----
If we need to copy another library, add that (``depended_lib_path``,
``dependings_dict``) to `copied_libs`. ``dependings_dict`` has (key,
value) pairs of (``depending_lib_path``, ``install_name``).
``depending_lib_path`` will be the original (canonical) library name, not
the copy in ``lib_path``.
Sometimes we copy a library that further depends on a library we have
already copied. In this case update ``copied_libs[depended_lib]`` with the
extra dependency (as well as fixing up the install names for the depending
library).
For example, imagine we start with a lib path like this::
my_lib_path/
libA.dylib
libB.dylib
Our input `copied_libs` has keys ``/sys/libA.dylib``, ``/sys/libB.dylib``
telling us we previously copied those guys from the ``/sys`` folder.
On a first pass, we discover that ``libA.dylib`` depends on
``/sys/libC.dylib``, so we copy that.
On a second pass, we discover now that ``libC.dylib`` also depends on
``/sys/libB.dylib``. `copied_libs` tells us that we already have a copy of
``/sys/libB.dylib``, so we fix our copy of ``libC.dylib`` to point to
``my_lib_path/libB.dylib`` and add ``/sys/libC.dylib`` as a
``dependings_dict`` entry for ``copied_libs['/sys/libB.dylib']``
"""
# Paths will be prepended with `lib_path`
lib_dict = tree_libs(lib_path)
# Map library paths after copy ('copied') to path before copy ('orig')
rp_lp = realpath(lib_path)
copied2orig = dict((pjoin(rp_lp, basename(c)), c) for c in copied_libs)
for required, requirings in lib_dict.items():
if not copy_filt_func is None and not copy_filt_func(required):
continue
if required.startswith('@'):
# May have been processed by us, or have some rpath, loader_path of
# its own. Either way, leave alone
continue
# Requiring names may well be the copies in lib_path. Replace the copy
# names with the original names for entry into `copied_libs`
procd_requirings = {}
# Set requiring lib install names to point to local copy
for requiring, orig_install_name in requirings.items():
set_install_name(requiring,
orig_install_name,
'@loader_path/' + basename(required))
# Make processed version of ``dependings_dict``
mapped_requiring = copied2orig.get(requiring, requiring)
procd_requirings[mapped_requiring] = orig_install_name
if required in copied_libs:
# Have copied this already, add any new requirings
copied_libs[required].update(procd_requirings)
continue
# Haven't seen this one before, add entry to copied_libs
out_path = pjoin(lib_path, basename(required))
if exists(out_path):
raise DelocationError(out_path + ' already exists')
shutil.copy(required, lib_path)
copied2orig[out_path] = required
copied_libs[required] = procd_requirings | python | def _copy_required(lib_path, copy_filt_func, copied_libs):
# Paths will be prepended with `lib_path`
lib_dict = tree_libs(lib_path)
# Map library paths after copy ('copied') to path before copy ('orig')
rp_lp = realpath(lib_path)
copied2orig = dict((pjoin(rp_lp, basename(c)), c) for c in copied_libs)
for required, requirings in lib_dict.items():
if not copy_filt_func is None and not copy_filt_func(required):
continue
if required.startswith('@'):
# May have been processed by us, or have some rpath, loader_path of
# its own. Either way, leave alone
continue
# Requiring names may well be the copies in lib_path. Replace the copy
# names with the original names for entry into `copied_libs`
procd_requirings = {}
# Set requiring lib install names to point to local copy
for requiring, orig_install_name in requirings.items():
set_install_name(requiring,
orig_install_name,
'@loader_path/' + basename(required))
# Make processed version of ``dependings_dict``
mapped_requiring = copied2orig.get(requiring, requiring)
procd_requirings[mapped_requiring] = orig_install_name
if required in copied_libs:
# Have copied this already, add any new requirings
copied_libs[required].update(procd_requirings)
continue
# Haven't seen this one before, add entry to copied_libs
out_path = pjoin(lib_path, basename(required))
if exists(out_path):
raise DelocationError(out_path + ' already exists')
shutil.copy(required, lib_path)
copied2orig[out_path] = required
copied_libs[required] = procd_requirings | [
"def",
"_copy_required",
"(",
"lib_path",
",",
"copy_filt_func",
",",
"copied_libs",
")",
":",
"# Paths will be prepended with `lib_path`",
"lib_dict",
"=",
"tree_libs",
"(",
"lib_path",
")",
"# Map library paths after copy ('copied') to path before copy ('orig')",
"rp_lp",
"=",
"realpath",
"(",
"lib_path",
")",
"copied2orig",
"=",
"dict",
"(",
"(",
"pjoin",
"(",
"rp_lp",
",",
"basename",
"(",
"c",
")",
")",
",",
"c",
")",
"for",
"c",
"in",
"copied_libs",
")",
"for",
"required",
",",
"requirings",
"in",
"lib_dict",
".",
"items",
"(",
")",
":",
"if",
"not",
"copy_filt_func",
"is",
"None",
"and",
"not",
"copy_filt_func",
"(",
"required",
")",
":",
"continue",
"if",
"required",
".",
"startswith",
"(",
"'@'",
")",
":",
"# May have been processed by us, or have some rpath, loader_path of",
"# its own. Either way, leave alone",
"continue",
"# Requiring names may well be the copies in lib_path. Replace the copy",
"# names with the original names for entry into `copied_libs`",
"procd_requirings",
"=",
"{",
"}",
"# Set requiring lib install names to point to local copy",
"for",
"requiring",
",",
"orig_install_name",
"in",
"requirings",
".",
"items",
"(",
")",
":",
"set_install_name",
"(",
"requiring",
",",
"orig_install_name",
",",
"'@loader_path/'",
"+",
"basename",
"(",
"required",
")",
")",
"# Make processed version of ``dependings_dict``",
"mapped_requiring",
"=",
"copied2orig",
".",
"get",
"(",
"requiring",
",",
"requiring",
")",
"procd_requirings",
"[",
"mapped_requiring",
"]",
"=",
"orig_install_name",
"if",
"required",
"in",
"copied_libs",
":",
"# Have copied this already, add any new requirings",
"copied_libs",
"[",
"required",
"]",
".",
"update",
"(",
"procd_requirings",
")",
"continue",
"# Haven't see this one before, add entry to copied_libs",
"out_path",
"=",
"pjoin",
"(",
"lib_path",
",",
"basename",
"(",
"required",
")",
")",
"if",
"exists",
"(",
"out_path",
")",
":",
"raise",
"DelocationError",
"(",
"out_path",
"+",
"' already exists'",
")",
"shutil",
".",
"copy",
"(",
"required",
",",
"lib_path",
")",
"copied2orig",
"[",
"out_path",
"]",
"=",
"required",
"copied_libs",
"[",
"required",
"]",
"=",
"procd_requirings"
] | Copy libraries required for files in `lib_path` to `lib_path`
Augment `copied_libs` dictionary with any newly copied libraries, modifying
`copied_libs` in-place - see Notes.
This is one pass of ``copy_recurse``
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that the found libraries depend on. If callable,
called on each library name; copy where ``copy_filt_func(libname)`` is
True, don't copy otherwise
copied_libs : dict
See :func:`copy_recurse` for definition.
Notes
-----
If we need to copy another library, add that (``depended_lib_path``,
``dependings_dict``) to `copied_libs`. ``dependings_dict`` has (key,
value) pairs of (``depending_lib_path``, ``install_name``).
``depending_lib_path`` will be the original (canonical) library name, not
the copy in ``lib_path``.
Sometimes we copy a library that further depends on a library we have
already copied. In this case update ``copied_libs[depended_lib]`` with the
extra dependency (as well as fixing up the install names for the depending
library).
For example, imagine we start with a lib path like this::
my_lib_path/
libA.dylib
libB.dylib
Our input `copied_libs` has keys ``/sys/libA.dylib``, ``/sys/libB.dylib``
telling us we previously copied those guys from the ``/sys`` folder.
On a first pass, we discover that ``libA.dylib`` depends on
``/sys/libC.dylib``, so we copy that.
On a second pass, we discover now that ``libC.dylib`` also depends on
``/sys/libB.dylib``. `copied_libs` tells us that we already have a copy of
``/sys/libB.dylib``, so we fix our copy of ``libC.dylib`` to point to
``my_lib_path/libB.dylib`` and add ``/sys/libC.dylib`` as a
``dependings_dict`` entry for ``copied_libs['/sys/libB.dylib']`` | [
"Copy",
"libraries",
"required",
"for",
"files",
"in",
"lib_path",
"to",
"lib_path"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L150-L233 |
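`_copy_required` is a private helper, but a sketch of a single pass clarifies the fixed-point loop in `copy_recurse` (directory hypothetical):

```python
from delocate.delocating import _copy_required, filter_system_libs

copied = {}  # updated in place; one pass may reveal new dependencies
_copy_required('unpacked_wheel/package/.dylibs', filter_system_libs, copied)
```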
468 | matthew-brett/delocate | delocate/delocating.py | delocate_path | def delocate_path(tree_path, lib_path,
lib_filt_func = None,
copy_filt_func = filter_system_libs):
""" Copy required libraries for files in `tree_path` into `lib_path`
Parameters
----------
tree_path : str
Root path of tree to search for required libraries
lib_path : str
Directory into which we copy required libraries
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_path`, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a file in the path depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library.
"""
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
if not exists(lib_path):
os.makedirs(lib_path)
lib_dict = tree_libs(tree_path, lib_filt_func)
if not copy_filt_func is None:
lib_dict = dict((key, value) for key, value in lib_dict.items()
if copy_filt_func(key))
copied = delocate_tree_libs(lib_dict, lib_path, tree_path)
return copy_recurse(lib_path, copy_filt_func, copied) | python | def delocate_path(tree_path, lib_path,
lib_filt_func = None,
copy_filt_func = filter_system_libs):
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
if not exists(lib_path):
os.makedirs(lib_path)
lib_dict = tree_libs(tree_path, lib_filt_func)
if not copy_filt_func is None:
lib_dict = dict((key, value) for key, value in lib_dict.items()
if copy_filt_func(key))
copied = delocate_tree_libs(lib_dict, lib_path, tree_path)
return copy_recurse(lib_path, copy_filt_func, copied) | [
"def",
"delocate_path",
"(",
"tree_path",
",",
"lib_path",
",",
"lib_filt_func",
"=",
"None",
",",
"copy_filt_func",
"=",
"filter_system_libs",
")",
":",
"if",
"lib_filt_func",
"==",
"\"dylibs-only\"",
":",
"lib_filt_func",
"=",
"_dylibs_only",
"if",
"not",
"exists",
"(",
"lib_path",
")",
":",
"os",
".",
"makedirs",
"(",
"lib_path",
")",
"lib_dict",
"=",
"tree_libs",
"(",
"tree_path",
",",
"lib_filt_func",
")",
"if",
"not",
"copy_filt_func",
"is",
"None",
":",
"lib_dict",
"=",
"dict",
"(",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"lib_dict",
".",
"items",
"(",
")",
"if",
"copy_filt_func",
"(",
"key",
")",
")",
"copied",
"=",
"delocate_tree_libs",
"(",
"lib_dict",
",",
"lib_path",
",",
"tree_path",
")",
"return",
"copy_recurse",
"(",
"lib_path",
",",
"copy_filt_func",
",",
"copied",
")"
] | Copy required libraries for files in `tree_path` into `lib_path`
Parameters
----------
tree_path : str
Root path of tree to search for required libraries
lib_path : str
Directory into which we copy required libraries
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a file in the path depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. | [
"Copy",
"required",
"libraries",
"for",
"files",
"in",
"tree_path",
"into",
"lib_path"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L246-L289 |
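A minimal usage sketch for ``delocate_path``, assuming an unpacked package tree at ``build/mypkg`` (an invented path) and inspection of dynamic-library extensions only:

from delocate.delocating import delocate_path

copied = delocate_path('build/mypkg', 'build/mypkg/.dylibs',
                       lib_filt_func='dylibs-only')
# Each key is a copied library; each value maps the files that needed
# it to the install_name they used to refer to it.
for lib, dependers in copied.items():
    print(lib, '<-', sorted(dependers))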
469 | matthew-brett/delocate | delocate/delocating.py | _merge_lib_dict | def _merge_lib_dict(d1, d2):
""" Merges lib_dict `d2` into lib_dict `d1`
"""
for required, requirings in d2.items():
if required in d1:
d1[required].update(requirings)
else:
d1[required] = requirings
return None | python | def _merge_lib_dict(d1, d2):
for required, requirings in d2.items():
if required in d1:
d1[required].update(requirings)
else:
d1[required] = requirings
return None | [
"def",
"_merge_lib_dict",
"(",
"d1",
",",
"d2",
")",
":",
"for",
"required",
",",
"requirings",
"in",
"d2",
".",
"items",
"(",
")",
":",
"if",
"required",
"in",
"d1",
":",
"d1",
"[",
"required",
"]",
".",
"update",
"(",
"requirings",
")",
"else",
":",
"d1",
"[",
"required",
"]",
"=",
"requirings",
"return",
"None"
] | Merges lib_dict `d2` into lib_dict `d1` | [
"Merges",
"lib_dict",
"d2",
"into",
"lib_dict",
"d1"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L292-L300 |
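A worked example of the in-place merge ``_merge_lib_dict`` performs; all paths are invented:

from delocate.delocating import _merge_lib_dict

d1 = {'/sys/libA.dylib': {'pkg/modA.so': '/sys/libA.dylib'}}
d2 = {'/sys/libA.dylib': {'pkg/modB.so': '/sys/libA.dylib'},
      '/sys/libB.dylib': {'pkg/modB.so': '/sys/libB.dylib'}}
_merge_lib_dict(d1, d2)
# Shared keys have their dependings_dicts merged; new keys are added.
assert d1 == {
    '/sys/libA.dylib': {'pkg/modA.so': '/sys/libA.dylib',
                        'pkg/modB.so': '/sys/libA.dylib'},
    '/sys/libB.dylib': {'pkg/modB.so': '/sys/libB.dylib'}}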
470 | matthew-brett/delocate | delocate/delocating.py | delocate_wheel | def delocate_wheel(in_wheel,
out_wheel = None,
lib_sdir = '.dylibs',
lib_filt_func = None,
copy_filt_func = filter_system_libs,
require_archs = None,
check_verbose = False,
):
""" Update wheel by copying required libraries to `lib_sdir` in wheel
Create `lib_sdir` in wheel tree only if we are copying one or more
libraries.
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
out_wheel : None or str
Filename of processed wheel to write. If None, overwrite `in_wheel`
lib_sdir : str, optional
Subdirectory name in wheel package directory (or directories) to store
needed libraries.
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
require_archs : None or str or sequence, optional
If None, do no checks of architectures in libraries. If sequence,
sequence of architectures (output from ``lipo -info``) that every
library in the wheels should have (e.g. ``['x86_64', 'i386']``). An
empty sequence results in checks that depended libraries have the same
archs as depending libraries. If string, either "intel" (corresponds
to sequence ``['x86_64', 'i386']``) or name of required architecture
(e.g. "i386" or "x86_64").
check_verbose : bool, optional
If True, print warning messages about missing required architectures
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a path in the wheel depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. The filenames in the keys are relative to the wheel root path.
"""
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
in_wheel = abspath(in_wheel)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
in_place = in_wheel == out_wheel
with TemporaryDirectory() as tmpdir:
all_copied = {}
wheel_dir = realpath(pjoin(tmpdir, 'wheel'))
zip2dir(in_wheel, wheel_dir)
for package_path in find_package_dirs(wheel_dir):
lib_path = pjoin(package_path, lib_sdir)
lib_path_exists = exists(lib_path)
copied_libs = delocate_path(package_path, lib_path,
lib_filt_func, copy_filt_func)
if copied_libs and lib_path_exists:
raise DelocationError(
'{0} already exists in wheel but need to copy '
'{1}'.format(lib_path, '; '.join(copied_libs)))
if len(os.listdir(lib_path)) == 0:
shutil.rmtree(lib_path)
# Check architectures
if not require_archs is None:
stop_fast = not check_verbose
bads = check_archs(copied_libs, require_archs, stop_fast)
if len(bads) != 0:
if check_verbose:
print(bads_report(bads, pjoin(tmpdir, 'wheel')))
raise DelocationError(
"Some missing architectures in wheel")
# Change install ids to be unique within Python space
install_id_root = (DLC_PREFIX +
relpath(package_path, wheel_dir) +
'/')
for lib in copied_libs:
lib_base = basename(lib)
copied_path = pjoin(lib_path, lib_base)
set_install_id(copied_path, install_id_root + lib_base)
validate_signature(copied_path)
_merge_lib_dict(all_copied, copied_libs)
if len(all_copied):
rewrite_record(wheel_dir)
if len(all_copied) or not in_place:
dir2zip(wheel_dir, out_wheel)
return stripped_lib_dict(all_copied, wheel_dir + os.path.sep) | python | def delocate_wheel(in_wheel,
out_wheel = None,
lib_sdir = '.dylibs',
lib_filt_func = None,
copy_filt_func = filter_system_libs,
require_archs = None,
check_verbose = False,
):
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
in_wheel = abspath(in_wheel)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
in_place = in_wheel == out_wheel
with TemporaryDirectory() as tmpdir:
all_copied = {}
wheel_dir = realpath(pjoin(tmpdir, 'wheel'))
zip2dir(in_wheel, wheel_dir)
for package_path in find_package_dirs(wheel_dir):
lib_path = pjoin(package_path, lib_sdir)
lib_path_exists = exists(lib_path)
copied_libs = delocate_path(package_path, lib_path,
lib_filt_func, copy_filt_func)
if copied_libs and lib_path_exists:
raise DelocationError(
'{0} already exists in wheel but need to copy '
'{1}'.format(lib_path, '; '.join(copied_libs)))
if len(os.listdir(lib_path)) == 0:
shutil.rmtree(lib_path)
# Check architectures
if not require_archs is None:
stop_fast = not check_verbose
bads = check_archs(copied_libs, require_archs, stop_fast)
if len(bads) != 0:
if check_verbose:
print(bads_report(bads, pjoin(tmpdir, 'wheel')))
raise DelocationError(
"Some missing architectures in wheel")
# Change install ids to be unique within Python space
install_id_root = (DLC_PREFIX +
relpath(package_path, wheel_dir) +
'/')
for lib in copied_libs:
lib_base = basename(lib)
copied_path = pjoin(lib_path, lib_base)
set_install_id(copied_path, install_id_root + lib_base)
validate_signature(copied_path)
_merge_lib_dict(all_copied, copied_libs)
if len(all_copied):
rewrite_record(wheel_dir)
if len(all_copied) or not in_place:
dir2zip(wheel_dir, out_wheel)
return stripped_lib_dict(all_copied, wheel_dir + os.path.sep) | [
"def",
"delocate_wheel",
"(",
"in_wheel",
",",
"out_wheel",
"=",
"None",
",",
"lib_sdir",
"=",
"'.dylibs'",
",",
"lib_filt_func",
"=",
"None",
",",
"copy_filt_func",
"=",
"filter_system_libs",
",",
"require_archs",
"=",
"None",
",",
"check_verbose",
"=",
"False",
",",
")",
":",
"if",
"lib_filt_func",
"==",
"\"dylibs-only\"",
":",
"lib_filt_func",
"=",
"_dylibs_only",
"in_wheel",
"=",
"abspath",
"(",
"in_wheel",
")",
"if",
"out_wheel",
"is",
"None",
":",
"out_wheel",
"=",
"in_wheel",
"else",
":",
"out_wheel",
"=",
"abspath",
"(",
"out_wheel",
")",
"in_place",
"=",
"in_wheel",
"==",
"out_wheel",
"with",
"TemporaryDirectory",
"(",
")",
"as",
"tmpdir",
":",
"all_copied",
"=",
"{",
"}",
"wheel_dir",
"=",
"realpath",
"(",
"pjoin",
"(",
"tmpdir",
",",
"'wheel'",
")",
")",
"zip2dir",
"(",
"in_wheel",
",",
"wheel_dir",
")",
"for",
"package_path",
"in",
"find_package_dirs",
"(",
"wheel_dir",
")",
":",
"lib_path",
"=",
"pjoin",
"(",
"package_path",
",",
"lib_sdir",
")",
"lib_path_exists",
"=",
"exists",
"(",
"lib_path",
")",
"copied_libs",
"=",
"delocate_path",
"(",
"package_path",
",",
"lib_path",
",",
"lib_filt_func",
",",
"copy_filt_func",
")",
"if",
"copied_libs",
"and",
"lib_path_exists",
":",
"raise",
"DelocationError",
"(",
"'{0} already exists in wheel but need to copy '",
"'{1}'",
".",
"format",
"(",
"lib_path",
",",
"'; '",
".",
"join",
"(",
"copied_libs",
")",
")",
")",
"if",
"len",
"(",
"os",
".",
"listdir",
"(",
"lib_path",
")",
")",
"==",
"0",
":",
"shutil",
".",
"rmtree",
"(",
"lib_path",
")",
"# Check architectures",
"if",
"not",
"require_archs",
"is",
"None",
":",
"stop_fast",
"=",
"not",
"check_verbose",
"bads",
"=",
"check_archs",
"(",
"copied_libs",
",",
"require_archs",
",",
"stop_fast",
")",
"if",
"len",
"(",
"bads",
")",
"!=",
"0",
":",
"if",
"check_verbose",
":",
"print",
"(",
"bads_report",
"(",
"bads",
",",
"pjoin",
"(",
"tmpdir",
",",
"'wheel'",
")",
")",
")",
"raise",
"DelocationError",
"(",
"\"Some missing architectures in wheel\"",
")",
"# Change install ids to be unique within Python space",
"install_id_root",
"=",
"(",
"DLC_PREFIX",
"+",
"relpath",
"(",
"package_path",
",",
"wheel_dir",
")",
"+",
"'/'",
")",
"for",
"lib",
"in",
"copied_libs",
":",
"lib_base",
"=",
"basename",
"(",
"lib",
")",
"copied_path",
"=",
"pjoin",
"(",
"lib_path",
",",
"lib_base",
")",
"set_install_id",
"(",
"copied_path",
",",
"install_id_root",
"+",
"lib_base",
")",
"validate_signature",
"(",
"copied_path",
")",
"_merge_lib_dict",
"(",
"all_copied",
",",
"copied_libs",
")",
"if",
"len",
"(",
"all_copied",
")",
":",
"rewrite_record",
"(",
"wheel_dir",
")",
"if",
"len",
"(",
"all_copied",
")",
"or",
"not",
"in_place",
":",
"dir2zip",
"(",
"wheel_dir",
",",
"out_wheel",
")",
"return",
"stripped_lib_dict",
"(",
"all_copied",
",",
"wheel_dir",
"+",
"os",
".",
"path",
".",
"sep",
")"
] | Update wheel by copying required libraries to `lib_sdir` in wheel
Create `lib_sdir` in wheel tree only if we are copying one or more
libraries.
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
out_wheel : None or str
Filename of processed wheel to write. If None, overwrite `in_wheel`
lib_sdir : str, optional
Subdirectory name in wheel package directory (or directories) to store
needed libraries.
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
require_archs : None or str or sequence, optional
If None, do no checks of architectures in libraries. If sequence,
sequence of architectures (output from ``lipo -info``) that every
library in the wheels should have (e.g. ``['x86_64', 'i386']``). An
empty sequence results in checks that depended libraries have the same
archs as depending libraries. If string, either "intel" (corresponds
to sequence ``['x86_64', 'i386']``) or name of required architecture
(e.g. "i386" or "x86_64").
check_verbose : bool, optional
If True, print warning messages about missing required architectures
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a path in the wheel depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. The filenames in the keys are relative to the wheel root path. | [
"Update",
"wheel",
"by",
"copying",
"required",
"libraries",
"to",
"lib_sdir",
"in",
"wheel"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L303-L407 |
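A minimal usage sketch for ``delocate_wheel``; the wheel filename is invented, and the call rewrites the wheel in place because ``out_wheel`` is left as None:

from delocate.delocating import delocate_wheel

copied = delocate_wheel(
    'dist/example-1.0-cp36-cp36m-macosx_10_9_x86_64.whl',
    require_archs='intel',   # demand i386 + x86_64 in every library
    check_verbose=True)      # report any missing architectures
print('libraries copied into the wheel:', sorted(copied))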
471 | matthew-brett/delocate | delocate/delocating.py | patch_wheel | def patch_wheel(in_wheel, patch_fname, out_wheel=None):
""" Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel`
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
patch_fname : str
Filename of patch file. Will be applied with ``patch -p1 <
patch_fname``
out_wheel : None or str
Filename of patched wheel to write. If None, overwrite `in_wheel`
"""
in_wheel = abspath(in_wheel)
patch_fname = abspath(patch_fname)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
if not exists(patch_fname):
raise ValueError("patch file {0} does not exist".format(patch_fname))
with InWheel(in_wheel, out_wheel):
with open(patch_fname, 'rb') as fobj:
patch_proc = Popen(['patch', '-p1'],
stdin = fobj,
stdout = PIPE,
stderr = PIPE)
stdout, stderr = patch_proc.communicate()
if patch_proc.returncode != 0:
raise RuntimeError("Patch failed with stdout:\n" +
stdout.decode('latin1')) | python | def patch_wheel(in_wheel, patch_fname, out_wheel=None):
in_wheel = abspath(in_wheel)
patch_fname = abspath(patch_fname)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
if not exists(patch_fname):
raise ValueError("patch file {0} does not exist".format(patch_fname))
with InWheel(in_wheel, out_wheel):
with open(patch_fname, 'rb') as fobj:
patch_proc = Popen(['patch', '-p1'],
stdin = fobj,
stdout = PIPE,
stderr = PIPE)
stdout, stderr = patch_proc.communicate()
if patch_proc.returncode != 0:
raise RuntimeError("Patch failed with stdout:\n" +
stdout.decode('latin1')) | [
"def",
"patch_wheel",
"(",
"in_wheel",
",",
"patch_fname",
",",
"out_wheel",
"=",
"None",
")",
":",
"in_wheel",
"=",
"abspath",
"(",
"in_wheel",
")",
"patch_fname",
"=",
"abspath",
"(",
"patch_fname",
")",
"if",
"out_wheel",
"is",
"None",
":",
"out_wheel",
"=",
"in_wheel",
"else",
":",
"out_wheel",
"=",
"abspath",
"(",
"out_wheel",
")",
"if",
"not",
"exists",
"(",
"patch_fname",
")",
":",
"raise",
"ValueError",
"(",
"\"patch file {0} does not exist\"",
".",
"format",
"(",
"patch_fname",
")",
")",
"with",
"InWheel",
"(",
"in_wheel",
",",
"out_wheel",
")",
":",
"with",
"open",
"(",
"patch_fname",
",",
"'rb'",
")",
"as",
"fobj",
":",
"patch_proc",
"=",
"Popen",
"(",
"[",
"'patch'",
",",
"'-p1'",
"]",
",",
"stdin",
"=",
"fobj",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"stdout",
",",
"stderr",
"=",
"patch_proc",
".",
"communicate",
"(",
")",
"if",
"patch_proc",
".",
"returncode",
"!=",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"Patch failed with stdout:\\n\"",
"+",
"stdout",
".",
"decode",
"(",
"'latin1'",
")",
")"
] | Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel`
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
patch_fname : str
Filename of patch file. Will be applied with ``patch -p1 <
patch_fname``
out_wheel : None or str
Filename of patched wheel to write. If None, overwrite `in_wheel` | [
"Apply",
"-",
"p1",
"style",
"patch",
"in",
"patch_fname",
"to",
"contents",
"of",
"in_wheel"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L410-L443 |
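A minimal usage sketch for ``patch_wheel``; filenames are invented, and the ``patch`` command-line tool must be available since the function shells out to it:

from delocate.delocating import patch_wheel

patch_wheel('dist/example-1.0-py3-none-any.whl', 'fix.patch',
            out_wheel='dist/example-1.0.post1-py3-none-any.whl')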
472 | matthew-brett/delocate | delocate/delocating.py | check_archs | def check_archs(copied_libs, require_archs=(), stop_fast=False):
""" Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depended_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
"""
if isinstance(require_archs, string_types):
require_archs = (['i386', 'x86_64'] if require_archs == 'intel'
else [require_archs])
require_archs = frozenset(require_archs)
bads = []
for depended_lib, dep_dict in copied_libs.items():
depended_archs = get_archs(depended_lib)
for depending_lib, install_name in dep_dict.items():
depending_archs = get_archs(depending_lib)
all_required = depending_archs | require_archs
all_missing = all_required.difference(depended_archs)
if len(all_missing) == 0:
continue
required_missing = require_archs.difference(depended_archs)
if len(required_missing):
bads.append((depending_lib, required_missing))
else:
bads.append((depended_lib, depending_lib, all_missing))
if stop_fast:
return set(bads)
return set(bads) | python | def check_archs(copied_libs, require_archs=(), stop_fast=False):
if isinstance(require_archs, string_types):
require_archs = (['i386', 'x86_64'] if require_archs == 'intel'
else [require_archs])
require_archs = frozenset(require_archs)
bads = []
for depended_lib, dep_dict in copied_libs.items():
depended_archs = get_archs(depended_lib)
for depending_lib, install_name in dep_dict.items():
depending_archs = get_archs(depending_lib)
all_required = depending_archs | require_archs
all_missing = all_required.difference(depended_archs)
if len(all_missing) == 0:
continue
required_missing = require_archs.difference(depended_archs)
if len(required_missing):
bads.append((depending_lib, required_missing))
else:
bads.append((depended_lib, depending_lib, all_missing))
if stop_fast:
return set(bads)
return set(bads) | [
"def",
"check_archs",
"(",
"copied_libs",
",",
"require_archs",
"=",
"(",
")",
",",
"stop_fast",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"require_archs",
",",
"string_types",
")",
":",
"require_archs",
"=",
"(",
"[",
"'i386'",
",",
"'x86_64'",
"]",
"if",
"require_archs",
"==",
"'intel'",
"else",
"[",
"require_archs",
"]",
")",
"require_archs",
"=",
"frozenset",
"(",
"require_archs",
")",
"bads",
"=",
"[",
"]",
"for",
"depended_lib",
",",
"dep_dict",
"in",
"copied_libs",
".",
"items",
"(",
")",
":",
"depended_archs",
"=",
"get_archs",
"(",
"depended_lib",
")",
"for",
"depending_lib",
",",
"install_name",
"in",
"dep_dict",
".",
"items",
"(",
")",
":",
"depending_archs",
"=",
"get_archs",
"(",
"depending_lib",
")",
"all_required",
"=",
"depending_archs",
"|",
"require_archs",
"all_missing",
"=",
"all_required",
".",
"difference",
"(",
"depended_archs",
")",
"if",
"len",
"(",
"all_missing",
")",
"==",
"0",
":",
"continue",
"required_missing",
"=",
"require_archs",
".",
"difference",
"(",
"depended_archs",
")",
"if",
"len",
"(",
"required_missing",
")",
":",
"bads",
".",
"append",
"(",
"(",
"depending_lib",
",",
"required_missing",
")",
")",
"else",
":",
"bads",
".",
"append",
"(",
"(",
"depended_lib",
",",
"depending_lib",
",",
"all_missing",
")",
")",
"if",
"stop_fast",
":",
"return",
"set",
"(",
"bads",
")",
"return",
"set",
"(",
"bads",
")"
] | Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required. | [
"Check",
"compatibility",
"of",
"archs",
"in",
"copied_libs",
"dict"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L446-L505 |
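A minimal usage sketch for ``check_archs``; the paths are placeholders and must point at real Mach-O files for the internal ``get_archs`` calls to succeed:

from delocate.delocating import check_archs

copied_libs = {'/sys/libA.dylib': {'pkg/module.so': '/sys/libA.dylib'}}
bads = check_archs(copied_libs, require_archs='intel')
print(bads if bads else 'all required architectures present')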
473 | matthew-brett/delocate | delocate/delocating.py | bads_report | def bads_report(bads, path_prefix=None):
""" Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depended_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing
"""
path_processor = ((lambda x : x) if path_prefix is None
else get_rp_stripper(path_prefix))
reports = []
for result in bads:
if len(result) == 3:
depended_lib, depending_lib, missing_archs = result
reports.append("{0} needs {1} {2} missing from {3}".format(
path_processor(depending_lib),
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depended_lib)))
elif len(result) == 2:
depending_lib, missing_archs = result
reports.append("Required {0} {1} missing from {2}".format(
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depending_lib)))
else:
raise ValueError('Report tuple should be length 2 or 3')
return '\n'.join(sorted(reports)) | python | def bads_report(bads, path_prefix=None):
path_processor = ((lambda x : x) if path_prefix is None
else get_rp_stripper(path_prefix))
reports = []
for result in bads:
if len(result) == 3:
depended_lib, depending_lib, missing_archs = result
reports.append("{0} needs {1} {2} missing from {3}".format(
path_processor(depending_lib),
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depended_lib)))
elif len(result) == 2:
depending_lib, missing_archs = result
reports.append("Required {0} {1} missing from {2}".format(
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depending_lib)))
else:
raise ValueError('Report tuple should be length 2 or 3')
return '\n'.join(sorted(reports)) | [
"def",
"bads_report",
"(",
"bads",
",",
"path_prefix",
"=",
"None",
")",
":",
"path_processor",
"=",
"(",
"(",
"lambda",
"x",
":",
"x",
")",
"if",
"path_prefix",
"is",
"None",
"else",
"get_rp_stripper",
"(",
"path_prefix",
")",
")",
"reports",
"=",
"[",
"]",
"for",
"result",
"in",
"bads",
":",
"if",
"len",
"(",
"result",
")",
"==",
"3",
":",
"depended_lib",
",",
"depending_lib",
",",
"missing_archs",
"=",
"result",
"reports",
".",
"append",
"(",
"\"{0} needs {1} {2} missing from {3}\"",
".",
"format",
"(",
"path_processor",
"(",
"depending_lib",
")",
",",
"'archs'",
"if",
"len",
"(",
"missing_archs",
")",
">",
"1",
"else",
"'arch'",
",",
"', '",
".",
"join",
"(",
"sorted",
"(",
"missing_archs",
")",
")",
",",
"path_processor",
"(",
"depended_lib",
")",
")",
")",
"elif",
"len",
"(",
"result",
")",
"==",
"2",
":",
"depending_lib",
",",
"missing_archs",
"=",
"result",
"reports",
".",
"append",
"(",
"\"Required {0} {1} missing from {2}\"",
".",
"format",
"(",
"'archs'",
"if",
"len",
"(",
"missing_archs",
")",
">",
"1",
"else",
"'arch'",
",",
"', '",
".",
"join",
"(",
"sorted",
"(",
"missing_archs",
")",
")",
",",
"path_processor",
"(",
"depending_lib",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Report tuple should be length 2 or 3'",
")",
"return",
"'\\n'",
".",
"join",
"(",
"sorted",
"(",
"reports",
")",
")"
] | Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depended_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing | [
"Return",
"a",
"nice",
"report",
"of",
"bad",
"architectures",
"in",
"bads"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L508-L552 |
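A self-contained sketch of ``bads_report`` on hand-built tuples (all contents invented); the frozensets keep the tuples hashable so they can live in a set:

from delocate.delocating import bads_report

bads = {('pkg/module.so', frozenset(['i386'])),
        ('/sys/libA.dylib', 'pkg/module.so', frozenset(['x86_64']))}
print(bads_report(bads, path_prefix='/tmp/wheel/'))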
474 | matthew-brett/delocate | delocate/libsana.py | tree_libs | def tree_libs(start_path, filt_func=None):
""" Return analysis of library dependencies within `start_path`
Parameters
----------
start_path : str
root path of tree to search for libraries depending on other libraries.
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is canonical (``os.path.realpath``) filename of library, or
library name starting with {'@rpath', '@loader_path',
'@executable_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
(``depending_libpath``, ``install_name``), where ``depending_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Notes
-----
See:
* https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html
* http://matthew-brett.github.io/pydagogue/mac_runtime_link.html
"""
lib_dict = {}
for dirpath, dirnames, basenames in os.walk(start_path):
for base in basenames:
depending_libpath = realpath(pjoin(dirpath, base))
if not filt_func is None and not filt_func(depending_libpath):
continue
rpaths = get_rpaths(depending_libpath)
for install_name in get_install_names(depending_libpath):
lib_path = (install_name if install_name.startswith('@')
else realpath(install_name))
lib_path = resolve_rpath(lib_path, rpaths)
if lib_path in lib_dict:
lib_dict[lib_path][depending_libpath] = install_name
else:
lib_dict[lib_path] = {depending_libpath: install_name}
return lib_dict | python | def tree_libs(start_path, filt_func=None):
lib_dict = {}
for dirpath, dirnames, basenames in os.walk(start_path):
for base in basenames:
depending_libpath = realpath(pjoin(dirpath, base))
if not filt_func is None and not filt_func(depending_libpath):
continue
rpaths = get_rpaths(depending_libpath)
for install_name in get_install_names(depending_libpath):
lib_path = (install_name if install_name.startswith('@')
else realpath(install_name))
lib_path = resolve_rpath(lib_path, rpaths)
if lib_path in lib_dict:
lib_dict[lib_path][depending_libpath] = install_name
else:
lib_dict[lib_path] = {depending_libpath: install_name}
return lib_dict | [
"def",
"tree_libs",
"(",
"start_path",
",",
"filt_func",
"=",
"None",
")",
":",
"lib_dict",
"=",
"{",
"}",
"for",
"dirpath",
",",
"dirnames",
",",
"basenames",
"in",
"os",
".",
"walk",
"(",
"start_path",
")",
":",
"for",
"base",
"in",
"basenames",
":",
"depending_libpath",
"=",
"realpath",
"(",
"pjoin",
"(",
"dirpath",
",",
"base",
")",
")",
"if",
"not",
"filt_func",
"is",
"None",
"and",
"not",
"filt_func",
"(",
"depending_libpath",
")",
":",
"continue",
"rpaths",
"=",
"get_rpaths",
"(",
"depending_libpath",
")",
"for",
"install_name",
"in",
"get_install_names",
"(",
"depending_libpath",
")",
":",
"lib_path",
"=",
"(",
"install_name",
"if",
"install_name",
".",
"startswith",
"(",
"'@'",
")",
"else",
"realpath",
"(",
"install_name",
")",
")",
"lib_path",
"=",
"resolve_rpath",
"(",
"lib_path",
",",
"rpaths",
")",
"if",
"lib_path",
"in",
"lib_dict",
":",
"lib_dict",
"[",
"lib_path",
"]",
"[",
"depending_libpath",
"]",
"=",
"install_name",
"else",
":",
"lib_dict",
"[",
"lib_path",
"]",
"=",
"{",
"depending_libpath",
":",
"install_name",
"}",
"return",
"lib_dict"
] | Return analysis of library dependencies within `start_path`
Parameters
----------
start_path : str
root path of tree to search for libraries depending on other libraries.
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is canonical (``os.path.realpath``) filename of library, or
library name starting with {'@rpath', '@loader_path',
'@executable_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
(``depending_libpath``, ``install_name``), where ``depending_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Notes
-----
See:
* https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html
* http://matthew-brett.github.io/pydagogue/mac_runtime_link.html | [
"Return",
"analysis",
"of",
"library",
"dependencies",
"within",
"start_path"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L14-L65 |
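A minimal usage sketch for ``tree_libs`` with a custom filter; the tree path is invented and must contain real binaries for ``get_install_names`` to inspect:

from delocate.libsana import tree_libs

def only_extensions(path):
    # Restrict inspection to compiled Python extensions.
    return path.endswith('.so')

lib_dict = tree_libs('build/lib', filt_func=only_extensions)
for lib, dependers in lib_dict.items():
    print(lib, 'needed by', len(dependers), 'file(s)')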
475 | matthew-brett/delocate | delocate/libsana.py | get_prefix_stripper | def get_prefix_stripper(strip_prefix):
""" Return function to strip `strip_prefix` prefix from string if present
Parameters
----------
strip_prefix : str
Prefix to strip from the beginning of string if present
Returns
-------
stripper : func
function such that ``stripper(a_string)`` will strip `strip_prefix` from
``a_string`` if present, otherwise pass ``a_string`` unmodified
"""
n = len(strip_prefix)
def stripper(path):
return path if not path.startswith(strip_prefix) else path[n:]
return stripper | python | def get_prefix_stripper(strip_prefix):
n = len(strip_prefix)
def stripper(path):
return path if not path.startswith(strip_prefix) else path[n:]
return stripper | [
"def",
"get_prefix_stripper",
"(",
"strip_prefix",
")",
":",
"n",
"=",
"len",
"(",
"strip_prefix",
")",
"def",
"stripper",
"(",
"path",
")",
":",
"return",
"path",
"if",
"not",
"path",
".",
"startswith",
"(",
"strip_prefix",
")",
"else",
"path",
"[",
"n",
":",
"]",
"return",
"stripper"
] | Return function to strip `strip_prefix` prefix from string if present
Parameters
----------
strip_prefix : str
Prefix to strip from the beginning of string if present
Returns
-------
stripper : func
function such that ``stripper(a_string)`` will strip `strip_prefix` from
``a_string`` if present, otherwise pass ``a_string`` unmodified | [
"Return",
"function",
"to",
"strip",
"strip_prefix",
"prefix",
"from",
"string",
"if",
"present"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L107-L124 |
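A fully self-contained example of ``get_prefix_stripper``; only matching prefixes are removed:

from delocate.libsana import get_prefix_stripper

strip = get_prefix_stripper('/tmp/wheel/')
assert strip('/tmp/wheel/pkg/module.so') == 'pkg/module.so'
assert strip('/usr/lib/libSystem.B.dylib') == '/usr/lib/libSystem.B.dylib'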
476 | matthew-brett/delocate | delocate/libsana.py | stripped_lib_dict | def stripped_lib_dict(lib_dict, strip_prefix):
""" Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths.
"""
relative_dict = {}
stripper = get_prefix_stripper(strip_prefix)
for lib_path, dependings_dict in lib_dict.items():
ding_dict = {}
for depending_libpath, install_name in dependings_dict.items():
ding_dict[stripper(depending_libpath)] = install_name
relative_dict[stripper(lib_path)] = ding_dict
return relative_dict | python | def stripped_lib_dict(lib_dict, strip_prefix):
relative_dict = {}
stripper = get_prefix_stripper(strip_prefix)
for lib_path, dependings_dict in lib_dict.items():
ding_dict = {}
for depending_libpath, install_name in dependings_dict.items():
ding_dict[stripper(depending_libpath)] = install_name
relative_dict[stripper(lib_path)] = ding_dict
return relative_dict | [
"def",
"stripped_lib_dict",
"(",
"lib_dict",
",",
"strip_prefix",
")",
":",
"relative_dict",
"=",
"{",
"}",
"stripper",
"=",
"get_prefix_stripper",
"(",
"strip_prefix",
")",
"for",
"lib_path",
",",
"dependings_dict",
"in",
"lib_dict",
".",
"items",
"(",
")",
":",
"ding_dict",
"=",
"{",
"}",
"for",
"depending_libpath",
",",
"install_name",
"in",
"dependings_dict",
".",
"items",
"(",
")",
":",
"ding_dict",
"[",
"stripper",
"(",
"depending_libpath",
")",
"]",
"=",
"install_name",
"relative_dict",
"[",
"stripper",
"(",
"lib_path",
")",
"]",
"=",
"ding_dict",
"return",
"relative_dict"
] | Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths. | [
"Return",
"lib_dict",
"with",
"strip_prefix",
"removed",
"from",
"start",
"of",
"paths"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L145-L175 |
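A small example of ``stripped_lib_dict`` on an invented ``lib_dict``, showing both key levels losing the prefix while install_names are left alone:

from delocate.libsana import stripped_lib_dict

lib_dict = {'/tmp/wheel/pkg/.dylibs/libA.dylib':
            {'/tmp/wheel/pkg/module.so': '@loader_path/.dylibs/libA.dylib'}}
rel = stripped_lib_dict(lib_dict, '/tmp/wheel/')
assert rel == {'pkg/.dylibs/libA.dylib':
               {'pkg/module.so': '@loader_path/.dylibs/libA.dylib'}}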
477 | matthew-brett/delocate | delocate/libsana.py | wheel_libs | def wheel_libs(wheel_fname, filt_func = None):
""" Return analysis of library dependencies with a Python wheel
Use this routine for a dump of the dependency tree.
Parameters
----------
wheel_fname : str
Filename of wheel
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``). ``libpath`` is the library being depended on,
relative to wheel root path if within wheel tree. ``dependings_dict``
is (key, value) of (``depending_lib_path``, ``install_name``). Again,
``depending_lib_path`` is the library relative to wheel root path, if
within wheel tree.
"""
with TemporaryDirectory() as tmpdir:
zip2dir(wheel_fname, tmpdir)
lib_dict = tree_libs(tmpdir, filt_func)
return stripped_lib_dict(lib_dict, realpath(tmpdir) + os.path.sep) | python | def wheel_libs(wheel_fname, filt_func = None):
with TemporaryDirectory() as tmpdir:
zip2dir(wheel_fname, tmpdir)
lib_dict = tree_libs(tmpdir, filt_func)
return stripped_lib_dict(lib_dict, realpath(tmpdir) + os.path.sep) | [
"def",
"wheel_libs",
"(",
"wheel_fname",
",",
"filt_func",
"=",
"None",
")",
":",
"with",
"TemporaryDirectory",
"(",
")",
"as",
"tmpdir",
":",
"zip2dir",
"(",
"wheel_fname",
",",
"tmpdir",
")",
"lib_dict",
"=",
"tree_libs",
"(",
"tmpdir",
",",
"filt_func",
")",
"return",
"stripped_lib_dict",
"(",
"lib_dict",
",",
"realpath",
"(",
"tmpdir",
")",
"+",
"os",
".",
"path",
".",
"sep",
")"
] | Return analysis of library dependencies within a Python wheel
Use this routine for a dump of the dependency tree.
Parameters
----------
wheel_fname : str
Filename of wheel
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``). ``libpath`` is the library being depended on,
relative to wheel root path if within wheel tree. ``dependings_dict``
is (key, value) of (``depending_lib_path``, ``install_name``). Again,
``depending_lib_path`` is the library relative to wheel root path, if
within wheel tree. | [
"Return",
"analysis",
"of",
"library",
"dependencies",
"with",
"a",
"Python",
"wheel"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L178-L205 |
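A minimal dump of a wheel's dependency tree via ``wheel_libs``; the wheel filename is invented:

from delocate.libsana import wheel_libs

for lib, dependers in sorted(
        wheel_libs('example-1.0-cp36-cp36m-macosx_10_9_x86_64.whl').items()):
    print(lib)
    for depender, install_name in sorted(dependers.items()):
        print('    needed by', depender, 'as', install_name)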
478 | matthew-brett/delocate | delocate/wheeltools.py | rewrite_record | def rewrite_record(bdist_dir):
""" Rewrite RECORD file with hashes for all files in `wheel_sdir`
Copied from :method:`wheel.bdist_wheel.bdist_wheel.write_record`
Will also unsign wheel
Parameters
----------
bdist_dir : str
Path of unpacked wheel file
"""
info_dirs = glob.glob(pjoin(bdist_dir, '*.dist-info'))
if len(info_dirs) != 1:
raise WheelToolsError("Should be exactly one `*.dist_info` directory")
record_path = pjoin(info_dirs[0], 'RECORD')
record_relpath = relpath(record_path, bdist_dir)
# Unsign wheel - because we're invalidating the record hash
sig_path = pjoin(info_dirs[0], 'RECORD.jws')
if exists(sig_path):
os.unlink(sig_path)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
for f in files:
yield pjoin(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with _open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relative_path = relpath(path, bdist_dir)
if skip(relative_path):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
path_for_record = relpath(
path, bdist_dir).replace(psep, '/')
writer.writerow((path_for_record, hash, size)) | python | def rewrite_record(bdist_dir):
info_dirs = glob.glob(pjoin(bdist_dir, '*.dist-info'))
if len(info_dirs) != 1:
raise WheelToolsError("Should be exactly one `*.dist_info` directory")
record_path = pjoin(info_dirs[0], 'RECORD')
record_relpath = relpath(record_path, bdist_dir)
# Unsign wheel - because we're invalidating the record hash
sig_path = pjoin(info_dirs[0], 'RECORD.jws')
if exists(sig_path):
os.unlink(sig_path)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
for f in files:
yield pjoin(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with _open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relative_path = relpath(path, bdist_dir)
if skip(relative_path):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
path_for_record = relpath(
path, bdist_dir).replace(psep, '/')
writer.writerow((path_for_record, hash, size)) | [
"def",
"rewrite_record",
"(",
"bdist_dir",
")",
":",
"info_dirs",
"=",
"glob",
".",
"glob",
"(",
"pjoin",
"(",
"bdist_dir",
",",
"'*.dist-info'",
")",
")",
"if",
"len",
"(",
"info_dirs",
")",
"!=",
"1",
":",
"raise",
"WheelToolsError",
"(",
"\"Should be exactly one `*.dist_info` directory\"",
")",
"record_path",
"=",
"pjoin",
"(",
"info_dirs",
"[",
"0",
"]",
",",
"'RECORD'",
")",
"record_relpath",
"=",
"relpath",
"(",
"record_path",
",",
"bdist_dir",
")",
"# Unsign wheel - because we're invalidating the record hash",
"sig_path",
"=",
"pjoin",
"(",
"info_dirs",
"[",
"0",
"]",
",",
"'RECORD.jws'",
")",
"if",
"exists",
"(",
"sig_path",
")",
":",
"os",
".",
"unlink",
"(",
"sig_path",
")",
"def",
"walk",
"(",
")",
":",
"for",
"dir",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"bdist_dir",
")",
":",
"for",
"f",
"in",
"files",
":",
"yield",
"pjoin",
"(",
"dir",
",",
"f",
")",
"def",
"skip",
"(",
"path",
")",
":",
"\"\"\"Wheel hashes every possible file.\"\"\"",
"return",
"(",
"path",
"==",
"record_relpath",
")",
"with",
"_open_for_csv",
"(",
"record_path",
",",
"'w+'",
")",
"as",
"record_file",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"record_file",
")",
"for",
"path",
"in",
"walk",
"(",
")",
":",
"relative_path",
"=",
"relpath",
"(",
"path",
",",
"bdist_dir",
")",
"if",
"skip",
"(",
"relative_path",
")",
":",
"hash",
"=",
"''",
"size",
"=",
"''",
"else",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"digest",
"=",
"hashlib",
".",
"sha256",
"(",
"data",
")",
".",
"digest",
"(",
")",
"hash",
"=",
"'sha256='",
"+",
"native",
"(",
"urlsafe_b64encode",
"(",
"digest",
")",
")",
"size",
"=",
"len",
"(",
"data",
")",
"path_for_record",
"=",
"relpath",
"(",
"path",
",",
"bdist_dir",
")",
".",
"replace",
"(",
"psep",
",",
"'/'",
")",
"writer",
".",
"writerow",
"(",
"(",
"path_for_record",
",",
"hash",
",",
"size",
")",
")"
] | Rewrite RECORD file with hashes for all files in `bdist_dir`
Copied from :meth:`wheel.bdist_wheel.bdist_wheel.write_record`
Will also unsign wheel
Parameters
----------
bdist_dir : str
Path of unpacked wheel file | [
"Rewrite",
"RECORD",
"file",
"with",
"hashes",
"for",
"all",
"files",
"in",
"wheel_sdir"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/wheeltools.py#L35-L81 |
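A sketch of refreshing RECORD after editing a wheel's contents; filenames are invented, and it assumes ``zip2dir`` / ``dir2zip`` live in ``delocate.tools`` as used elsewhere in this codebase:

from delocate.tools import zip2dir, dir2zip
from delocate.wheeltools import rewrite_record

zip2dir('example-1.0-py3-none-any.whl', 'unpacked')
# ... edit files under 'unpacked' here ...
rewrite_record('unpacked')   # refresh hashes and sizes in RECORD
dir2zip('unpacked', 'example-1.0-py3-none-any.whl')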
479 | matthew-brett/delocate | delocate/wheeltools.py | add_platforms | def add_platforms(in_wheel, platforms, out_path=None, clobber=False):
""" Add platform tags `platforms` to `in_wheel` filename and WHEEL tags
Add any platform tags in `platforms` that are missing from `in_wheel`
filename.
Add any platform tags in `platforms` that are missing from `in_wheel`
``WHEEL`` file.
Parameters
----------
in_wheel : str
Filename of wheel to which to add platform tags
platforms : iterable
platform tags to add to wheel filename and WHEEL tags - e.g.
``('macosx_10_9_intel', 'macosx_10_9_x86_64')``
out_path : None or str, optional
Directory to which to write new wheel. Default is directory containing
`in_wheel`
clobber : bool, optional
If True, overwrite existing output filename, otherwise raise error
Returns
-------
out_wheel : None or str
Absolute path of wheel file written, or None if no wheel file written.
"""
in_wheel = abspath(in_wheel)
out_path = dirname(in_wheel) if out_path is None else abspath(out_path)
wf = WheelFile(in_wheel)
info_fname = _get_wheelinfo_name(wf)
# Check what tags we have
in_fname_tags = wf.parsed_filename.groupdict()['plat'].split('.')
extra_fname_tags = [tag for tag in platforms if tag not in in_fname_tags]
in_wheel_base, ext = splitext(basename(in_wheel))
out_wheel_base = '.'.join([in_wheel_base] + list(extra_fname_tags))
out_wheel = pjoin(out_path, out_wheel_base + ext)
if exists(out_wheel) and not clobber:
raise WheelToolsError('Not overwriting {0}; set clobber=True '
'to overwrite'.format(out_wheel))
with InWheelCtx(in_wheel) as ctx:
info = read_pkg_info(info_fname)
if info['Root-Is-Purelib'] == 'true':
raise WheelToolsError('Cannot add platforms to pure wheel')
in_info_tags = [tag for name, tag in info.items() if name == 'Tag']
# Python version, C-API version combinations
pyc_apis = ['-'.join(tag.split('-')[:2]) for tag in in_info_tags]
# unique Python version, C-API version combinations
pyc_apis = unique_by_index(pyc_apis)
# Add new platform tags for each Python version, C-API combination
required_tags = ['-'.join(tup) for tup in product(pyc_apis, platforms)]
needs_write = False
for req_tag in required_tags:
if req_tag in in_info_tags: continue
needs_write = True
info.add_header('Tag', req_tag)
if needs_write:
write_pkg_info(info_fname, info)
# Tell context manager to write wheel on exit by setting filename
ctx.out_wheel = out_wheel
return ctx.out_wheel | python | def add_platforms(in_wheel, platforms, out_path=None, clobber=False):
in_wheel = abspath(in_wheel)
out_path = dirname(in_wheel) if out_path is None else abspath(out_path)
wf = WheelFile(in_wheel)
info_fname = _get_wheelinfo_name(wf)
# Check what tags we have
in_fname_tags = wf.parsed_filename.groupdict()['plat'].split('.')
extra_fname_tags = [tag for tag in platforms if tag not in in_fname_tags]
in_wheel_base, ext = splitext(basename(in_wheel))
out_wheel_base = '.'.join([in_wheel_base] + list(extra_fname_tags))
out_wheel = pjoin(out_path, out_wheel_base + ext)
if exists(out_wheel) and not clobber:
raise WheelToolsError('Not overwriting {0}; set clobber=True '
'to overwrite'.format(out_wheel))
with InWheelCtx(in_wheel) as ctx:
info = read_pkg_info(info_fname)
if info['Root-Is-Purelib'] == 'true':
raise WheelToolsError('Cannot add platforms to pure wheel')
in_info_tags = [tag for name, tag in info.items() if name == 'Tag']
# Python version, C-API version combinations
pyc_apis = ['-'.join(tag.split('-')[:2]) for tag in in_info_tags]
# unique Python version, C-API version combinations
pyc_apis = unique_by_index(pyc_apis)
# Add new platform tags for each Python version, C-API combination
required_tags = ['-'.join(tup) for tup in product(pyc_apis, platforms)]
needs_write = False
for req_tag in required_tags:
if req_tag in in_info_tags: continue
needs_write = True
info.add_header('Tag', req_tag)
if needs_write:
write_pkg_info(info_fname, info)
# Tell context manager to write wheel on exit by setting filename
ctx.out_wheel = out_wheel
return ctx.out_wheel | [
"def",
"add_platforms",
"(",
"in_wheel",
",",
"platforms",
",",
"out_path",
"=",
"None",
",",
"clobber",
"=",
"False",
")",
":",
"in_wheel",
"=",
"abspath",
"(",
"in_wheel",
")",
"out_path",
"=",
"dirname",
"(",
"in_wheel",
")",
"if",
"out_path",
"is",
"None",
"else",
"abspath",
"(",
"out_path",
")",
"wf",
"=",
"WheelFile",
"(",
"in_wheel",
")",
"info_fname",
"=",
"_get_wheelinfo_name",
"(",
"wf",
")",
"# Check what tags we have",
"in_fname_tags",
"=",
"wf",
".",
"parsed_filename",
".",
"groupdict",
"(",
")",
"[",
"'plat'",
"]",
".",
"split",
"(",
"'.'",
")",
"extra_fname_tags",
"=",
"[",
"tag",
"for",
"tag",
"in",
"platforms",
"if",
"tag",
"not",
"in",
"in_fname_tags",
"]",
"in_wheel_base",
",",
"ext",
"=",
"splitext",
"(",
"basename",
"(",
"in_wheel",
")",
")",
"out_wheel_base",
"=",
"'.'",
".",
"join",
"(",
"[",
"in_wheel_base",
"]",
"+",
"list",
"(",
"extra_fname_tags",
")",
")",
"out_wheel",
"=",
"pjoin",
"(",
"out_path",
",",
"out_wheel_base",
"+",
"ext",
")",
"if",
"exists",
"(",
"out_wheel",
")",
"and",
"not",
"clobber",
":",
"raise",
"WheelToolsError",
"(",
"'Not overwriting {0}; set clobber=True '",
"'to overwrite'",
".",
"format",
"(",
"out_wheel",
")",
")",
"with",
"InWheelCtx",
"(",
"in_wheel",
")",
"as",
"ctx",
":",
"info",
"=",
"read_pkg_info",
"(",
"info_fname",
")",
"if",
"info",
"[",
"'Root-Is-Purelib'",
"]",
"==",
"'true'",
":",
"raise",
"WheelToolsError",
"(",
"'Cannot add platforms to pure wheel'",
")",
"in_info_tags",
"=",
"[",
"tag",
"for",
"name",
",",
"tag",
"in",
"info",
".",
"items",
"(",
")",
"if",
"name",
"==",
"'Tag'",
"]",
"# Python version, C-API version combinations",
"pyc_apis",
"=",
"[",
"'-'",
".",
"join",
"(",
"tag",
".",
"split",
"(",
"'-'",
")",
"[",
":",
"2",
"]",
")",
"for",
"tag",
"in",
"in_info_tags",
"]",
"# unique Python version, C-API version combinations",
"pyc_apis",
"=",
"unique_by_index",
"(",
"pyc_apis",
")",
"# Add new platform tags for each Python version, C-API combination",
"required_tags",
"=",
"[",
"'-'",
".",
"join",
"(",
"tup",
")",
"for",
"tup",
"in",
"product",
"(",
"pyc_apis",
",",
"platforms",
")",
"]",
"needs_write",
"=",
"False",
"for",
"req_tag",
"in",
"required_tags",
":",
"if",
"req_tag",
"in",
"in_info_tags",
":",
"continue",
"needs_write",
"=",
"True",
"info",
".",
"add_header",
"(",
"'Tag'",
",",
"req_tag",
")",
"if",
"needs_write",
":",
"write_pkg_info",
"(",
"info_fname",
",",
"info",
")",
"# Tell context manager to write wheel on exit by setting filename",
"ctx",
".",
"out_wheel",
"=",
"out_wheel",
"return",
"ctx",
".",
"out_wheel"
] | Add platform tags `platforms` to `in_wheel` filename and WHEEL tags
Add any platform tags in `platforms` that are missing from `in_wheel`
filename.
Add any platform tags in `platforms` that are missing from `in_wheel`
``WHEEL`` file.
Parameters
----------
in_wheel : str
Filename of wheel to which to add platform tags
platforms : iterable
platform tags to add to wheel filename and WHEEL tags - e.g.
``('macosx_10_9_intel', 'macosx_10_9_x86_64')``
out_path : None or str, optional
Directory to which to write new wheel. Default is directory containing
`in_wheel`
clobber : bool, optional
If True, overwrite existing output filename, otherwise raise error
Returns
-------
out_wheel : None or str
Absolute path of wheel file written, or None if no wheel file written. | [
"Add",
"platform",
"tags",
"platforms",
"to",
"in_wheel",
"filename",
"and",
"WHEEL",
"tags"
] | ed48de15fce31c3f52f1a9f32cae1b02fc55aa60 | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/wheeltools.py#L162-L222 |
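A minimal usage sketch for ``add_platforms``; the wheel filename is invented, and ``clobber=True`` permits overwriting a previous output file:

from delocate.wheeltools import add_platforms

out = add_platforms('example-1.0-cp36-cp36m-macosx_10_9_x86_64.whl',
                    ['macosx_10_9_intel', 'macosx_10_9_x86_64'],
                    clobber=True)
print('wrote', out)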
480 | wiheto/teneto | teneto/networkmeasures/temporal_betweenness_centrality.py | temporal_betweenness_centrality | def temporal_betweenness_centrality(tnet=None, paths=None, calc='time'):
'''
Returns temporal betweenness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
calc : str
either 'global' or 'time'
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
Returns
--------
bet : array
normalized temporal betweenness centrality.
If calc = 'time', returns (node,time)
If calc = 'global', returns (node)
'''
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
bet = np.zeros([paths[['from', 'to']].max().max() +
1, paths['t_start'].max()+1])
for row in paths.iterrows():
if (np.isnan(row[1]['path includes'])).all():
pass
else:
nodes_in_path = np.unique(np.concatenate(
row[1]['path includes'])).astype(int).tolist()
nodes_in_path.remove(row[1]['from'])
nodes_in_path.remove(row[1]['to'])
if len(nodes_in_path) > 0:
bet[nodes_in_path, row[1]['t_start']] += 1
# Normalise bet
bet = (1/((bet.shape[0]-1)*(bet.shape[0]-2))) * bet
if calc == 'global':
bet = np.mean(bet, axis=1)
return bet | python | def temporal_betweenness_centrality(tnet=None, paths=None, calc='time'):
'''
Returns temporal betweenness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
calc : str
either 'global' or 'time'
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
Returns
--------
bet : array
normalized temporal betweenness centrality.
If calc = 'time', returns (node,time)
If calc = 'global', returns (node)
'''
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
bet = np.zeros([paths[['from', 'to']].max().max() +
1, paths['t_start'].max()+1])
for row in paths.iterrows():
if (np.isnan(row[1]['path includes'])).all():
pass
else:
nodes_in_path = np.unique(np.concatenate(
row[1]['path includes'])).astype(int).tolist()
nodes_in_path.remove(row[1]['from'])
nodes_in_path.remove(row[1]['to'])
if len(nodes_in_path) > 0:
bet[nodes_in_path, row[1]['t_start']] += 1
# Normalise bet
bet = (1/((bet.shape[0]-1)*(bet.shape[0]-2))) * bet
if calc == 'global':
bet = np.mean(bet, axis=1)
return bet | [ "def", "temporal_betweenness_centrality", "(", "tnet", "=", "None", ",", "paths", "=", "None", ",", "calc", "=", "'time'", ")", ":", "if", "tnet", "is", "not", "None", "and", "paths", "is", "not", "None", ":", "raise", "ValueError", "(", "'Only network or path input allowed.'", ")", "if", "tnet", "is", "None", "and", "paths", "is", "None", ":", "raise", "ValueError", "(", "'No input.'", ")", "# if shortest paths are not calculated, calculate them", "if", "tnet", "is", "not", "None", ":", "paths", "=", "shortest_temporal_path", "(", "tnet", ")", "bet", "=", "np", ".", "zeros", "(", "[", "paths", "[", "[", "'from'", ",", "'to'", "]", "]", ".", "max", "(", ")", ".", "max", "(", ")", "+", "1", ",", "paths", "[", "'t_start'", "]", ".", "max", "(", ")", "+", "1", "]", ")", "for", "row", "in", "paths", ".", "iterrows", "(", ")", ":", "if", "(", "np", ".", "isnan", "(", "row", "[", "1", "]", "[", "'path includes'", "]", ")", ")", ".", "all", "(", ")", ":", "pass", "else", ":", "nodes_in_path", "=", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "row", "[", "1", "]", "[", "'path includes'", "]", ")", ")", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "nodes_in_path", ".", "remove", "(", "row", "[", "1", "]", "[", "'from'", "]", ")", "nodes_in_path", ".", "remove", "(", "row", "[", "1", "]", "[", "'to'", "]", ")", "if", "len", "(", "nodes_in_path", ")", ">", "0", ":", "bet", "[", "nodes_in_path", ",", "row", "[", "1", "]", "[", "'t_start'", "]", "]", "+=", "1", "# Normalise bet", "bet", "=", "(", "1", "/", "(", "(", "bet", ".", "shape", "[", "0", "]", "-", "1", ")", "*", "(", "bet", ".", "shape", "[", "0", "]", "-", "2", ")", ")", ")", "*", "bet", "if", "calc", "==", "'global'", ":", "bet", "=", "np", ".", "mean", "(", "bet", ",", "axis", "=", "1", ")", "return", "bet" ] | Returns temporal betweenness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
calc : str
either 'global' or 'time'
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
Returns
--------
bet : array
normalized temporal betweenness centrality.
If calc = 'time', returns (node,time)
If calc = 'global', returns (node) | [ "Returns", "temporal", "betweenness", "centrality", "per", "node", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/temporal_betweenness_centrality.py#L9-L72 |
481 | wiheto/teneto | teneto/temporalcommunity/allegiance.py | allegiance | def allegiance(community):
"""
Computes the allegiance matrix with values representing the probability that
nodes i and j were assigned to the same community by time-varying clustering methods.
parameters
----------
community : array
array of community assignment of size node,time
returns
-------
P : array
module allegiance matrix, with P_ij probability that area i and j are in the same community
Reference:
----------
Bassett, et al. (2013) “Robust detection of dynamic community structure in networks”, Chaos, 23, 1
"""
N = community.shape[0]
C = community.shape[1]
T = P = np.zeros([N, N])
for t in range(len(community[0, :])):
for i in range(len(community[:, 0])):
for j in range(len(community[:, 0])):
if i == j:
continue
# T_ij indicates the number of times that i and j are assigned to the same community across time
if community[i][t] == community[j][t]:
T[i, j] += 1
# module allegiance matrix, probability that ij were assigned to the same community
P = (1/C)*T
return P | python | def allegiance(community):
N = community.shape[0]
C = community.shape[1]
T = P = np.zeros([N, N])
for t in range(len(community[0, :])):
for i in range(len(community[:, 0])):
for j in range(len(community[:, 0])):
if i == j:
continue
# T_ij indicates the number of times that i and j are assigned to the same community across time
if community[i][t] == community[j][t]:
T[i, j] += 1
# module allegiance matrix, probability that ij were assigned to the same community
P = (1/C)*T
return P | [ "def", "allegiance", "(", "community", ")", ":", "N", "=", "community", ".", "shape", "[", "0", "]", "C", "=", "community", ".", "shape", "[", "1", "]", "T", "=", "P", "=", "np", ".", "zeros", "(", "[", "N", ",", "N", "]", ")", "for", "t", "in", "range", "(", "len", "(", "community", "[", "0", ",", ":", "]", ")", ")", ":", "for", "i", "in", "range", "(", "len", "(", "community", "[", ":", ",", "0", "]", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "community", "[", ":", ",", "0", "]", ")", ")", ":", "if", "i", "==", "j", ":", "continue", "# T_ij indicates the number of times that i and j are assigned to the same community across time", "if", "community", "[", "i", "]", "[", "t", "]", "==", "community", "[", "j", "]", "[", "t", "]", ":", "T", "[", "i", ",", "j", "]", "+=", "1", "# module allegiance matrix, probability that ij were assigned to the same community", "P", "=", "(", "1", "/", "C", ")", "*", "T", "return", "P" ] | Computes the allegiance matrix with values representing the probability that
nodes i and j were assigned to the same community by time-varying clustering methods.
parameters
----------
community : array
array of community assignment of size node,time
returns
-------
P : array
module allegiance matrix, with P_ij probability that area i and j are in the same community
Reference:
----------
Bassett, et al. (2013) “Robust detection of dynamic community structure in networks”, Chaos, 23, 1 | [ "Computes", "the", "allegiance", "matrix", "with", "values", "representing", "the", "probability", "that", "nodes", "i", "and", "j", "were", "assigned", "to", "the", "same", "community", "by", "time", "-", "varying", "clustering", "methods", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/temporalcommunity/allegiance.py#L3-L39 |
482 | wiheto/teneto | teneto/generatenetwork/rand_poisson.py | rand_poisson | def rand_poisson(nnodes, ncontacts, lam=1, nettype='bu', netinfo=None, netrep='graphlet'):
"""
Generate a random network where intervals between contacts are distributed by a poisson distribution
Parameters
----------
nnodes : int
Number of nodes in networks
ncontacts : int or list
Number of expected contacts (i.e. edges). If list, number of contacts for each node.
Any zeros drawn are ignored so returned degree of network can be smaller than ncontacts.
lam : int or list
Expectation of interval.
nettype : str
'bu' or 'bd'
netinfo : dict
Dictionary of additional information
netrep : str
How the output should be.
If ncontacts is a list, so should lam.
Returns
-------
net : array or dict
Random network with intervals between active edges being Poisson distributed.
"""
if isinstance(ncontacts, list):
if len(ncontacts) != nnodes:
raise ValueError(
'Number of contacts, if a list, should be one per node')
if isinstance(lam, list):
if len(lam) != nnodes:
raise ValueError(
'Lambda value of Poisson distribution, if a list, should be one per node')
if isinstance(lam, list) and not isinstance(ncontacts, list) or not isinstance(lam, list) and isinstance(ncontacts, list):
raise ValueError(
'When one of lambda or ncontacts is given as a list, the other argument must also be a list.')
if nettype == 'bu':
edgen = int((nnodes*(nnodes-1))/2)
elif nettype == 'bd':
edgen = int(nnodes*nnodes)
if not isinstance(lam, list) and not isinstance(ncontacts, list):
icts = np.random.poisson(lam, size=(edgen, ncontacts))
net = np.zeros([edgen, icts.sum(axis=1).max()+1])
for n in range(edgen):
net[n, np.unique(np.cumsum(icts[n]))] = 1
else:
icts = []
ict_max = 0
for n in range(edgen):
icts.append(np.random.poisson(lam[n], size=ncontacts[n]))
if sum(icts[-1]) > ict_max:
ict_max = sum(icts[-1])
net = np.zeros([nnodes, ict_max+1])
for n in range(nnodes):
net[n, np.unique(np.cumsum(icts[n]))] = 1
if nettype == 'bu':
nettmp = np.zeros([nnodes, nnodes, net.shape[-1]])
ind = np.triu_indices(nnodes, k=1)
nettmp[ind[0], ind[1], :] = net
net = nettmp + nettmp.transpose([1, 0, 2])
elif nettype == 'bd':
net = net.reshape([nnodes, nnodes, net.shape[-1]], order='F')
net = set_diagonal(net, 0)
if netrep == 'contact':
if not netinfo:
netinfo = {}
netinfo['nettype'] = 'b' + nettype[-1]
net = graphlet2contact(net, netinfo)
return net | python | def rand_poisson(nnodes, ncontacts, lam=1, nettype='bu', netinfo=None, netrep='graphlet'):
if isinstance(ncontacts, list):
if len(ncontacts) != nnodes:
raise ValueError(
'Number of contacts, if a list, should be one per node')
if isinstance(lam, list):
if len(lam) != nnodes:
raise ValueError(
'Lambda value of Poisson distribution, if a list, should be one per node')
if isinstance(lam, list) and not isinstance(ncontacts, list) or not isinstance(lam, list) and isinstance(ncontacts, list):
raise ValueError(
'When one of lambda or ncontacts is given as a list, the other argument must also be a list.')
if nettype == 'bu':
edgen = int((nnodes*(nnodes-1))/2)
elif nettype == 'bd':
edgen = int(nnodes*nnodes)
if not isinstance(lam, list) and not isinstance(ncontacts, list):
icts = np.random.poisson(lam, size=(edgen, ncontacts))
net = np.zeros([edgen, icts.sum(axis=1).max()+1])
for n in range(edgen):
net[n, np.unique(np.cumsum(icts[n]))] = 1
else:
icts = []
ict_max = 0
for n in range(edgen):
icts.append(np.random.poisson(lam[n], size=ncontacts[n]))
if sum(icts[-1]) > ict_max:
ict_max = sum(icts[-1])
net = np.zeros([nnodes, ict_max+1])
for n in range(nnodes):
net[n, np.unique(np.cumsum(icts[n]))] = 1
if nettype == 'bu':
nettmp = np.zeros([nnodes, nnodes, net.shape[-1]])
ind = np.triu_indices(nnodes, k=1)
nettmp[ind[0], ind[1], :] = net
net = nettmp + nettmp.transpose([1, 0, 2])
elif nettype == 'bd':
net = net.reshape([nnodes, nnodes, net.shape[-1]], order='F')
net = set_diagonal(net, 0)
if netrep == 'contact':
if not netinfo:
netinfo = {}
netinfo['nettype'] = 'b' + nettype[-1]
net = graphlet2contact(net, netinfo)
return net | [ "def", "rand_poisson", "(", "nnodes", ",", "ncontacts", ",", "lam", "=", "1", ",", "nettype", "=", "'bu'", ",", "netinfo", "=", "None", ",", "netrep", "=", "'graphlet'", ")", ":", "if", "isinstance", "(", "ncontacts", ",", "list", ")", ":", "if", "len", "(", "ncontacts", ")", "!=", "nnodes", ":", "raise", "ValueError", "(", "'Number of contacts, if a list, should be one per node'", ")", "if", "isinstance", "(", "lam", ",", "list", ")", ":", "if", "len", "(", "lam", ")", "!=", "nnodes", ":", "raise", "ValueError", "(", "'Lambda value of Poisson distribution, if a list, should be one per node'", ")", "if", "isinstance", "(", "lam", ",", "list", ")", "and", "not", "isinstance", "(", "ncontacts", ",", "list", ")", "or", "not", "isinstance", "(", "lam", ",", "list", ")", "and", "isinstance", "(", "ncontacts", ",", "list", ")", ":", "raise", "ValueError", "(", "'When one of lambda or ncontacts is given as a list, the other argument must also be a list.'", ")", "if", "nettype", "==", "'bu'", ":", "edgen", "=", "int", "(", "(", "nnodes", "*", "(", "nnodes", "-", "1", ")", ")", "/", "2", ")", "elif", "nettype", "==", "'bd'", ":", "edgen", "=", "int", "(", "nnodes", "*", "nnodes", ")", "if", "not", "isinstance", "(", "lam", ",", "list", ")", "and", "not", "isinstance", "(", "ncontacts", ",", "list", ")", ":", "icts", "=", "np", ".", "random", ".", "poisson", "(", "lam", ",", "size", "=", "(", "edgen", ",", "ncontacts", ")", ")", "net", "=", "np", ".", "zeros", "(", "[", "edgen", ",", "icts", ".", "sum", "(", "axis", "=", "1", ")", ".", "max", "(", ")", "+", "1", "]", ")", "for", "n", "in", "range", "(", "edgen", ")", ":", "net", "[", "n", ",", "np", ".", "unique", "(", "np", ".", "cumsum", "(", "icts", "[", "n", "]", ")", ")", "]", "=", "1", "else", ":", "icts", "=", "[", "]", "ict_max", "=", "0", "for", "n", "in", "range", "(", "edgen", ")", ":", "icts", ".", "append", "(", "np", ".", "random", ".", "poisson", "(", "lam", "[", "n", "]", ",", "size", "=", "ncontacts", "[", "n", "]", ")", ")", "if", "sum", "(", "icts", "[", "-", "1", "]", ")", ">", "ict_max", ":", "ict_max", "=", "sum", "(", "icts", "[", "-", "1", "]", ")", "net", "=", "np", ".", "zeros", "(", "[", "nnodes", ",", "ict_max", "+", "1", "]", ")", "for", "n", "in", "range", "(", "nnodes", ")", ":", "net", "[", "n", ",", "np", ".", "unique", "(", "np", ".", "cumsum", "(", "icts", "[", "n", "]", ")", ")", "]", "=", "1", "if", "nettype", "==", "'bu'", ":", "nettmp", "=", "np", ".", "zeros", "(", "[", "nnodes", ",", "nnodes", ",", "net", ".", "shape", "[", "-", "1", "]", "]", ")", "ind", "=", "np", ".", "triu_indices", "(", "nnodes", ",", "k", "=", "1", ")", "nettmp", "[", "ind", "[", "0", "]", ",", "ind", "[", "1", "]", ",", ":", "]", "=", "net", "net", "=", "nettmp", "+", "nettmp", ".", "transpose", "(", "[", "1", ",", "0", ",", "2", "]", ")", "elif", "nettype", "==", "'bd'", ":", "net", "=", "net", ".", "reshape", "(", "[", "nnodes", ",", "nnodes", ",", "net", ".", "shape", "[", "-", "1", "]", "]", ",", "order", "=", "'F'", ")", "net", "=", "set_diagonal", "(", "net", ",", "0", ")", "if", "netrep", "==", "'contact'", ":", "if", "not", "netinfo", ":", "netinfo", "=", "{", "}", "netinfo", "[", "'nettype'", "]", "=", "'b'", "+", "nettype", "[", "-", "1", "]", "net", "=", "graphlet2contact", "(", "net", ",", "netinfo", ")", "return", "net" ] | Generate a random network where intervals between contacts are distributed by a poisson distribution
Parameters
----------
nnodes : int
Number of nodes in networks
ncontacts : int or list
Number of expected contacts (i.e. edges). If list, number of contacts for each node.
Any zeros drawn are ignored so returned degree of network can be smaller than ncontacts.
lam : int or list
Expectation of interval.
nettype : str
'bu' or 'bd'
netinfo : dict
Dictionary of additional information
netrep : str
How the output should be.
If ncontacts is a list, so should lam.
Returns
-------
net : array or dict
Random network with intervals between active edges being Poisson distributed. | [ "Generate", "a", "random", "network", "where", "intervals", "between", "contacts", "are", "distributed", "by", "a", "poisson", "distribution" ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/generatenetwork/rand_poisson.py#L9-L92 |
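A usage sketch for rand_poisson, using only the parameters documented above (nnodes, ncontacts, lam, nettype); assumes teneto is importable.
import teneto

# 10 nodes, 5 contacts per edge, mean inter-contact interval of 2 time units.
net = teneto.generatenetwork.rand_poisson(10, 5, lam=2, nettype='bu')
print(net.shape)  # (10, 10, T), with T set by the largest cumulative interval drawn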
483 | wiheto/teneto | teneto/networkmeasures/temporal_efficiency.py | temporal_efficiency | def temporal_efficiency(tnet=None, paths=None, calc='global'):
r"""
Returns temporal efficiency estimate. BU networks only.
Parameters
----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
calc : str
Options: 'global' (default) - measure averages over time and nodes;
'node' or 'node_from' average over nodes (i) and time. Giving average efficiency for i to j;
'node_to' measure average over nodes j and time;
Giving average efficiency using paths to j from i;
Returns
-------
E : array
Global temporal efficiency
"""
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
# Calculate efficiency which is 1 over the mean path.
if calc == 'global':
eff = 1 / np.nanmean(pathmat)
elif calc == 'node' or calc == 'node_from':
eff = 1 / np.nanmean(np.nanmean(pathmat, axis=2), axis=1)
elif calc == 'node_to':
eff = 1 / np.nanmean(np.nanmean(pathmat, axis=2), axis=0)
return eff | python | def temporal_efficiency(tnet=None, paths=None, calc='global'):
r"""
Returns temporal efficiency estimate. BU networks only.
Parameters
----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
calc : str
Options: 'global' (default) - measure averages over time and nodes;
'node' or 'node_from' average over nodes (i) and time. Giving average efficiency for i to j;
'node_to' measure average over nodes j and time;
Giving average efficiency using paths to j from i;
Returns
-------
E : array
Global temporal efficiency
"""
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
# Calculate efficiency which is 1 over the mean path.
if calc == 'global':
eff = 1 / np.nanmean(pathmat)
elif calc == 'node' or calc == 'node_from':
eff = 1 / np.nanmean(np.nanmean(pathmat, axis=2), axis=1)
elif calc == 'node_to':
eff = 1 / np.nanmean(np.nanmean(pathmat, axis=2), axis=0)
return eff | [ "def", "temporal_efficiency", "(", "tnet", "=", "None", ",", "paths", "=", "None", ",", "calc", "=", "'global'", ")", ":", "if", "tnet", "is", "not", "None", "and", "paths", "is", "not", "None", ":", "raise", "ValueError", "(", "'Only network or path input allowed.'", ")", "if", "tnet", "is", "None", "and", "paths", "is", "None", ":", "raise", "ValueError", "(", "'No input.'", ")", "# if shortest paths are not calculated, calculate them", "if", "tnet", "is", "not", "None", ":", "paths", "=", "shortest_temporal_path", "(", "tnet", ")", "pathmat", "=", "np", ".", "zeros", "(", "[", "paths", "[", "[", "'from'", ",", "'to'", "]", "]", ".", "max", "(", ")", ".", "max", "(", ")", "+", "1", ",", "paths", "[", "[", "'from'", ",", "'to'", "]", "]", ".", "max", "(", ")", ".", "max", "(", ")", "+", "1", ",", "paths", "[", "[", "'t_start'", "]", "]", ".", "max", "(", ")", ".", "max", "(", ")", "+", "1", "]", ")", "*", "np", ".", "nan", "pathmat", "[", "paths", "[", "'from'", "]", ".", "values", ",", "paths", "[", "'to'", "]", ".", "values", ",", "paths", "[", "'t_start'", "]", ".", "values", "]", "=", "paths", "[", "'temporal-distance'", "]", "# Calculate efficiency which is 1 over the mean path.", "if", "calc", "==", "'global'", ":", "eff", "=", "1", "/", "np", ".", "nanmean", "(", "pathmat", ")", "elif", "calc", "==", "'node'", "or", "calc", "==", "'node_from'", ":", "eff", "=", "1", "/", "np", ".", "nanmean", "(", "np", ".", "nanmean", "(", "pathmat", ",", "axis", "=", "2", ")", ",", "axis", "=", "1", ")", "elif", "calc", "==", "'node_to'", ":", "eff", "=", "1", "/", "np", ".", "nanmean", "(", "np", ".", "nanmean", "(", "pathmat", ",", "axis", "=", "2", ")", ",", "axis", "=", "0", ")", "return", "eff" ] | Returns temporal efficiency estimate. BU networks only.
Parameters
----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
calc : str
Options: 'global' (default) - measure averages over time and nodes;
'node' or 'node_from' average over nodes (i) and time. Giving average efficiency for i to j;
'node_to' measure average over nodes j and time;
Giving average efficiency using paths to j from i;
Returns
-------
E : array
Global temporal efficiency | [ "Returns", "temporal", "efficiency", "estimate", ".", "BU", "networks", "only", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/temporal_efficiency.py#L9-L60 |
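A sketch for temporal_efficiency; the generator call is again an assumed API, while the efficiency call matches the docstring. Efficiency is 1 over the mean shortest temporal distance, so higher values mean faster reachability.
import teneto

tnet = teneto.generatenetwork.rand_binomial([4, 4, 8], 0.4)  # assumed generator API
E = teneto.networkmeasures.temporal_efficiency(tnet=tnet, calc='global')
print(E)  # scalar: 1 / mean temporal distance over all node pairs and start times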
484 | wiheto/teneto | teneto/classes/network.py | TemporalNetwork.network_from_array | def network_from_array(self, array):
"""impo
Defines a network from an array.
Parameters
----------
array : array
3D numpy array.
"""
if len(array.shape) == 2:
array = np.array(array, ndmin=3).transpose([1, 2, 0])
teneto.utils.check_TemporalNetwork_input(array, 'array')
uvals = np.unique(array)
if len(uvals) == 2 and 1 in uvals and 0 in uvals:
i, j, t = np.where(array == 1)
self.network = pd.DataFrame(data={'i': i, 'j': j, 't': t})
else:
i, j, t = np.where(array != 0)
w = array[array != 0]
self.network = pd.DataFrame(
data={'i': i, 'j': j, 't': t, 'weight': w})
self.N = int(array.shape[0])
self.T = int(array.shape[-1])
self._update_network() | python | def network_from_array(self, array):
if len(array.shape) == 2:
array = np.array(array, ndmin=3).transpose([1, 2, 0])
teneto.utils.check_TemporalNetwork_input(array, 'array')
uvals = np.unique(array)
if len(uvals) == 2 and 1 in uvals and 0 in uvals:
i, j, t = np.where(array == 1)
self.network = pd.DataFrame(data={'i': i, 'j': j, 't': t})
else:
i, j, t = np.where(array != 0)
w = array[array != 0]
self.network = pd.DataFrame(
data={'i': i, 'j': j, 't': t, 'weight': w})
self.N = int(array.shape[0])
self.T = int(array.shape[-1])
self._update_network() | [ "def", "network_from_array", "(", "self", ",", "array", ")", ":", "if", "len", "(", "array", ".", "shape", ")", "==", "2", ":", "array", "=", "np", ".", "array", "(", "array", ",", "ndmin", "=", "3", ")", ".", "transpose", "(", "[", "1", ",", "2", ",", "0", "]", ")", "teneto", ".", "utils", ".", "check_TemporalNetwork_input", "(", "array", ",", "'array'", ")", "uvals", "=", "np", ".", "unique", "(", "array", ")", "if", "len", "(", "uvals", ")", "==", "2", "and", "1", "in", "uvals", "and", "0", "in", "uvals", ":", "i", ",", "j", ",", "t", "=", "np", ".", "where", "(", "array", "==", "1", ")", "self", ".", "network", "=", "pd", ".", "DataFrame", "(", "data", "=", "{", "'i'", ":", "i", ",", "'j'", ":", "j", ",", "'t'", ":", "t", "}", ")", "else", ":", "i", ",", "j", ",", "t", "=", "np", ".", "where", "(", "array", "!=", "0", ")", "w", "=", "array", "[", "array", "!=", "0", "]", "self", ".", "network", "=", "pd", ".", "DataFrame", "(", "data", "=", "{", "'i'", ":", "i", ",", "'j'", ":", "j", ",", "'t'", ":", "t", ",", "'weight'", ":", "w", "}", ")", "self", ".", "N", "=", "int", "(", "array", ".", "shape", "[", "0", "]", ")", "self", ".", "T", "=", "int", "(", "array", ".", "shape", "[", "-", "1", "]", ")", "self", ".", "_update_network", "(", ")" ] | Defines a network from an array.
Parameters
----------
array : array
3D numpy array. | [ "Defines", "a", "network", "from", "an", "array", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L179-L202 |
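A sketch of network_from_array via the TemporalNetwork class from the same file; the array values are illustrative. Binary {0,1} arrays yield an i,j,t table, anything else also gets a weight column.
import numpy as np
from teneto import TemporalNetwork

arr = np.zeros([3, 3, 2])
arr[0, 1, 0] = 1      # contact 0-1 at t=0
arr[1, 2, 1] = 0.5    # mixed values, so a 'weight' column is created
tnet = TemporalNetwork()
tnet.network_from_array(arr)
print(tnet.network)   # DataFrame with columns i, j, t, weight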
485 | wiheto/teneto | teneto/classes/network.py | TemporalNetwork._drop_duplicate_ij | def _drop_duplicate_ij(self):
"""
Drops duplicate entries from the network dataframe.
"""
self.network['ij'] = list(map(lambda x: tuple(sorted(x)), list(
zip(*[self.network['i'].values, self.network['j'].values]))))
self.network.drop_duplicates(['ij', 't'], inplace=True)
self.network.reset_index(inplace=True, drop=True)
self.network.drop('ij', inplace=True, axis=1) | python | def _drop_duplicate_ij(self):
self.network['ij'] = list(map(lambda x: tuple(sorted(x)), list(
zip(*[self.network['i'].values, self.network['j'].values]))))
self.network.drop_duplicates(['ij', 't'], inplace=True)
self.network.reset_index(inplace=True, drop=True)
self.network.drop('ij', inplace=True, axis=1) | [ "def", "_drop_duplicate_ij", "(", "self", ")", ":", "self", ".", "network", "[", "'ij'", "]", "=", "list", "(", "map", "(", "lambda", "x", ":", "tuple", "(", "sorted", "(", "x", ")", ")", ",", "list", "(", "zip", "(", "*", "[", "self", ".", "network", "[", "'i'", "]", ".", "values", ",", "self", ".", "network", "[", "'j'", "]", ".", "values", "]", ")", ")", ")", ")", "self", ".", "network", ".", "drop_duplicates", "(", "[", "'ij'", ",", "'t'", "]", ",", "inplace", "=", "True", ")", "self", ".", "network", ".", "reset_index", "(", "inplace", "=", "True", ",", "drop", "=", "True", ")", "self", ".", "network", ".", "drop", "(", "'ij'", ",", "inplace", "=", "True", ",", "axis", "=", "1", ")" ] | Drops duplicate entries from the network dataframe. | [ "Drops", "duplicate", "entries", "from", "the", "network", "dataframe", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L260-L268 |
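The dedup idiom in _drop_duplicate_ij, keying each undirected contact by its sorted (i, j) pair, works on any contact DataFrame; a standalone pandas sketch:
import pandas as pd

df = pd.DataFrame({'i': [0, 1], 'j': [1, 0], 't': [0, 0]})  # same contact listed twice
df['ij'] = [tuple(sorted(p)) for p in zip(df['i'], df['j'])]
df = df.drop_duplicates(['ij', 't']).drop(columns='ij').reset_index(drop=True)
print(df)  # a single 0-1 contact at t=0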
486 | wiheto/teneto | teneto/classes/network.py | TemporalNetwork._drop_diagonal | def _drop_diagonal(self):
"""
Drops self-contacts from the network dataframe.
"""
self.network = self.network.where(
self.network['i'] != self.network['j']).dropna()
self.network.reset_index(inplace=True, drop=True) | python | def _drop_diagonal(self):
self.network = self.network.where(
self.network['i'] != self.network['j']).dropna()
self.network.reset_index(inplace=True, drop=True) | [ "def", "_drop_diagonal", "(", "self", ")", ":", "self", ".", "network", "=", "self", ".", "network", ".", "where", "(", "self", ".", "network", "[", "'i'", "]", "!=", "self", ".", "network", "[", "'j'", "]", ")", ".", "dropna", "(", ")", "self", ".", "network", ".", "reset_index", "(", "inplace", "=", "True", ",", "drop", "=", "True", ")" ] | Drops self-contacts from the network dataframe. | [ "Drops", "self", "-", "contacts", "from", "the", "network", "dataframe", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L270-L276 |
487 | wiheto/teneto | teneto/classes/network.py | TemporalNetwork.add_edge | def add_edge(self, edgelist):
"""
Adds an edge to the network.
Parameters
----------
edgelist : list
a list (or list of lists) containing the i,j and t indices to be added. For weighted networks the list should also contain a 'weight' entry.
Returns
--------
Updates TenetoBIDS.network dataframe with new edge
"""
if not isinstance(edgelist[0], list):
edgelist = [edgelist]
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if len(edgelist[0]) == 4:
colnames = ['i', 'j', 't', 'weight']
elif len(edgelist[0]) == 3:
colnames = ['i', 'j', 't']
if self.hdf5:
with pd.HDFStore(self.network) as hdf:
rows = hdf.get_storer('network').nrows
hdf.append('network', pd.DataFrame(edgelist, columns=colnames, index=np.arange(
rows, rows+len(edgelist))), format='table', data_columns=True)
edgelist = np.array(edgelist)
if np.max(edgelist[:, :2]) > self.netshape[0]:
self.netshape[0] = np.max(edgelist[:, :2])
if np.max(edgelist[:, 2]) > self.netshape[1]:
self.netshape[1] = np.max(edgelist[:, 2])
else:
newedges = pd.DataFrame(edgelist, columns=colnames)
self.network = pd.concat(
[self.network, newedges], ignore_index=True, sort=True)
self._update_network() | python | def add_edge(self, edgelist):
if not isinstance(edgelist[0], list):
edgelist = [edgelist]
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if len(edgelist[0]) == 4:
colnames = ['i', 'j', 't', 'weight']
elif len(edgelist[0]) == 3:
colnames = ['i', 'j', 't']
if self.hdf5:
with pd.HDFStore(self.network) as hdf:
rows = hdf.get_storer('network').nrows
hdf.append('network', pd.DataFrame(edgelist, columns=colnames, index=np.arange(
rows, rows+len(edgelist))), format='table', data_columns=True)
edgelist = np.array(edgelist)
if np.max(edgelist[:, :2]) > self.netshape[0]:
self.netshape[0] = np.max(edgelist[:, :2])
if np.max(edgelist[:, 2]) > self.netshape[1]:
self.netshape[1] = np.max(edgelist[:, 2])
else:
newedges = pd.DataFrame(edgelist, columns=colnames)
self.network = pd.concat(
[self.network, newedges], ignore_index=True, sort=True)
self._update_network() | [ "def", "add_edge", "(", "self", ",", "edgelist", ")", ":", "if", "not", "isinstance", "(", "edgelist", "[", "0", "]", ",", "list", ")", ":", "edgelist", "=", "[", "edgelist", "]", "teneto", ".", "utils", ".", "check_TemporalNetwork_input", "(", "edgelist", ",", "'edgelist'", ")", "if", "len", "(", "edgelist", "[", "0", "]", ")", "==", "4", ":", "colnames", "=", "[", "'i'", ",", "'j'", ",", "'t'", ",", "'weight'", "]", "elif", "len", "(", "edgelist", "[", "0", "]", ")", "==", "3", ":", "colnames", "=", "[", "'i'", ",", "'j'", ",", "'t'", "]", "if", "self", ".", "hdf5", ":", "with", "pd", ".", "HDFStore", "(", "self", ".", "network", ")", "as", "hdf", ":", "rows", "=", "hdf", ".", "get_storer", "(", "'network'", ")", ".", "nrows", "hdf", ".", "append", "(", "'network'", ",", "pd", ".", "DataFrame", "(", "edgelist", ",", "columns", "=", "colnames", ",", "index", "=", "np", ".", "arange", "(", "rows", ",", "rows", "+", "len", "(", "edgelist", ")", ")", ")", ",", "format", "=", "'table'", ",", "data_columns", "=", "True", ")", "edgelist", "=", "np", ".", "array", "(", "edgelist", ")", "if", "np", ".", "max", "(", "edgelist", "[", ":", ",", ":", "2", "]", ")", ">", "self", ".", "netshape", "[", "0", "]", ":", "self", ".", "netshape", "[", "0", "]", "=", "np", ".", "max", "(", "edgelist", "[", ":", ",", ":", "2", "]", ")", "if", "np", ".", "max", "(", "edgelist", "[", ":", ",", "2", "]", ")", ">", "self", ".", "netshape", "[", "1", "]", ":", "self", ".", "netshape", "[", "1", "]", "=", "np", ".", "max", "(", "edgelist", "[", ":", ",", "2", "]", ")", "else", ":", "newedges", "=", "pd", ".", "DataFrame", "(", "edgelist", ",", "columns", "=", "colnames", ")", "self", ".", "network", "=", "pd", ".", "concat", "(", "[", "self", ".", "network", ",", "newedges", "]", ",", "ignore_index", "=", "True", ",", "sort", "=", "True", ")", "self", ".", "_update_network", "(", ")" ] | Adds an edge to the network.
Parameters
----------
edgelist : list
a list (or list of lists) containing the i,j and t indicies to be added. For weighted networks list should also contain a 'weight' key.
Returns
--------
Updates TenetoBIDS.network dataframe with new edge | [ "Adds", "an", "edge", "to", "the", "network", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L297-L332 |
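A sketch for add_edge; constructing an empty TemporalNetwork() with no arguments is assumed to be valid here. Edges are [i, j, t] triples, or [i, j, t, weight] for weighted networks.
from teneto import TemporalNetwork

tnet = TemporalNetwork()                 # assumed empty-constructor usage
tnet.add_edge([0, 1, 2])                 # one contact: i=0, j=1, t=2
tnet.add_edge([[1, 2, 0], [0, 2, 1]])    # several contacts at once
print(tnet.network)                      # i, j, t rows for all three contacts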
488 | wiheto/teneto | teneto/classes/network.py | TemporalNetwork.drop_edge | def drop_edge(self, edgelist):
"""
Removes an edge from the network.
Parameters
----------
edgelist : list
a list (or list of lists) containing the i,j and t indices to be removed.
Returns
--------
Updates TenetoBIDS.network dataframe
"""
if not isinstance(edgelist[0], list):
edgelist = [edgelist]
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if self.hdf5:
with pd.HDFStore(self.network) as hdf:
for e in edgelist:
hdf.remove(
'network', 'i == ' + str(e[0]) + ' & ' + 'j == ' + str(e[1]) + ' & ' + 't == ' + str(e[2]))
print('HDF5 delete warning. This will not reduce the size of the file.')
else:
for e in edgelist:
idx = self.network[(self.network['i'] == e[0]) & (
self.network['j'] == e[1]) & (self.network['t'] == e[2])].index
self.network.drop(idx, inplace=True)
self.network.reset_index(inplace=True, drop=True)
self._update_network() | python | def drop_edge(self, edgelist):
if not isinstance(edgelist[0], list):
edgelist = [edgelist]
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if self.hdf5:
with pd.HDFStore(self.network) as hdf:
for e in edgelist:
hdf.remove(
'network', 'i == ' + str(e[0]) + ' & ' + 'j == ' + str(e[1]) + ' & ' + 't == ' + str(e[2]))
print('HDF5 delete warning. This will not reduce the size of the file.')
else:
for e in edgelist:
idx = self.network[(self.network['i'] == e[0]) & (
self.network['j'] == e[1]) & (self.network['t'] == e[2])].index
self.network.drop(idx, inplace=True)
self.network.reset_index(inplace=True, drop=True)
self._update_network() | [ "def", "drop_edge", "(", "self", ",", "edgelist", ")", ":", "if", "not", "isinstance", "(", "edgelist", "[", "0", "]", ",", "list", ")", ":", "edgelist", "=", "[", "edgelist", "]", "teneto", ".", "utils", ".", "check_TemporalNetwork_input", "(", "edgelist", ",", "'edgelist'", ")", "if", "self", ".", "hdf5", ":", "with", "pd", ".", "HDFStore", "(", "self", ".", "network", ")", "as", "hdf", ":", "for", "e", "in", "edgelist", ":", "hdf", ".", "remove", "(", "'network'", ",", "'i == '", "+", "str", "(", "e", "[", "0", "]", ")", "+", "' & '", "+", "'j == '", "+", "str", "(", "e", "[", "1", "]", ")", "+", "' & '", "+", "'t == '", "+", "str", "(", "e", "[", "2", "]", ")", ")", "print", "(", "'HDF5 delete warning. This will not reduce the size of the file.'", ")", "else", ":", "for", "e", "in", "edgelist", ":", "idx", "=", "self", ".", "network", "[", "(", "self", ".", "network", "[", "'i'", "]", "==", "e", "[", "0", "]", ")", "&", "(", "self", ".", "network", "[", "'j'", "]", "==", "e", "[", "1", "]", ")", "&", "(", "self", ".", "network", "[", "'t'", "]", "==", "e", "[", "2", "]", ")", "]", ".", "index", "self", ".", "network", ".", "drop", "(", "idx", ",", "inplace", "=", "True", ")", "self", ".", "network", ".", "reset_index", "(", "inplace", "=", "True", ",", "drop", "=", "True", ")", "self", ".", "_update_network", "(", ")" ] | Removes an edge from the network.
Parameters
----------
edgelist : list
a list (or list of lists) containing the i,j and t indicies to be removes.
Returns
--------
Updates TenetoBIDS.network dataframe | [ "Removes", "an", "edge", "from", "the", "network", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L334-L363 |
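drop_edge takes the same [i, j, t] triples; continuing the sketch above (same empty-constructor assumption):
from teneto import TemporalNetwork

tnet = TemporalNetwork()
tnet.add_edge([[0, 1, 0], [1, 2, 1]])
tnet.drop_edge([1, 2, 1])       # remove the 1-2 contact at t=1
print(len(tnet.network))        # 1 contact remains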
489 | wiheto/teneto | teneto/classes/network.py | TemporalNetwork.calc_networkmeasure | def calc_networkmeasure(self, networkmeasure, **measureparams):
"""
Calculate network measure.
Parameters
-----------
networkmeasure : str
Function to call. Functions available are in teneto.networkmeasures
measureparams : kwargs
kwargs for teneto.networkmeasure.[networkmeasure]
"""
availablemeasures = [f for f in dir(
teneto.networkmeasures) if not f.startswith('__')]
if networkmeasure not in availablemeasures:
raise ValueError(
'Unknown network measure. Available network measures are: ' + ', '.join(availablemeasures))
funs = inspect.getmembers(teneto.networkmeasures)
funs = {m[0]: m[1] for m in funs if not m[0].startswith('__')}
measure = funs[networkmeasure](self, **measureparams)
return measure | python | def calc_networkmeasure(self, networkmeasure, **measureparams):
availablemeasures = [f for f in dir(
teneto.networkmeasures) if not f.startswith('__')]
if networkmeasure not in availablemeasures:
raise ValueError(
'Unknown network measure. Available network measures are: ' + ', '.join(availablemeasures))
funs = inspect.getmembers(teneto.networkmeasures)
funs = {m[0]: m[1] for m in funs if not m[0].startswith('__')}
measure = funs[networkmeasure](self, **measureparams)
return measure | [ "def", "calc_networkmeasure", "(", "self", ",", "networkmeasure", ",", "*", "*", "measureparams", ")", ":", "availablemeasures", "=", "[", "f", "for", "f", "in", "dir", "(", "teneto", ".", "networkmeasures", ")", "if", "not", "f", ".", "startswith", "(", "'__'", ")", "]", "if", "networkmeasure", "not", "in", "availablemeasures", ":", "raise", "ValueError", "(", "'Unknown network measure. Available network measures are: '", "+", "', '", ".", "join", "(", "availablemeasures", ")", ")", "funs", "=", "inspect", ".", "getmembers", "(", "teneto", ".", "networkmeasures", ")", "funs", "=", "{", "m", "[", "0", "]", ":", "m", "[", "1", "]", "for", "m", "in", "funs", "if", "not", "m", "[", "0", "]", ".", "startswith", "(", "'__'", ")", "}", "measure", "=", "funs", "[", "networkmeasure", "]", "(", "self", ",", "*", "*", "measureparams", ")", "return", "measure" ] | Calculate network measure.
Parameters
-----------
networkmeasure : str
Function to call. Functions available are in teneto.networkmeasures
measureparams : kwargs
kwargs for teneto.networkmeasure.[networkmeasure] | [ "Calculate", "network", "measure", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L365-L385 |
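The name-based dispatch in calc_networkmeasure (build a name-to-callable map with inspect.getmembers, then look the function up by string) is a general pattern; a standalone sketch against the json module:
import inspect
import json

def dispatch(module, funcname, *args, **kwargs):
    # Map public function names to callables, then call the requested one by name.
    funs = {name: obj for name, obj in inspect.getmembers(module, inspect.isfunction)}
    if funcname not in funs:
        raise ValueError('Unknown function. Available: ' + ', '.join(sorted(funs)))
    return funs[funcname](*args, **kwargs)

print(dispatch(json, 'dumps', {'a': 1}))  # '{"a": 1}'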
490 | wiheto/teneto | teneto/classes/network.py | TemporalNetwork.generatenetwork | def generatenetwork(self, networktype, **networkparams):
"""
Generate a network
Parameters
-----------
networktype : str
Function to call. Functions available are in teneto.generatenetwork
measureparams : kwargs
kwargs for teneto.generatenetwork.[networktype]
Returns
--------
TenetoBIDS.network is made with the generated network.
"""
availabletypes = [f for f in dir(
teneto.generatenetwork) if not f.startswith('__')]
if networktype not in availabletypes:
raise ValueError(
'Unknown network measure. Available networks to generate are: ' + ', '.join(availabletypes))
funs = inspect.getmembers(teneto.generatenetwork)
funs = {m[0]: m[1] for m in funs if not m[0].startswith('__')}
network = funs[networktype](**networkparams)
self.network_from_array(network)
if self.nettype[1] == 'u':
self._drop_duplicate_ij() | python | def generatenetwork(self, networktype, **networkparams):
availabletypes = [f for f in dir(
teneto.generatenetwork) if not f.startswith('__')]
if networktype not in availabletypes:
raise ValueError(
'Unknown network measure. Available networks to generate are: ' + ', '.join(availabletypes))
funs = inspect.getmembers(teneto.generatenetwork)
funs = {m[0]: m[1] for m in funs if not m[0].startswith('__')}
network = funs[networktype](**networkparams)
self.network_from_array(network)
if self.nettype[1] == 'u':
self._drop_duplicate_ij() | [ "def", "generatenetwork", "(", "self", ",", "networktype", ",", "*", "*", "networkparams", ")", ":", "availabletypes", "=", "[", "f", "for", "f", "in", "dir", "(", "teneto", ".", "generatenetwork", ")", "if", "not", "f", ".", "startswith", "(", "'__'", ")", "]", "if", "networktype", "not", "in", "availabletypes", ":", "raise", "ValueError", "(", "'Unknown network measure. Available networks to generate are: '", "+", "', '", ".", "join", "(", "availabletypes", ")", ")", "funs", "=", "inspect", ".", "getmembers", "(", "teneto", ".", "generatenetwork", ")", "funs", "=", "{", "m", "[", "0", "]", ":", "m", "[", "1", "]", "for", "m", "in", "funs", "if", "not", "m", "[", "0", "]", ".", "startswith", "(", "'__'", ")", "}", "network", "=", "funs", "[", "networktype", "]", "(", "*", "*", "networkparams", ")", "self", ".", "network_from_array", "(", "network", ")", "if", "self", ".", "nettype", "[", "1", "]", "==", "'u'", ":", "self", ".", "_drop_duplicate_ij", "(", ")" ] | Generate a network
Parameters
-----------
networktype : str
Function to call. Functions available are in teneto.generatenetwork
measureparams : kwargs
kwargs for teneto.generatenetwork.[networktype]
Returns
--------
TenetoBIDS.network is made with the generated network. | [ "Generate", "a", "network" ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L387-L413 |
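generatenetwork dispatches by name into teneto.generatenetwork, so the rand_poisson parameters documented earlier pass straight through; a sketch (empty constructor again assumed):
from teneto import TemporalNetwork

tnet = TemporalNetwork()
tnet.generatenetwork('rand_poisson', nnodes=6, ncontacts=3, lam=2, nettype='bu')
print(tnet.netshape)  # (nodes, time points) of the generated network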
491 | wiheto/teneto | teneto/classes/network.py | TemporalNetwork.save_aspickle | def save_aspickle(self, fname):
"""
Saves object as pickle.
fname : str
file path.
"""
if fname[-4:] != '.pkl':
fname += '.pkl'
with open(fname, 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) | python | def save_aspickle(self, fname):
if fname[-4:] != '.pkl':
fname += '.pkl'
with open(fname, 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) | [ "def", "save_aspickle", "(", "self", ",", "fname", ")", ":", "if", "fname", "[", "-", "4", ":", "]", "!=", "'.pkl'", ":", "fname", "+=", "'.pkl'", "with", "open", "(", "fname", ",", "'wb'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "self", ",", "f", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")" ] | Saves object as pickle.
fname : str
file path. | [ "Saves", "object", "as", "pickle", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L441-L451 |
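save_aspickle is a plain pickle.dump of the object (appending '.pkl' when missing), so loading back is ordinary pickle; a standalone round-trip with a stand-in object:
import pickle

obj = {'i': [0], 'j': [1], 't': [0]}            # stand-in for a network object
with open('mynetwork.pkl', 'wb') as f:          # same pattern as save_aspickle
    pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
with open('mynetwork.pkl', 'rb') as f:
    assert pickle.load(f) == obj                # loading back is plain pickle.load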
492 | wiheto/teneto | teneto/timeseries/postprocess.py | postpro_fisher | def postpro_fisher(data, report=None):
"""
Performs fisher transform on everything in data.
If report variable is passed, this is added to the report.
"""
if not report:
report = {}
# Due to rounding errors
data[data < -0.99999999999999] = -1
data[data > 0.99999999999999] = 1
fisher_data = 0.5 * np.log((1 + data) / (1 - data))
report['fisher'] = {}
report['fisher']['performed'] = 'yes'
#report['fisher']['diagonal'] = 'zeroed'
return fisher_data, report | python | def postpro_fisher(data, report=None):
if not report:
report = {}
# Due to rounding errors
data[data < -0.99999999999999] = -1
data[data > 0.99999999999999] = 1
fisher_data = 0.5 * np.log((1 + data) / (1 - data))
report['fisher'] = {}
report['fisher']['performed'] = 'yes'
#report['fisher']['diagonal'] = 'zeroed'
return fisher_data, report | [ "def", "postpro_fisher", "(", "data", ",", "report", "=", "None", ")", ":", "if", "not", "report", ":", "report", "=", "{", "}", "# Due to rounding errors", "data", "[", "data", "<", "-", "0.99999999999999", "]", "=", "-", "1", "data", "[", "data", ">", "0.99999999999999", "]", "=", "1", "fisher_data", "=", "0.5", "*", "np", ".", "log", "(", "(", "1", "+", "data", ")", "/", "(", "1", "-", "data", ")", ")", "report", "[", "'fisher'", "]", "=", "{", "}", "report", "[", "'fisher'", "]", "[", "'performed'", "]", "=", "'yes'", "#report['fisher']['diagonal'] = 'zeroed'", "return", "fisher_data", ",", "report" ] | Performs fisher transform on everything in data.
If report variable is passed, this is added to the report. | [ "Performs", "fisher", "transform", "on", "everything", "in", "data", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/postprocess.py#L10-L25 |
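The transform above is Fisher's z, z = 0.5 ln((1+r)/(1-r)) = artanh(r); the clipping guards correlations of exactly ±1, where z diverges. A quick numpy check:
import numpy as np

r = np.array([-0.5, 0.0, 0.9])
z = 0.5 * np.log((1 + r) / (1 - r))     # same formula as postpro_fisher
print(np.allclose(z, np.arctanh(r)))    # True: Fisher z is arctanh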
493 | wiheto/teneto | teneto/timeseries/postprocess.py | postpro_boxcox | def postpro_boxcox(data, report=None):
"""
Performs box cox transform on everything in data.
If report variable is passed, this is added to the report.
"""
if not report:
report = {}
# Note the min value of all time series will now be at least 1.
mindata = 1 - np.nanmin(data)
data = data + mindata
ind = np.triu_indices(data.shape[0], k=1)
boxcox_list = np.array([sp.stats.boxcox(np.squeeze(
data[ind[0][n], ind[1][n], :])) for n in range(0, len(ind[0]))])
boxcox_data = np.zeros(data.shape)
boxcox_data[ind[0], ind[1], :] = np.vstack(boxcox_list[:, 0])
boxcox_data[ind[1], ind[0], :] = np.vstack(boxcox_list[:, 0])
bccheck = np.array(np.transpose(boxcox_data, [2, 0, 1]))
bccheck = (bccheck - bccheck.mean(axis=0)) / bccheck.std(axis=0)
bccheck = np.squeeze(np.mean(bccheck, axis=0))
np.fill_diagonal(bccheck, 0)
report['boxcox'] = {}
report['boxcox']['performed'] = 'yes'
report['boxcox']['lambda'] = [
tuple([ind[0][n], ind[1][n], boxcox_list[n, -1]]) for n in range(0, len(ind[0]))]
report['boxcox']['shift'] = mindata
report['boxcox']['shited_to'] = 1
if np.sum(np.isnan(bccheck)) > 0:
report['boxcox'] = {}
report['boxcox']['performed'] = 'FAILED'
report['boxcox']['failure_reason'] = (
'Box cox transform is returning edges with uniform values through time. '
'This is probably due to one or more outliers or a very skewed distribution. '
'Have you corrected for sources of noise (e.g. movement)? '
'If yes, some time-series might need additional transforms to approximate to Gaussian.'
)
report['boxcox']['failure_consequence'] = (
'Box cox transform was skipped from the postprocess pipeline.'
)
boxcox_data = data - mindata
error_msg = ('TENETO WARNING: Box Cox transform problem. \n'
'Box Cox transform not performed. \n'
'See report for more details.')
print(error_msg)
return boxcox_data, report | python | def postpro_boxcox(data, report=None):
if not report:
report = {}
# Note the min value of all time series will now be at least 1.
mindata = 1 - np.nanmin(data)
data = data + mindata
ind = np.triu_indices(data.shape[0], k=1)
boxcox_list = np.array([sp.stats.boxcox(np.squeeze(
data[ind[0][n], ind[1][n], :])) for n in range(0, len(ind[0]))])
boxcox_data = np.zeros(data.shape)
boxcox_data[ind[0], ind[1], :] = np.vstack(boxcox_list[:, 0])
boxcox_data[ind[1], ind[0], :] = np.vstack(boxcox_list[:, 0])
bccheck = np.array(np.transpose(boxcox_data, [2, 0, 1]))
bccheck = (bccheck - bccheck.mean(axis=0)) / bccheck.std(axis=0)
bccheck = np.squeeze(np.mean(bccheck, axis=0))
np.fill_diagonal(bccheck, 0)
report['boxcox'] = {}
report['boxcox']['performed'] = 'yes'
report['boxcox']['lambda'] = [
tuple([ind[0][n], ind[1][n], boxcox_list[n, -1]]) for n in range(0, len(ind[0]))]
report['boxcox']['shift'] = mindata
report['boxcox']['shited_to'] = 1
if np.sum(np.isnan(bccheck)) > 0:
report['boxcox'] = {}
report['boxcox']['performed'] = 'FAILED'
report['boxcox']['failure_reason'] = (
'Box cox transform is returning edges with uniform values through time. '
'This is probably due to one or more outliers or a very skewed distribution. '
'Have you corrected for sources of noise (e.g. movement)? '
'If yes, some time-series might need additional transforms to approximate to Gaussian.'
)
report['boxcox']['failure_consequence'] = (
'Box cox transform was skipped from the postprocess pipeline.'
)
boxcox_data = data - mindata
error_msg = ('TENETO WARNING: Box Cox transform problem. \n'
'Box Cox transform not performed. \n'
'See report for more details.')
print(error_msg)
return boxcox_data, report | [ "def", "postpro_boxcox", "(", "data", ",", "report", "=", "None", ")", ":", "if", "not", "report", ":", "report", "=", "{", "}", "# Note the min value of all time series will now be at least 1.", "mindata", "=", "1", "-", "np", ".", "nanmin", "(", "data", ")", "data", "=", "data", "+", "mindata", "ind", "=", "np", ".", "triu_indices", "(", "data", ".", "shape", "[", "0", "]", ",", "k", "=", "1", ")", "boxcox_list", "=", "np", ".", "array", "(", "[", "sp", ".", "stats", ".", "boxcox", "(", "np", ".", "squeeze", "(", "data", "[", "ind", "[", "0", "]", "[", "n", "]", ",", "ind", "[", "1", "]", "[", "n", "]", ",", ":", "]", ")", ")", "for", "n", "in", "range", "(", "0", ",", "len", "(", "ind", "[", "0", "]", ")", ")", "]", ")", "boxcox_data", "=", "np", ".", "zeros", "(", "data", ".", "shape", ")", "boxcox_data", "[", "ind", "[", "0", "]", ",", "ind", "[", "1", "]", ",", ":", "]", "=", "np", ".", "vstack", "(", "boxcox_list", "[", ":", ",", "0", "]", ")", "boxcox_data", "[", "ind", "[", "1", "]", ",", "ind", "[", "0", "]", ",", ":", "]", "=", "np", ".", "vstack", "(", "boxcox_list", "[", ":", ",", "0", "]", ")", "bccheck", "=", "np", ".", "array", "(", "np", ".", "transpose", "(", "boxcox_data", ",", "[", "2", ",", "0", ",", "1", "]", ")", ")", "bccheck", "=", "(", "bccheck", "-", "bccheck", ".", "mean", "(", "axis", "=", "0", ")", ")", "/", "bccheck", ".", "std", "(", "axis", "=", "0", ")", "bccheck", "=", "np", ".", "squeeze", "(", "np", ".", "mean", "(", "bccheck", ",", "axis", "=", "0", ")", ")", "np", ".", "fill_diagonal", "(", "bccheck", ",", "0", ")", "report", "[", "'boxcox'", "]", "=", "{", "}", "report", "[", "'boxcox'", "]", "[", "'performed'", "]", "=", "'yes'", "report", "[", "'boxcox'", "]", "[", "'lambda'", "]", "=", "[", "tuple", "(", "[", "ind", "[", "0", "]", "[", "n", "]", ",", "ind", "[", "1", "]", "[", "n", "]", ",", "boxcox_list", "[", "n", ",", "-", "1", "]", "]", ")", "for", "n", "in", "range", "(", "0", ",", "len", "(", "ind", "[", "0", "]", ")", ")", "]", "report", "[", "'boxcox'", "]", "[", "'shift'", "]", "=", "mindata", "report", "[", "'boxcox'", "]", "[", "'shited_to'", "]", "=", "1", "if", "np", ".", "sum", "(", "np", ".", "isnan", "(", "bccheck", ")", ")", ">", "0", ":", "report", "[", "'boxcox'", "]", "=", "{", "}", "report", "[", "'boxcox'", "]", "[", "'performed'", "]", "=", "'FAILED'", "report", "[", "'boxcox'", "]", "[", "'failure_reason'", "]", "=", "(", "'Box cox transform is returning edges with uniform values through time. '", "'This is probably due to one or more outliers or a very skewed distribution. '", "'Have you corrected for sources of noise (e.g. movement)? '", "'If yes, some time-series might need additional transforms to approximate to Gaussian.'", ")", "report", "[", "'boxcox'", "]", "[", "'failure_consequence'", "]", "=", "(", "'Box cox transform was skipped from the postprocess pipeline.'", ")", "boxcox_data", "=", "data", "-", "mindata", "error_msg", "=", "(", "'TENETO WARNING: Box Cox transform problem. \\n'", "'Box Cox transform not performed. \\n'", "'See report for more details.'", ")", "print", "(", "error_msg", ")", "return", "boxcox_data", ",", "report" ] | Performs box cox transform on everything in data.
If report variable is passed, this is added to the report. | [ "Performs", "box", "cox", "transform", "on", "everything", "in", "data", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/postprocess.py#L28-L78 |
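scipy.stats.boxcox requires strictly positive input, which is why the function shifts the data so its minimum lands on 1 before fitting one lambda per edge; a one-series sketch:
import numpy as np
import scipy.stats

x = np.random.randn(100) ** 2          # skewed, non-negative data
x = x + (1 - np.nanmin(x))             # shift so the minimum is exactly 1
transformed, lam = scipy.stats.boxcox(x)
print(lam)                             # per-series lambda, as stored in report['boxcox']['lambda']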
494 | wiheto/teneto | teneto/utils/utils.py | binarize_rdp | def binarize_rdp(netin, level, sign='pos', axis='time'):
"""
Binarizes a network based on RDP compression.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation),
level : float
Delta parameter which is the tolorated error in RDP compression.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
Returns
-------
netout : array or dict (depending on input)
Binarized network
"""
netin, netinfo = process_input(netin, ['C', 'G', 'TO'])
trajectory = rdp(netin, level)
contacts = []
# Use the trajectory points as threshold
for n in range(trajectory['index'].shape[0]):
if sign == 'pos':
sel = trajectory['trajectory_points'][n][trajectory['trajectory']
[n][trajectory['trajectory_points'][n]] > 0]
elif sign == 'neg':
sel = trajectory['trajectory_points'][n][trajectory['trajectory']
[n][trajectory['trajectory_points'][n]] < 0]
else:
sel = trajectory['trajectory_points']
i_ind = np.repeat(trajectory['index'][n, 0], len(sel))
j_ind = np.repeat(trajectory['index'][n, 1], len(sel))
contacts.append(np.array([i_ind, j_ind, sel]).transpose())
contacts = np.concatenate(contacts)
# Create output dictionary
netout = dict(netinfo)
netout['contacts'] = contacts
netout['nettype'] = 'b' + netout['nettype'][1]
netout['dimord'] = 'node,node,time'
netout['timetype'] = 'discrete'
netout['diagonal'] = 0
# If input is graphlet, output graphlet
if netinfo['inputtype'] == 'G':
netout = contact2graphlet(netout)
else:
netout.pop('inputtype')
return netout | python | def binarize_rdp(netin, level, sign='pos', axis='time'):
netin, netinfo = process_input(netin, ['C', 'G', 'TO'])
trajectory = rdp(netin, level)
contacts = []
# Use the trajectory points as threshold
for n in range(trajectory['index'].shape[0]):
if sign == 'pos':
sel = trajectory['trajectory_points'][n][trajectory['trajectory']
[n][trajectory['trajectory_points'][n]] > 0]
elif sign == 'neg':
sel = trajectory['trajectory_points'][n][trajectory['trajectory']
[n][trajectory['trajectory_points'][n]] < 0]
else:
sel = trajectory['trajectory_points']
i_ind = np.repeat(trajectory['index'][n, 0], len(sel))
j_ind = np.repeat(trajectory['index'][n, 1], len(sel))
contacts.append(np.array([i_ind, j_ind, sel]).transpose())
contacts = np.concatenate(contacts)
# Create output dictionary
netout = dict(netinfo)
netout['contacts'] = contacts
netout['nettype'] = 'b' + netout['nettype'][1]
netout['dimord'] = 'node,node,time'
netout['timetype'] = 'discrete'
netout['diagonal'] = 0
# If input is graphlet, output graphlet
if netinfo['inputtype'] == 'G':
netout = contact2graphlet(netout)
else:
netout.pop('inputtype')
return netout | [ "def", "binarize_rdp", "(", "netin", ",", "level", ",", "sign", "=", "'pos'", ",", "axis", "=", "'time'", ")", ":", "netin", ",", "netinfo", "=", "process_input", "(", "netin", ",", "[", "'C'", ",", "'G'", ",", "'TO'", "]", ")", "trajectory", "=", "rdp", "(", "netin", ",", "level", ")", "contacts", "=", "[", "]", "# Use the trajectory points as threshold", "for", "n", "in", "range", "(", "trajectory", "[", "'index'", "]", ".", "shape", "[", "0", "]", ")", ":", "if", "sign", "==", "'pos'", ":", "sel", "=", "trajectory", "[", "'trajectory_points'", "]", "[", "n", "]", "[", "trajectory", "[", "'trajectory'", "]", "[", "n", "]", "[", "trajectory", "[", "'trajectory_points'", "]", "[", "n", "]", "]", ">", "0", "]", "elif", "sign", "==", "'neg'", ":", "sel", "=", "trajectory", "[", "'trajectory_points'", "]", "[", "n", "]", "[", "trajectory", "[", "'trajectory'", "]", "[", "n", "]", "[", "trajectory", "[", "'trajectory_points'", "]", "[", "n", "]", "]", "<", "0", "]", "else", ":", "sel", "=", "trajectory", "[", "'trajectory_points'", "]", "i_ind", "=", "np", ".", "repeat", "(", "trajectory", "[", "'index'", "]", "[", "n", ",", "0", "]", ",", "len", "(", "sel", ")", ")", "j_ind", "=", "np", ".", "repeat", "(", "trajectory", "[", "'index'", "]", "[", "n", ",", "1", "]", ",", "len", "(", "sel", ")", ")", "contacts", ".", "append", "(", "np", ".", "array", "(", "[", "i_ind", ",", "j_ind", ",", "sel", "]", ")", ".", "transpose", "(", ")", ")", "contacts", "=", "np", ".", "concatenate", "(", "contacts", ")", "# Create output dictionary", "netout", "=", "dict", "(", "netinfo", ")", "netout", "[", "'contacts'", "]", "=", "contacts", "netout", "[", "'nettype'", "]", "=", "'b'", "+", "netout", "[", "'nettype'", "]", "[", "1", "]", "netout", "[", "'dimord'", "]", "=", "'node,node,time'", "netout", "[", "'timetype'", "]", "=", "'discrete'", "netout", "[", "'diagonal'", "]", "=", "0", "# If input is graphlet, output graphlet", "if", "netinfo", "[", "'inputtype'", "]", "==", "'G'", ":", "netout", "=", "contact2graphlet", "(", "netout", ")", "else", ":", "netout", ".", "pop", "(", "'inputtype'", ")", "return", "netout" ] | Binarizes a network based on RDP compression.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation),
level : float
Delta parameter which is the tolerated error in RDP compression.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
Returns
-------
netout : array or dict (depending on input)
Binarized network | [ "Binarizes", "a", "network", "based", "on", "RDP", "compression", "." ] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L283-L335 |
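The delta ('level') in binarize_rdp is the Ramer-Douglas-Peucker error tolerance: points whose deviation from a straight-line fit exceeds delta are kept as trajectory points. A toy 1-D RDP, independent of teneto, to show what delta controls:
import numpy as np

def rdp_keep(y, delta):
    # Indices a 1-D Ramer-Douglas-Peucker pass keeps at tolerance delta (toy version).
    def rec(lo, hi):
        x = np.arange(lo, hi + 1)
        line = y[lo] + (y[hi] - y[lo]) * (x - lo) / max(hi - lo, 1)
        err = np.abs(y[lo:hi + 1] - line)
        k = int(np.argmax(err))
        if err[k] > delta and 0 < k < hi - lo:
            return rec(lo, lo + k)[:-1] + rec(lo + k, hi)
        return [lo, hi]
    return rec(0, len(y) - 1)

print(rdp_keep(np.array([0., 1., 0., 1., 0.]), 0.5))    # keeps every zig-zag point
print(rdp_keep(np.array([0., 0.1, 0., 0.1, 0.]), 0.5))  # [0, 4]: wiggle under delta is dropped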
495 | wiheto/teneto | teneto/utils/utils.py | binarize | def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time'):
"""
Binarizes a network, returning the network. General wrapper function for different binarization functions.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation),
threshold_type : str
What type of thresholds to make binarization. Options: 'rdp', 'percent', 'magnitude'.
threshold_level : str
Parameter dependent on threshold type.
If 'rdp', it is the delta (i.e. error allowed in compression).
If 'percent', it is the percentage to keep (e.g. 0.1, means keep 10% of signal).
If 'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis : str
Threshold over specified axis. Valid for percent and rdp. Can be time or graphlet.
Returns
-------
netout : array or dict (depending on input)
Binarized network
"""
if threshold_type == 'percent':
netout = binarize_percent(netin, threshold_level, sign, axis)
elif threshold_type == 'magnitude':
netout = binarize_magnitude(netin, threshold_level, sign)
elif threshold_type == 'rdp':
netout = binarize_rdp(netin, threshold_level, sign, axis)
else:
raise ValueError('Unknown value to parameter: threshold_type.')
return netout | python | def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time'):
if threshold_type == 'percent':
netout = binarize_percent(netin, threshold_level, sign, axis)
elif threshold_type == 'magnitude':
netout = binarize_magnitude(netin, threshold_level, sign)
elif threshold_type == 'rdp':
netout = binarize_rdp(netin, threshold_level, sign, axis)
else:
raise ValueError('Unknown value to parameter: threshold_type.')
return netout | [
"def",
"binarize",
"(",
"netin",
",",
"threshold_type",
",",
"threshold_level",
",",
"sign",
"=",
"'pos'",
",",
"axis",
"=",
"'time'",
")",
":",
"if",
"threshold_type",
"==",
"'percent'",
":",
"netout",
"=",
"binarize_percent",
"(",
"netin",
",",
"threshold_level",
",",
"sign",
",",
"axis",
")",
"elif",
"threshold_type",
"==",
"'magnitude'",
":",
"netout",
"=",
"binarize_magnitude",
"(",
"netin",
",",
"threshold_level",
",",
"sign",
")",
"elif",
"threshold_type",
"==",
"'rdp'",
":",
"netout",
"=",
"binarize_rdp",
"(",
"netin",
",",
"threshold_level",
",",
"sign",
",",
"axis",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown value to parameter: threshold_type.'",
")",
"return",
"netout"
] | Binarizes a network, returning the network. General wrapper function for different binarization functions.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation).
threshold_type : str
What type of threshold to use for binarization. Options: 'rdp', 'percent', 'magnitude'.
threshold_level : float
Parameter dependent on threshold type.
If 'rdp', it is the delta (i.e. error allowed in compression).
If 'percent', it is the percentage to keep (e.g. 0.1, means keep 10% of signal).
If 'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If 'neg', only negative values are thresholded and vice versa.
axis : str
Threshold over the specified axis. Valid for percent and rdp. Can be time or graphlet.
Returns
-------
netout : array or dict (depending on input)
Binarized network | [
"Binarizes",
"a",
"network",
"returning",
"the",
"network",
".",
"General",
"wrapper",
"function",
"for",
"different",
"binarization",
"functions",
"."
] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L382-L422 |
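Since `binarize` is the general entry point, a short usage example follows; it assumes a standard teneto install where graphlet arrays are accepted directly, as the row above documents.

```python
import numpy as np
from teneto.utils import binarize

rng = np.random.default_rng(1)
W = rng.random((4, 4, 10))
W = (W + W.transpose(1, 0, 2)) / 2   # undirected weighted graphlet
for i in range(4):
    W[i, i, :] = 0

# 'percent': keep the strongest 10% of edges (per time point by default).
B_percent = binarize(W, 'percent', 0.1)

# 'magnitude': keep edges whose weight exceeds 0.8.
B_magnitude = binarize(W, 'magnitude', 0.8)

# Any other threshold_type raises ValueError, as the dispatch above shows.
```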
496 | wiheto/teneto | teneto/utils/utils.py | process_input | def process_input(netIn, allowedformats, outputformat='G'):
"""
Takes input network and checks what the input is.
Parameters
----------
netIn : array, dict, or TemporalNetwork
Network (graphlet, contact or object)
allowedformats : str
Which formats of network objects are allowed. Options: 'C', 'TN', 'G'.
outputformat : str, default='G'
Target output format. Options: 'C', 'G' or 'TN'.
Returns
-------
C : dict
OR
G : array
Graphlet representation.
netInfo : dict
Metainformation about network.
OR
tnet : object
object of TemporalNetwork class
"""
inputtype = checkInput(netIn)
# Convert TN to G representation
if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN':
G = netIn.df_to_array()
netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape}
elif inputtype == 'TN' and 'TN' in allowedformats and outputformat == 'TN':
TN = netIn
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'G':
G = contact2graphlet(netIn)
netInfo = dict(netIn)
netInfo.pop('contacts')
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_dict=netIn)
elif inputtype == 'G' and 'G' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_array=netIn)
# Get network type if not set yet
elif inputtype == 'G' and 'G' in allowedformats:
netInfo = {}
netInfo['netshape'] = netIn.shape
netInfo['nettype'] = gen_nettype(netIn)
G = netIn
elif inputtype == 'C' and outputformat == 'C':
# Contact input requested as contact output: pass through with metadata
C = netIn
netInfo = dict(netIn)
netInfo.pop('contacts')
else:
raise ValueError('Input invalid.')
if outputformat == 'TN' and not isinstance(TN.network, str):
TN.network['i'] = TN.network['i'].astype(int)
TN.network['j'] = TN.network['j'].astype(int)
TN.network['t'] = TN.network['t'].astype(int)
if outputformat == 'C' or outputformat == 'G':
netInfo['inputtype'] = inputtype
if inputtype != 'C' and outputformat == 'C':
C = graphlet2contact(G, netInfo)
if outputformat == 'G':
return G, netInfo
elif outputformat == 'C':
return C
elif outputformat == 'TN':
return TN | python | def process_input(netIn, allowedformats, outputformat='G'):
inputtype = checkInput(netIn)
# Convert TN to G representation
if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN':
G = netIn.df_to_array()
netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape}
elif inputtype == 'TN' and 'TN' in allowedformats and outputformat == 'TN':
TN = netIn
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'G':
G = contact2graphlet(netIn)
netInfo = dict(netIn)
netInfo.pop('contacts')
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_dict=netIn)
elif inputtype == 'G' and 'G' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_array=netIn)
# Get network type if not set yet
elif inputtype == 'G' and 'G' in allowedformats:
netInfo = {}
netInfo['netshape'] = netIn.shape
netInfo['nettype'] = gen_nettype(netIn)
G = netIn
elif inputtype == 'C' and outputformat == 'C':
# Contact input requested as contact output: pass through with metadata
C = netIn
netInfo = dict(netIn)
netInfo.pop('contacts')
else:
raise ValueError('Input invalid.')
if outputformat == 'TN' and not isinstance(TN.network, str):
TN.network['i'] = TN.network['i'].astype(int)
TN.network['j'] = TN.network['j'].astype(int)
TN.network['t'] = TN.network['t'].astype(int)
if outputformat == 'C' or outputformat == 'G':
netInfo['inputtype'] = inputtype
if inputtype != 'C' and outputformat == 'C':
C = graphlet2contact(G, netInfo)
if outputformat == 'G':
return G, netInfo
elif outputformat == 'C':
return C
elif outputformat == 'TN':
return TN | [
"def",
"process_input",
"(",
"netIn",
",",
"allowedformats",
",",
"outputformat",
"=",
"'G'",
")",
":",
"inputtype",
"=",
"checkInput",
"(",
"netIn",
")",
"# Convert TN to G representation",
"if",
"inputtype",
"==",
"'TN'",
"and",
"'TN'",
"in",
"allowedformats",
"and",
"outputformat",
"!=",
"'TN'",
":",
"G",
"=",
"netIn",
".",
"df_to_array",
"(",
")",
"netInfo",
"=",
"{",
"'nettype'",
":",
"netIn",
".",
"nettype",
",",
"'netshape'",
":",
"netIn",
".",
"netshape",
"}",
"elif",
"inputtype",
"==",
"'TN'",
"and",
"'TN'",
"in",
"allowedformats",
"and",
"outputformat",
"==",
"'TN'",
":",
"TN",
"=",
"netIn",
"elif",
"inputtype",
"==",
"'C'",
"and",
"'C'",
"in",
"allowedformats",
"and",
"outputformat",
"==",
"'G'",
":",
"G",
"=",
"contact2graphlet",
"(",
"netIn",
")",
"netInfo",
"=",
"dict",
"(",
"netIn",
")",
"netInfo",
".",
"pop",
"(",
"'contacts'",
")",
"elif",
"inputtype",
"==",
"'C'",
"and",
"'C'",
"in",
"allowedformats",
"and",
"outputformat",
"==",
"'TN'",
":",
"TN",
"=",
"TemporalNetwork",
"(",
"from_dict",
"=",
"netIn",
")",
"elif",
"inputtype",
"==",
"'G'",
"and",
"'G'",
"in",
"allowedformats",
"and",
"outputformat",
"==",
"'TN'",
":",
"TN",
"=",
"TemporalNetwork",
"(",
"from_array",
"=",
"netIn",
")",
"# Get network type if not set yet",
"elif",
"inputtype",
"==",
"'G'",
"and",
"'G'",
"in",
"allowedformats",
":",
"netInfo",
"=",
"{",
"}",
"netInfo",
"[",
"'netshape'",
"]",
"=",
"netIn",
".",
"shape",
"netInfo",
"[",
"'nettype'",
"]",
"=",
"gen_nettype",
"(",
"netIn",
")",
"G",
"=",
"netIn",
"elif",
"inputtype",
"==",
"'C'",
"and",
"outputformat",
"==",
"'C'",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"'Input invalid.'",
")",
"if",
"outputformat",
"==",
"'TN'",
"and",
"not",
"isinstance",
"(",
"TN",
".",
"network",
",",
"str",
")",
":",
"TN",
".",
"network",
"[",
"'i'",
"]",
"=",
"TN",
".",
"network",
"[",
"'i'",
"]",
".",
"astype",
"(",
"int",
")",
"TN",
".",
"network",
"[",
"'j'",
"]",
"=",
"TN",
".",
"network",
"[",
"'j'",
"]",
".",
"astype",
"(",
"int",
")",
"TN",
".",
"network",
"[",
"'t'",
"]",
"=",
"TN",
".",
"network",
"[",
"'t'",
"]",
".",
"astype",
"(",
"int",
")",
"if",
"outputformat",
"==",
"'C'",
"or",
"outputformat",
"==",
"'G'",
":",
"netInfo",
"[",
"'inputtype'",
"]",
"=",
"inputtype",
"if",
"inputtype",
"!=",
"'C'",
"and",
"outputformat",
"==",
"'C'",
":",
"C",
"=",
"graphlet2contact",
"(",
"G",
",",
"netInfo",
")",
"if",
"outputformat",
"==",
"'G'",
":",
"return",
"G",
",",
"netInfo",
"elif",
"outputformat",
"==",
"'C'",
":",
"return",
"C",
"elif",
"outputformat",
"==",
"'TN'",
":",
"return",
"TN"
] | Takes input network and checks what the input is.
Parameters
----------
netIn : array, dict, or TemporalNetwork
Network (graphlet, contact or object)
allowedformats : str
Which formats of network objects are allowed. Options: 'C', 'TN', 'G'.
outputformat : str, default='G'
Target output format. Options: 'C', 'G' or 'TN'.
Returns
-------
C : dict
OR
G : array
Graphlet representation.
netInfo : dict
Metainformation about network.
OR
tnet : object
object of TemporalNetwork class | [
"Takes",
"input",
"network",
"and",
"checks",
"what",
"the",
"input",
"is",
"."
] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L575-L646 |
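To make the dispatch concrete, here is a hedged round-trip sketch. The exact `netinfo` contents (e.g. the 'bu' nettype string) depend on `gen_nettype` and `graphlet2contact`, which are defined earlier in this file and assumed to behave as documented.

```python
import numpy as np
from teneto.utils import process_input

G = np.zeros((3, 3, 5))
G[0, 1, [0, 2, 4]] = 1
G[1, 0, [0, 2, 4]] = 1            # binary undirected graphlet

# Graphlet in, graphlet out: array plus metainformation.
G_out, netinfo = process_input(G, allowedformats='CG', outputformat='G')
print(netinfo)   # e.g. {'netshape': (3, 3, 5), 'nettype': 'bu', 'inputtype': 'G'}

# Graphlet in, contact representation out.
C = process_input(G, allowedformats='CG', outputformat='C')
print(C['contacts'][:2])          # first few (i, j, t) contact tuples
```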
497 | wiheto/teneto | teneto/utils/utils.py | clean_community_indexes | def clean_community_indexes(communityID):
"""
Takes input of community assignments. Returns reindexed community assignment by using smallest numbers possible.
Parameters
----------
communityID : array-like
list or array of integers. Output from community detection algorithms.
Returns
-------
new_communityID : array
cleaned list going from 0 to len(np.unique(communityID))-1
Note
-----
Behaviour of function entails that the lowest community integer in communityID will receive the lowest integer in new_communityID.
"""
communityID = np.array(communityID)
cid_shape = communityID.shape
if len(cid_shape) > 1:
communityID = communityID.flatten()
new_communityID = np.zeros(len(communityID))
for i, n in enumerate(np.unique(communityID)):
new_communityID[communityID == n] = i
if len(cid_shape) > 1:
new_communityID = new_communityID.reshape(cid_shape)
return new_communityID | python | def clean_community_indexes(communityID):
communityID = np.array(communityID)
cid_shape = communityID.shape
if len(cid_shape) > 1:
communityID = communityID.flatten()
new_communityID = np.zeros(len(communityID))
for i, n in enumerate(np.unique(communityID)):
new_communityID[communityID == n] = i
if len(cid_shape) > 1:
new_communityID = new_communityID.reshape(cid_shape)
return new_communityID | [
"def",
"clean_community_indexes",
"(",
"communityID",
")",
":",
"communityID",
"=",
"np",
".",
"array",
"(",
"communityID",
")",
"cid_shape",
"=",
"communityID",
".",
"shape",
"if",
"len",
"(",
"cid_shape",
")",
">",
"1",
":",
"communityID",
"=",
"communityID",
".",
"flatten",
"(",
")",
"new_communityID",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"communityID",
")",
")",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"np",
".",
"unique",
"(",
"communityID",
")",
")",
":",
"new_communityID",
"[",
"communityID",
"==",
"n",
"]",
"=",
"i",
"if",
"len",
"(",
"cid_shape",
")",
">",
"1",
":",
"new_communityID",
"=",
"new_communityID",
".",
"reshape",
"(",
"cid_shape",
")",
"return",
"new_communityID"
] | Takes input of community assignments. Returns reindexed community assignment by using smallest numbers possible.
Parameters
----------
communityID : array-like
list or array of integers. Output from community detection algorithms.
Returns
-------
new_communityID : array
cleaned list going from 0 to len(np.unique(communityID))-1
Note
-----
Behaviour of function entails that the lowest community integer in communityID will receive the lowest integer in new_communityID. | [
"Takes",
"input",
"of",
"community",
"assignments",
".",
"Returns",
"reindexed",
"community",
"assignment",
"by",
"using",
"smallest",
"numbers",
"possible",
"."
] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L649-L680 |
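Because the body above is short and pure, its behaviour is easy to check by hand; the following worked example only assumes the import path used by the other utilities in this file.

```python
import numpy as np
from teneto.utils import clean_community_indexes

labels = [4, 4, 7, 2, 7, 2]
print(clean_community_indexes(labels))
# [1. 1. 2. 0. 2. 0.]  -- 2 -> 0, 4 -> 1, 7 -> 2: lowest input id gets 0

# Multi-dimensional input keeps its shape (e.g. node x time assignments).
grid = np.array([[4, 7],
                 [2, 4]])
print(clean_community_indexes(grid))
# [[1. 2.]
#  [0. 1.]]
```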
498 | wiheto/teneto | teneto/utils/utils.py | multiple_contacts_get_values | def multiple_contacts_get_values(C):
"""
Given a contact representation with repeated contacts, this function removes duplicates and creates a value
Parameters
----------
C : dict
contact representation with multiple repeated contacts.
Returns
-------
C_out : dict
Contact representation with duplicate contacts removed and the number of duplicates is now in the 'values' field.
"""
d = collections.OrderedDict()
for c in C['contacts']:
ct = tuple(c)
if ct in d:
d[ct] += 1
else:
d[ct] = 1
new_contacts = []
new_values = []
for (key, value) in d.items():
new_values.append(value)
new_contacts.append(key)
C_out = C
C_out['contacts'] = new_contacts
C_out['values'] = new_values
return C_out | python | def multiple_contacts_get_values(C):
d = collections.OrderedDict()
for c in C['contacts']:
ct = tuple(c)
if ct in d:
d[ct] += 1
else:
d[ct] = 1
new_contacts = []
new_values = []
for (key, value) in d.items():
new_values.append(value)
new_contacts.append(key)
C_out = C
C_out['contacts'] = new_contacts
C_out['values'] = new_values
return C_out | [
"def",
"multiple_contacts_get_values",
"(",
"C",
")",
":",
"d",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"c",
"in",
"C",
"[",
"'contacts'",
"]",
":",
"ct",
"=",
"tuple",
"(",
"c",
")",
"if",
"ct",
"in",
"d",
":",
"d",
"[",
"ct",
"]",
"+=",
"1",
"else",
":",
"d",
"[",
"ct",
"]",
"=",
"1",
"new_contacts",
"=",
"[",
"]",
"new_values",
"=",
"[",
"]",
"for",
"(",
"key",
",",
"value",
")",
"in",
"d",
".",
"items",
"(",
")",
":",
"new_values",
".",
"append",
"(",
"value",
")",
"new_contacts",
".",
"append",
"(",
"key",
")",
"C_out",
"=",
"C",
"C_out",
"[",
"'contacts'",
"]",
"=",
"new_contacts",
"C_out",
"[",
"'values'",
"]",
"=",
"new_values",
"return",
"C_out"
] | Given a contact representation with repeated contacts, this function removes duplicates and creates a value
Parameters
----------
C : dict
contact representation with multiple repeated contacts.
Returns
-------
C_out : dict
Contact representation with duplicate contacts removed and the number of duplicates is now in the 'values' field. | [
"Given",
"an",
"contact",
"representation",
"with",
"repeated",
"contacts",
"this",
"function",
"removes",
"duplicates",
"and",
"creates",
"a",
"value"
] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L683-L718 |
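One point worth noting from the body above is that `C_out = C` aliases the input, so the dict is modified in place; pass a copy if the original contact list must be preserved. A small usage sketch:

```python
from teneto.utils import multiple_contacts_get_values

C = {'contacts': [(0, 1, 0), (0, 1, 0), (1, 2, 3), (0, 1, 0)]}
C_out = multiple_contacts_get_values(C)
print(C_out['contacts'])   # [(0, 1, 0), (1, 2, 3)]
print(C_out['values'])     # [3, 1] -- duplicate counts per unique contact
assert C_out is C          # the input dict itself was updated in place
```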
499 | wiheto/teneto | teneto/utils/utils.py | check_distance_funciton_input | def check_distance_funciton_input(distance_func_name, netinfo):
"""
Function checks distance_func_name, if it is specified as 'default'. Then given the type of the network selects a default distance function.
Parameters
----------
distance_func_name : str
distance function name.
netinfo : dict
the output of utils.process_input
Returns
-------
distance_func_name : str
distance function name.
"""
if distance_func_name == 'default' and netinfo['nettype'][0] == 'b':
print('Default distance funciton specified. As network is binary, using Hamming')
distance_func_name = 'hamming'
elif distance_func_name == 'default' and netinfo['nettype'][0] == 'w':
distance_func_name = 'euclidean'
print(
'Default distance funciton specified. '
'As network is weighted, using Euclidean')
return distance_func_name | python | def check_distance_funciton_input(distance_func_name, netinfo):
if distance_func_name == 'default' and netinfo['nettype'][0] == 'b':
print('Default distance funciton specified. As network is binary, using Hamming')
distance_func_name = 'hamming'
elif distance_func_name == 'default' and netinfo['nettype'][0] == 'w':
distance_func_name = 'euclidean'
print(
'Default distance funciton specified. '
'As network is weighted, using Euclidean')
return distance_func_name | [
"def",
"check_distance_funciton_input",
"(",
"distance_func_name",
",",
"netinfo",
")",
":",
"if",
"distance_func_name",
"==",
"'default'",
"and",
"netinfo",
"[",
"'nettype'",
"]",
"[",
"0",
"]",
"==",
"'b'",
":",
"print",
"(",
"'Default distance funciton specified. As network is binary, using Hamming'",
")",
"distance_func_name",
"=",
"'hamming'",
"elif",
"distance_func_name",
"==",
"'default'",
"and",
"netinfo",
"[",
"'nettype'",
"]",
"[",
"0",
"]",
"==",
"'w'",
":",
"distance_func_name",
"=",
"'euclidean'",
"print",
"(",
"'Default distance funciton specified. '",
"'As network is weighted, using Euclidean'",
")",
"return",
"distance_func_name"
] | Function checks distance_func_name, if it is specified as 'default'. Then given the type of the network selects a default distance function.
Parameters
----------
distance_func_name : str
distance function name.
netinfo : dict
the output of utils.process_input
Returns
-------
distance_func_name : str
distance function name. | [
"Funciton",
"checks",
"distance_func_name",
"if",
"it",
"is",
"specified",
"as",
"default",
".",
"Then",
"given",
"the",
"type",
"of",
"the",
"network",
"selects",
"a",
"default",
"distance",
"function",
"."
] | 80d7a83a9adc1714589b020627c45bd5b66248ab | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L757-L786 |
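Finally, a small example of the dispatcher above (the 'funciton' misspelling is in the source identifier, so it is kept as-is); it assumes nettype strings follow teneto's 'b'/'w' prefix convention for binary/weighted networks.

```python
from teneto.utils import check_distance_funciton_input

print(check_distance_funciton_input('default', {'nettype': 'bu'}))
# prints the Hamming notice, then: hamming
print(check_distance_funciton_input('default', {'nettype': 'wd'}))
# prints the Euclidean notice, then: euclidean
print(check_distance_funciton_input('jaccard', {'nettype': 'bu'}))
# non-'default' names pass through unchanged: jaccard
```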