# Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = ['Renderer'] import sys import os import re import mimetypes from contextlib import contextmanager import commonmark import commonmark_extensions.tables from .assets import assets from .log import log from .reference import * from .parse import * # Effectively disable implicit code blocks commonmark.blocks.CODE_INDENT = 1000 class CustomRendererWithTables(commonmark_extensions.tables.RendererWithTables): def make_table_node(self, node): return '<table class="user">' # https://github.com/GovReady/CommonMark-py-Extensions/issues/3#issuecomment-756499491 # Thanks to hughdavenport class TableWaitingForBug3(commonmark_extensions.tables.Table): @staticmethod def continue_(parser=None, container=None): ln = parser.current_line if not parser.indented and commonmark.blocks.peek(ln, parser.next_nonspace) == "|": parser.advance_next_nonspace() parser.advance_offset(1, False) elif not parser.indented and commonmark.blocks.peek(ln, parser.next_nonspace) not in ("", ">", "`", None): pass else: return 1 return 0 commonmark.blocks.Table = TableWaitingForBug3 class Renderer: """ Takes a Parser object and provides an interface to generate rendered HTML. """ def __init__(self, parser): self.parser = parser self.config = parser.config self.ctx = parser.ctx # Create a pseudo Reference for the search page using the special 'search' type. # This is used to ensure the relative paths are correct. Use the name '--search' # as this won't conflict with any user-provided names. parser.refs['--search'] = Reference(parser, type='search', symbol='Search') self._templates = { 'head': assets.get('head.tmpl.html', encoding='utf8'), 'foot': assets.get('foot.tmpl.html', encoding='utf8'), 'search': assets.get('search.tmpl.html', encoding='utf8'), } self._assets_version = assets.hash()[:7] def _get_root_path(self): """ Returns the path prefix for the document root, which is relative to the current context. """ # The topref of the current context's reference. The path will be relative to # this topref's file. viatopref = self.ctx.ref.topref if (viatopref.type == 'manual' and viatopref.name == 'index') or viatopref.type == 'search': return '' else: return '../' def _get_ref_link_info(self, ref): """ Returns (html file name, URL fragment) of the given Reference object. """ # The top-level Reference object that holds this reference, which respects @within. topsym = ref.userdata.get('within_topsym') or ref.topsym try: topref = self.parser.refs[topsym] except KeyError: raise KeyError('top-level reference "%s" not found (from "%s")' % (topsym, ref.name)) from None prefix = self._get_root_path() if ref.topref.type != 'manual' or ref.topref.name != 'index': prefix += '{}/'.format(topref.type) if ref.topref.type == 'manual': # Manuals don't use fully qualified fragments. 
fragment = '#' + ref.symbol if ref.scopes else '' else: fragment = '#{}'.format(ref.name) if ref.name != ref.topsym else '' return prefix + (ref.userdata.get('within_topsym') or ref.topsym) + '.html', fragment def _get_ref_href(self, ref): """ Returns the href src for the given Reference object, which is directly used in <a> tags in the rendered content. """ file, fragment = self._get_ref_link_info(ref) return file + fragment def _render_ref_markdown(self, ref, text, code=False): """ Returns the Reference as a markdown link. If code is True, then the given text is wrapped in backticks. """ backtick = '`' if code else '' return '[{tick}{text}{parens}{tick}]({href})'.format( tick=backtick, text=text or ref.name, parens='()' if ref.type == 'function' and not text else '', href=self._get_ref_href(ref) ) def _render_ref_markdown_re(self, m): """ Regexp callback to handle the @{refname} case. """ code = (m.group(1) == '`') ref = self.parser._resolve_ref(m.group(2)) if ref: return self._render_ref_markdown(ref, m.group(3), code=code) else: log.warning('%s:~%s: reference "%s" could not be resolved', self.ctx.file, self.ctx.line, m.group(2)) return m.group(3) or m.group(2) def _render_backtick_ref_markdown_re(self, m): """ Regexp callback to handle the `refname` case. """ ref = self.parser._resolve_ref(m.group(1)) if ref: return self._render_ref_markdown(ref, text=m.group(1), code=True) else: return '`{}`'.format(m.group(1)) def _refs_to_markdown(self, block): """ Replaces `refname` and @{refname} in the given block of text with markdown links. """ # Resolve `ref` block = re.sub(r'(?<!`)`([^` ]+)`', self._render_backtick_ref_markdown_re, block, 0, re.S) # Resolve @{ref} and @{ref|text}. Do this *after* `ref` in case the ref is in the # form `@{stuff}`. block = re.sub(r'(`)?@{([^}|]+)(?:\|([^}]*))?}(`)?', self._render_ref_markdown_re, block, 0, re.S) return block def _content_to_markdown(self, content, strip_comments=True): """ Converts a docstring block into markdown. Docstring blocks can appear in sections, or as content associated with a function definition or field. This function returns 3 values: a dict of name -> (type, docstring) for @tparam tags, a list of (type, docstrings) for @treturn tags, and a string holding the converted content to markdown. """ if not content: return None, None, '' output = [] params = {} returns = [] # List of [tag, args, indent, lines] tagstack = [] supported_tags = 'tparam', 'treturn', 'usage', 'example', 'code', 'see', 'warning', 'note' def end_tag(): tag, args, indent, lines = tagstack.pop() target = tagstack[-1][3] if tagstack else output if tag in ('usage', 'example'): target.append('##### ' + tag.title()) if tag in ('usage', 'example', 'code'): target.append('```lua') # Remove trailing newlines. while lines and not lines[-1].strip(): lines.pop() # Dedent all lines according to the indentation of the # first line. 
indent = len(re.search(r'^( *)', lines[0]).group(1)) target.extend([l[indent:] for l in lines]) target.append('```') elif tag == 'tparam' and len(args) >= 2: types = args[0].split('|') name = args[1] params[name] = types, ' '.join(args[2:] + lines) elif tag == 'treturn': types = args[0].split('|') doc = ' '.join(args[1:] + lines) returns.append((types, doc)) elif tag == 'see': refs = ['@{{{}}}'.format(see) for see in args] target.append('\x01\x03See also {}</div>'.format(', '.join(refs))) elif tag == 'warning' or tag == 'note': html = '\x01\x02{}\x01{}\x01{}\n</div></div>\n' heading = ' '.join(args) if args else tag.title() target.append(html.format(tag, heading, '\n'.join(lines))) def end_tags(all, line=None, indent=None): if not all: end_tag() else: while tagstack: end_tag() if line and tagstack: last_tag_indent = tagstack[-1][2] tagstack[-1][3].append(line) line = None return line last_line = content[-1][0] for n, line in content: self.ctx.update(line=n) tag, args = self.parser._parse_tag(line, require_comment=strip_comments) if strip_comments: line = line.lstrip('-').rstrip() indent = len(re.search(r'^( *)', line).group(1)) if tagstack: last_tag_indent = tagstack[-1][2] # Determine threshold at which we will consider the last tag to have # terminated. if tag: # Any tag at the same level as the last tag (or below) will close threshold = last_tag_indent else: threshold = last_tag_indent if not tag and indent > threshold and line: tagstack[-1][3].append(line) line = None if n == last_line or (line and indent <= threshold): line = end_tags(n == last_line, line if not tag else None, indent) if tag: tagstack.append([tag, args, indent, []]) if tag not in supported_tags: log.error('%s:%s: unknown tag @%s', self.ctx.file, n, tag) elif n == last_line: end_tags(n == last_line) elif line is not None: if tagstack: last = tagstack[-1] last[3].append(line) else: output.append(line) return params, returns, '\n'.join(output) def _get_first_sentence(self, md): """ Returns the first sentence from the given markdown. """ # This is rather cheeky, but just handles these common abbreviations so they don't # interpreted as end-of-sentence. escape = lambda s: s.replace('e.g.', 'e\x00g\x00').replace('i.e.', 'i\x00e\x00') unescape = lambda s: s.replace('\x00', '.') m = re.search(r'^(.+?[.?!])(?: |$|\n)(.*)', escape(md), re.S) if m: sentence, remaining = m.groups() # Remove period, but preserve other sentence-ending punctuation return unescape(sentence).strip().rstrip('.'), unescape(remaining).strip() else: return md, '' def _markdown_to_html(self, md): """ Renders the given markdown as HTML and returns the result. """ md = self._refs_to_markdown(md) parser = commonmark_extensions.tables.ParserWithTables() ast = parser.parse(md) html = CustomRendererWithTables().render(ast) def replace_admonition(m): type, title = m.groups() return '<div class="admonition {}"><div class="title">{}</div><div class="body"><p>'.format(type, title) html = re.sub(r'\x01\x02([^\x01]+)\x01([^\x01]+)\x01', replace_admonition, html) html = html.replace('\x01\x03', '<div class="see">') # As a result of our disgusting abuse of commonmark, we end up with divs inside # paragraphs, which is invalid. Take care of this now. html = html.replace('<p><div', '<div').replace('</div></p>', '</div>') return html def _markdown_to_text(self, md): """ Strips markdown codes from the given markdown and returns the result. 
""" # Code blocks text = re.sub(r'```.*?```', '', md, flags=re.S) # Inline preformatted code text = re.sub(r'`([^`]+)`', '\\1', text) # Headings text = re.sub(r'#+', '', text) # Bold text = re.sub(r'\*([^*]+)\*', '\\1', text) # Link or inline image text = re.sub(r'!?\[([^]]*)\]\([^)]+\)', '\\1', text) # Clean up non-markdown things. # Reference with custom display text = re.sub(r'@{[^|]+\|([^}]+)\}', '\\1', text) # Just a reference text = re.sub(r'@{([^}]+)\}', '\\1', text) # Custom things like admonitions text = re.sub(r'\x01(\x02|\x03).*(</div>|\x01)', '', text) text = text.replace('</div>', '') # Consolidate multiple whitespaces text = re.sub(r'\s+', ' ', text) return text.strip() def _types_to_html(self, types): """ Resolves references in the given list of types, and returns HTML of all types in a human-readable string. """ resolved = [] for tp in types: ref = self.parser._resolve_ref(tp) if ref: href = self._get_ref_href(ref) tp = '<a href="{}">{}</a>'.format(href, tp) resolved.append('<em>{}</em>'.format(tp)) if len(resolved) <= 1: return ''.join(resolved) else: return ', '.join(resolved[:-1]) + ' or ' + resolved[-1] def preprocess(self, topref): """ Preprocesses the given topref, rendering its content to HTML and storing the result in an HTML attribute, which is a list holding the individual rendered lines. """ topref.html = [] if topref.type in ('class', 'module'): topref.userdata['empty'] = not self._render_classmod(topref, topref.html.append) elif topref.type == 'manual': self._render_manual(topref, topref.html.append) def _render_user_links(self, topref, root, out): sections = sorted(s for s in self.config.sections() if s.startswith('link')) for section in sections: img = self.config.get(section, 'icon', fallback=None) cls = '' if img: if img in ('download', 'github', 'gitlab', 'bitbucket'): img = '{root}img/i-' + img + '.svg?' + self._assets_version img = '<img src="{}" alt=""/>'.format(img.replace('{root}', root)) cls = ' iconleft' out('<div class="button{}"><a href="{}" title="{}">{}<span>{}</span></a></div>'.format( cls, self.config.get(section, 'url', fallback='').replace('{root}', root), self.config.get(section, 'tooltip', fallback=''), img or '', self.config.get(section, 'text'), )) @contextmanager def _render_html(self, topref, lines): """ A context manager that renders the page frame for the given topref, and yields a function that appends a line to the page within the inner content area. 
""" self.ctx.update(ref=topref) sections = self.parser._get_sections(topref) if not sections and topref.type == 'manual': log.critical('manual "%s" has no sections (empty doc or possible symbol collision)', topref.name) sys.exit(1) title = self.config.get('project', 'name', fallback='Lua Project') html_title = '{} - {}'.format( sections[0].display if topref.type == 'manual' else topref.name, title ) # Alias to improve readability out = lines.append root = self._get_root_path() head = [] css = self.config.get('project', 'css', fallback=None) if css: head.append('<link href="{}{}?{}" rel="stylesheet" />'.format(root, css, self._assets_version)) favicon = self.config.get('project', 'favicon', fallback=None) if favicon: mimetype, _ = mimetypes.guess_type(favicon) mimetype = ' type="{}"'.format(mimetype) if mimetype else '' # Favicon is always copied to doc root, so take only the filename _, favicon = os.path.split(favicon) head.append('<link rel="shortcut icon" {} href="{}{}?{}"/>'.format(mimetype, root, favicon, self._assets_version)) out(self._templates['head'].format( version=self._assets_version, title=html_title, head='\n'.join(head), root=root, bodyclass='{}-{}'.format(topref.type, re.sub(r'\W+', '', topref.name).lower()) )) toprefs = self.parser.topsyms.values() manual = [ref for ref in toprefs if ref.type == 'manual'] classes = sorted([ref for ref in toprefs if ref.type == 'class'], key=lambda ref: ref.name) modules = [ref for ref in toprefs if ref.type == 'module'] # Determine prev/next buttons relative to current topref. found = prevref = nextref = None for ref in manual + classes + modules: if found: nextref = ref break elif ref.topsym == topref.name or topref.type == 'search': found = True else: prevref = ref hometext = self.config.get('project', 'title', fallback=title) out('<div class="topbar">') out('<div class="group one">') if self.config.has_section('manual') and self.config.get('manual', 'index', fallback=None): path = '' if (topref.type == 'manual' and topref.name == 'index') else '../' out('<div class="button description"><a href="{}index.html"><span>{}</span></a></div>'.format(path, hometext)) else: out('<div class="description"><span>{}</span></div>'.format(hometext)) out('</div>') out('<div class="group two">') self._render_user_links(topref, root, out) out('</div>') out('<div class="group three">') if prevref: out('<div class="button iconleft"><a href="{}" title="{}"><img src="{}img/i-left.svg?{}" alt=""/><span>Previous</span></a></div>'.format( self._get_ref_href(prevref), prevref.name, root, self._assets_version )) if nextref: out('<div class="button iconright"><a href="{}" title="{}"><span>Next</span><img src="{}img/i-right.svg?{}" alt=""/></a></div>'.format( self._get_ref_href(nextref), nextref.name, root, self._assets_version )) out('</div>') out('</div>') # Determine section headings to construct sidebar. 
out('<div class="sidebar">') out('<form action="{}search.html">'.format(root)) # out('<form onsubmit="return window.search()">'.format(root)) out('<input class="search" name="q" type="search" placeholder="Search" />') out('</form>') if sections: out('<div class="sections">') out('<div class="heading">Contents</div>') out('<ul>') for section in sections: self.ctx.update(ref=section) _, _, md = self._content_to_markdown(section.content) if section.type in ('class', 'module'): section.heading = '{} <code>{}</code>'.format(section.type.title(), section.symbol) section.body = md elif section.topref.type == 'manual': section.heading = section.display section.body = md else: heading, section.body = self._get_first_sentence(md) section.heading = self._markdown_to_html(heading) out('<li><a href="#{}">{}</a></li>'.format(section.symbol, section.heading)) out('</ul>') out('</div>') if self.parser.parsed['manual']: out('<div class="manual">') out('<div class="heading">Manual</div>') out('<ul>') for ref in self.parser.parsed['manual']: if ref.scope: # This is a section heading, or it's the index document, so don't include # it in the list of manual pages. continue cls = ' class="selected"' if ref.name == topref.name else '' out('<li{}><a href="{}">{}</a></li>'.format(cls, self._get_ref_href(ref), ref.display)) out('</ul>') out('</div>') if classes: out('<div class="classes">') out('<div class="heading">Classes</div>') out('<ul>') for ref in classes: cls = ' class="selected"' if ref.name == topref.name else '' out('<li{}><a href="{}">{}</a></li>'.format(cls, self._get_ref_href(ref), ref.display)) out('</ul>') out('</div>') if modules: out('<div class="modules">') out('<div class="heading">Modules</div>') out('<ul>') for ref in modules: if ref.userdata.get('empty') and ref.implicit: # Skip empty implicit module continue cls = ' class="selected"' if ref.name == topref.name else '' out('<li{}><a href="{}">{}</a></li>'.format(cls, self._get_ref_href(ref), ref.name)) out('</ul>') out('</div>') # End sidebar out('</div>') out('<div class="body">') try: yield out finally: out('</div>') out(self._templates['foot'].format(root=root, version=self._assets_version)) def render(self, topref): """ Renders a preprocessed topref to HTML, returning a string containing the rendered HTML. preprocess() must have been called on the topref first. """ lines = [] with self._render_html(topref, lines) as out: out('\n'.join(topref.html)) return '\n'.join(lines) def _permalink(self, id): """ Returns the HTML for a permalink used for directly linkable references such as section headings, functions, fields, etc. """ return '<a class="permalink" href="#{}" title="Permalink to this definition">¶</a>'.format(id) def _render_manual(self, manualref, out): """ Renders the given manual top-level Reference as HTML, calling the given out() function for each line of HTML. """ out('<div class="manual">') if manualref.content: # Include any preamble before the first heading. 
_, _, md = self._content_to_markdown(manualref.content, strip_comments=False) out(self._markdown_to_html(md)) for section in self.parser._get_sections(manualref): self.ctx.update(ref=section) level = section.flags['level'] out('<h{} id="{}">{}'.format(level, section.symbol, section.display)) out(self._permalink(section.symbol)) out('</h{}>'.format(level)) _, _, md = self._content_to_markdown(section.content, strip_comments=False) out(self._markdown_to_html(md)) out('</div>') def _render_classmod(self, topref, out): """ Renders the given class or module top-level Reference as HTML, calling the given out() function for each line of HTML. """ has_content = False for section in self.parser._get_sections(topref): self.ctx.update(ref=section) # Parse out section heading and body. _, _, md = self._content_to_markdown(section.content) if section.type in ('class', 'module'): section.heading = '{} <code>{}</code>'.format(section.type.title(), section.symbol) section.body = md elif section.topref.type == 'manual': section.heading = section.display section.body = md else: heading, section.body = self._get_first_sentence(md) # Fall back to section name if there is no content for the heading. section.heading = self._markdown_to_html(heading) if heading.strip() else section.name out('<div class="section">') out('<h2 class="{}" id="{}">{}'.format( section.type, section.symbol, # Heading converted from markdown contains paragraph tags, and it # isn't valid HTML for headings to contain block elements. section.heading.replace('<p>', '').replace('</p>', '') )) out(self._permalink(section.symbol)) out('</h2>') out('<div class="inner">') h = section.hierarchy if len(h) > 1: out('<div class="hierarchy">') out('<div class="heading">Class Hierarchy</div>') out('<ul>') for n, cls in enumerate(h): if cls == section: html = cls.name self_class = ' self' else: html = self._types_to_html([cls.name]) self_class = '' prefix = (('&nbsp;'*(n-1)*6) + '&nbsp;└─ ') if n > 0 else '' out('<li class="class{}">{}<span>{}</span></li>'.format(self_class, prefix, html)) out('</ul>') out('</div>') # section.heading and section.body is set by _render_html() if section.body: out(self._markdown_to_html(section.body)) functions = list(self.parser._get_elements_in_section('function', section.section, section.topsym)) fields = list(self.parser._get_elements_in_section('field', section.section, section.topsym)) has_content = has_content or section.body or functions or fields # functions.sort(key=lambda ref: ref.name) # fields.sort(key=lambda ref: ref.name) fields_title = 'Fields' fields_meta_columns = 0 fields_has_type_column = False for ref in fields: n = 0 if ref.scope.type == 'class': fields_title = 'Attributes' if ref.flags.get('meta'): n += 1 if ref.flags.get('type'): fields_has_type_column = True fields_meta_columns = max(n, fields_meta_columns) functions_title = 'Functions' functions_meta_columns = 0 for ref in functions: n = 0 if ref.scope.type == 'class' and ':' in ref.symbol: functions_title = 'Methods' if ref.flags.get('meta'): n += 1 functions_meta_columns = max(n, functions_meta_columns) # # Output synopsis for this section. 
# compact = section.flags.get('compact', []) fullnames = section.flags.get('fullnames') fields_compact = 'fields' in compact functions_compact = 'functions' in compact if functions or fields: out('<div class="synopsis">') if not fields_compact: out('<h3>Synopsis</h3>') if fields: if functions or not fields_compact: out('<div class="heading">{}</div>'.format(fields_title)) out('<table class="fields {}">'.format('compact' if fields_compact else '')) for ref in fields: out('<tr>') display = ref.name if fullnames else ref.symbol if not fields_compact: out('<td class="name"><a href="#{}"><var>{}</var></a></td>'.format(ref.name, display)) else: link = self._permalink(ref.name) out('<td class="name"><var id="{}">{}</var>{}</td>'.format(ref.name, display, link)) nmeta = fields_meta_columns if ref.flags.get('type'): types = self._types_to_html(ref.flags['type']) out('<td class="meta types">{}</td>'.format(types)) elif fields_has_type_column: out('<td class="meta"></td>') if ref.flags.get('meta'): html = self._markdown_to_html(ref.flags['meta']) out('<td class="meta">{}</td>'.format(html)) nmeta -= 1 while nmeta > 0: out('<td class="meta"></td>') nmeta -= 1 _, _, ref.md = self._content_to_markdown(ref.content) md = self._get_first_sentence(ref.md)[0] if not fields_compact else ref.md if md: out('<td class="doc">{}</td>'.format(self._markdown_to_html(md))) out('</tr>') out('</table>') if functions: if fields or not functions_compact: out('<div class="heading">{}</div>'.format(functions_title)) out('<table class="functions {}">'.format('compact' if functions_compact else '')) for ref in functions: out('<tr>') # For compact view, remove topsym prefix from symbol display = ref.display_compact if ref.scope.type == 'class' else ref.display if not functions_compact: out('<td class="name"><a href="#{}"><var>{}</var></a>()</td>'.format(ref.name, display)) else: link = self._permalink(ref.name) args = ', '.join('<em>{}</em>'.format(arg) for arg in ref.extra) html = '<td class="name"><var id="{}">{}</var>({}){}</td>' out(html.format(ref.name, display, args, link)) meta = functions_meta_columns if ref.flags.get('meta'): out('<td class="meta">{}</td>'.format(ref.flags['meta'])) meta -= 1 while meta > 0: out('<td class="meta"></td>') meta -= 1 ref.params, ref.returns, ref.md = self._content_to_markdown(ref.content) md = self._get_first_sentence(ref.md)[0] if not functions_compact else md out('<td class="doc">{}</td>'.format(self._markdown_to_html(md))) out('</tr>') out('</table>') out('</div>') # # Output fields for this section # if fields and not fields_compact: if functions: out('<h3 class="fields">{}</h3>'.format(fields_title)) out('<dl class="fields">') for ref in fields: out('<dt id="{}">'.format(ref.name)) out('<span class="icon"></span><var>{}</var>'.format(ref.display)) if ref.flags.get('type'): types = self._types_to_html(ref.flags['type']) out('<span class="tag type">{}</span>'.format(types)) if ref.flags.get('meta'): out('<span class="tag meta">{}</span>'.format(ref.flags['meta'])) out(self._permalink(ref.name)) out('</dt>') out('<dd>') out(self._markdown_to_html(ref.md)) out('</dd>') out('</dl>') # # Output functions for this section # if functions and not functions_compact: if fields: out('<h3 class="functions">{}</h3>'.format(functions_title)) out('<dl class="functions">') for ref in functions: args = ', '.join('<em>{}</em>'.format(arg) for arg in ref.extra) out('<dt id="{}">'.format(ref.name)) out('<span class="icon"></span><var>{}</var>({})'.format(ref.display, args)) if ref.flags.get('meta'): 
out('<span class="tag meta">{}</span>'.format(ref.flags['meta'])) out(self._permalink(ref.name)) out('</dt>') out('<dd>') out(self._markdown_to_html(ref.md)) if ref.params: out('<div class="heading">Parameters</div>') out('<table class="parameters">') for arg in ref.extra: try: types, doc = ref.params[arg] except KeyError: log.warning('%s() missing @tparam for "%s" parameter', ref.name, arg) types = [] doc = '' out('<tr>') out('<td class="name"><var>{}</var></td>'.format(arg)) out('<td class="types">({})</td>'.format(self._types_to_html(types))) out('<td class="doc">{}</td>'.format(self._markdown_to_html(doc))) out('</tr>') out('</table>') if ref.returns: out('<div class="heading">Return Values</div>') out('<table class="returns">') for n, (types, doc) in enumerate(ref.returns, 1): out('<tr>') if len(ref.returns) > 1: out('<td class="name">{}.</td>'.format(n)) out('<td class="types">({})</td>'.format(self._types_to_html(types))) out('<td class="doc">{}</td>'.format(self._markdown_to_html(doc))) out('</tr>') out('</table>') out('</dd>') out('</dl>') # Close inner section out('</div>') # Close outer section out('</div>') return has_content def render_search_index(self): log.info('generating search index') topref = self.parser.refs['--search'] self.ctx.update(ref=topref) lines = [] out = lines.append def add(ref, tp): href = self._get_ref_href(ref) _, _, md = self._content_to_markdown(ref.content) text = self._markdown_to_text(md) title = ref.display if tp == 'section' and ref.topref.type != 'manual': # Non-manual sections typically use the first sentence as the section # title. This heuristic uses the first sentence only if it's less than 80 # characters, otherwise falls back to the section title. first, remaining = self._get_first_sentence(text) if len(first) < 80: title = first text = remaining text = text.replace('"', '\\"').replace('\n', ' ') title = title.replace('"', '\\"').replace('\n', ' ') if tp == 'module': title = title.split('.', 1)[-1] out('{{path:"{}", type:"{}", title:"{}", text:"{}"}},'.format(href, tp, title, text)) out('var docs = [') for tp in 'class', 'module', 'field', 'function', 'section': for ref in self.parser.parsed[tp]: add(ref, tp) out('];') return '\n'.join(lines) def render_search_page(self): root = self._get_root_path() topref = self.parser.refs['--search'] topref.html = [self._templates['search'].format(root=root, version=self._assets_version)] return self.render(topref) def render_landing_page(self): # A bit lazy to reuse the search topref here, but we just need a reference # from the same directory so the link paths are correct. topref = self.parser.refs['--search'] topref.html = [] return self.render(topref)
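# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the renderer above): _refs_to_markdown resolves
# backtick spans and @{ref} markers through regex callbacks before the text is
# handed to commonmark. The link table and file name below are made up; the real
# resolution goes through parser._resolve_ref() and _get_ref_href().
if __name__ == '__main__':
    import re

    known_refs = {'rtk.set_level': 'module/rtk.html#rtk.set_level'}  # hypothetical link table

    def _resolve_backtick(m):
        name = m.group(1)
        href = known_refs.get(name)
        # Resolvable names become markdown links; unknown names keep their backticks.
        return '[`{}`]({})'.format(name, href) if href else '`{}`'.format(name)

    sample = 'Call `rtk.set_level` before `unknown.func`.'
    print(re.sub(r'(?<!`)`([^` ]+)`', _resolve_backtick, sample))
    # Call [`rtk.set_level`](module/rtk.html#rtk.set_level) before `unknown.func`.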
################################################################################
# https://github.com/rhoposit/style_factors
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
#  - Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  - Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  - The authors' names may not be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################

from __future__ import print_function, division

import warnings
warnings.filterwarnings('ignore')

from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)

import keras
from keras.utils import plot_model
from keras import regularizers
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from keras.layers import MaxPooling2D, merge, LSTM
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import losses
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
import keras.backend as K

from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn import decomposition

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

import sys, os
import numpy as np
np.set_printoptions(suppress=True)

#########################################################################################
# To Run:
#   python 04_PCA.py [x|i] [iemocap|ivie] [orig|dnn-aev|dnn-ae1|dnn-ae2|dnn-ae3|dnn-aec]
# Examples:
#   python 04_PCA.py x ivie dnn-aev
#   python 04_PCA.py i ivie orig
#########################################################################################
x_or_i = sys.argv[1] # select x or i vectors to work with exp = sys.argv[2] # helps where to find the data: orig, dnn-ae1, dnn-ae2, etc dataset = sys.argv[3] l2_val = 0.0001 # load up the data def load_numpy(X_file, y_file): print(X_file) print(y_file) X_train = np.load(X_file+"_train.npy") X_valid = np.load(X_file+"_valid.npy") X_test = np.load(X_file+"_test.npy") y_train = np.load(y_file+"_train.npy") y_valid = np.load(y_file+"_valid.npy") y_test = np.load(y_file+"_test.npy") return X_train, X_valid, X_test, y_train, y_valid, y_test def write_to_file(xdata, ydata, outfile): output = open(outfile, "w") for spk, utt in zip(ydata, xdata): utt_string = " ".join(list(map(str, utt))) outstring = spk+" [ "+str(utt_string)+" ]\n" output.write(outstring) output.close() def min_categorical_crossentropy(y_true, y_pred): return -K.categorical_crossentropy(y_true, y_pred) def special_scaling(train, valid, test): maxi = train.max() mini = train.min() meani = train.mean() stdi = train.std() X_train = (train - meani) / stdi X_valid = (valid - meani) / stdi X_test = (test - meani) / stdi return np.array(X_train), np.array(X_valid), np.array(X_test), meani, stdi, maxi, mini def load_spk_data(exp_folder, x_or_i, dataset): ret = [] yfile = exp_folder+"/"+x_or_i+"_utt2spk_"+dataset input = open(yfile, "r") y = input.read().split("\n") input.close() for item in y: spk = item.split(" ")[0] ret.append(spk) return ret def scale_back(train, valid, test, meani, stdi, maxi, mini): # train = np.array([(x_i * (maxi - mini)) + mini for x_i in train]) # valid = np.array([(x_i * (maxi - mini)) + mini for x_i in valid]) # test = np.array([(x_i * (maxi - mini)) + mini for x_i in test]) X_train = (train * stdi) + meani X_valid = (valid * stdi) + meani X_test = (test * stdi) + meani return X_train, X_valid, X_test def scale_back_test(test, meani, stdi, maxi, mini): # test = np.array([(x_i * (maxi - mini)) + mini for x_i in test]) X_test = (test * stdi) + meani return X_test def save_ydata(exp_folder, x_or_i, spk_train, spk_valid, spk_test): # save y-data outfile = exp_folder+"/"+x_or_i+"_y_train.npy" np.save(outfile, spk_train) outfile = exp_folder+"/"+x_or_i+"_y_valid.npy" np.save(outfile, spk_valid) outfile = exp_folder+"/"+x_or_i+"_y_test.npy" np.save(outfile, spk_test) def save_train_data(exp_folder,x_or_i,latent_dim,train_ae,valid_ae,test_ae,y_train, y_valid, y_test,recon, spk_train, spk_valid, spk_test): # save txt versions of reconstructed data train_outfile = exp_folder+"/"+x_or_i+"_utts_"+recon+"_"+str(latent_dim)+"_train.txt" valid_outfile = exp_folder+"/"+x_or_i+"_utts_"+recon+"_"+str(latent_dim)+"_valid.txt" test_outfile = exp_folder+"/"+x_or_i+"_utts_"+recon+"_"+str(latent_dim)+"_test.txt" write_to_file(train_ae, spk_train, train_outfile) write_to_file(valid_ae, spk_valid, valid_outfile) write_to_file(test_ae, spk_test, test_outfile) # save numpy versions of reconstructed data outfile = train_outfile.split(".txt")[0]+".npy" np.save(outfile, train_ae) outfile = valid_outfile.split(".txt")[0]+".npy" np.save(outfile, valid_ae) outfile = test_outfile.split(".txt")[0]+".npy" np.save(outfile, test_ae) # define this DNN class DNN(): def __init__(self, num_styles, img_shape): # define the shape of the input, num classes, etc self.channels = 1 self.num_styles = num_styles self.img_shape = img_shape optimizer = Adam(0.0002) img = Input(shape=self.img_shape) print("shape of input in init: ", img.shape) self.style_classifier_DNN = self.build_style_classifier_DNN() labels = self.style_classifier_DNN(img) 
self.style_classifier_DNN.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) def build_style_classifier_DNN(self): model = Sequential() model.add(Flatten(input_shape=self.img_shape)) model.add(Dense(256,kernel_regularizer=regularizers.l2(l2_val), activation='relu')) model.add(Dense(256,kernel_regularizer=regularizers.l2(l2_val), activation='relu')) model.add(Dense(256,kernel_regularizer=regularizers.l2(l2_val), activation='relu')) model.add(Dense(self.num_styles, activation='softmax',)) model.summary() img = Input(shape=(self.img_shape)) labels = model(img) return Model(img, labels) def get_results(self, pred, truth, name, intype): score = accuracy_score(truth, pred) # save the output outstring = "" outstring += "*********** "+name+": "+intype+" ***********\n" outstring += name+" - acc: "+str(100*score)+"\n" outstring += str(classification_report(truth, pred))+"\n" outstring += str(confusion_matrix(truth, pred))+"\n" # print some stuff to terminal before writing to file print("*********** "+name+": "+intype+" ***********") print(name+" - acc: "+str(100*score)) print(str(classification_report(truth, pred))) print(str(confusion_matrix(truth, pred))) return def plot_history(self, H, l2_val, exp, latent_size, x_or_i): # grab the history object dictionary H = H.history # plot the training loss and accuracy N = np.arange(0, len(H["loss"])) plt.style.use("ggplot") plt.figure() plt.plot(N, H["loss"], label="train_loss") plt.plot(N, H["val_loss"], label="val_loss") plt.plot(N, H["acc"], label="train_acc") plt.plot(N, H["val_acc"], label="val_acc") plt.title(exp+" Style Prediction") plt.xlabel("Epoch #") plt.ylabel("Loss/Error") plt.legend() # save the figure l2_val = str(l2_val).replace(".", "_") plt.savefig("plot."+exp+"."+str(latent_size)+"."+x_or_i+"."+l2_val+".png") plt.close() def run_main(latent_size, x_or_i, dataset, l2_val): exp_folder = dataset+"_pca" # format changed slightly between orig and reconstructed from AE X_file = dataset+"/"+x_or_i+"_utts_X" y_file = dataset+"/"+x_or_i+"_utts_y" # load original data (X_train_orig, X_valid_orig, X_test_orig, y_train, y_valid, y_test) = load_numpy(X_file, y_file) X_train, X_valid, X_test, meani, stdi, maxi, mini = special_scaling(X_train_orig, X_valid_orig, X_test_orig) # run PCA and save it n_components = latent_size pca = decomposition.PCA(n_components) pca.fit(X_train) X_train = pca.transform(X_train) X_valid = pca.transform(X_valid) X_test = pca.transform(X_test) spk_train = load_spk_data(exp_folder, x_or_i, "train") spk_valid = load_spk_data(exp_folder, x_or_i, "valid") spk_test = load_spk_data(exp_folder, x_or_i, "test") train_enc, valid_enc, test_enc = scale_back(X_train, X_valid, X_test, meani, stdi, maxi, mini) save_train_data(exp_folder, x_or_i, latent_size, train_enc, valid_enc, test_enc, y_train, y_valid, y_test, "pca", spk_train, spk_valid, spk_test) save_ydata(exp_folder, x_or_i, y_train, y_valid, y_test) X_train = np.expand_dims(X_train, axis=3) X_valid = np.expand_dims(X_valid, axis=3) X_test = np.expand_dims(X_test, axis=3) classes = list(set(list(y_train))) num_classes = len(classes) print("Num classes: ", num_classes) print(classes) cats = {x:list(y_train).count(x) for x in list(y_train)} print(cats) cats = {x:list(y_valid).count(x) for x in list(y_valid)} print(cats) cats = {x:list(y_test).count(x) for x in list(y_test)} print(cats) # turn the y categories into 1-hot vectors for keras y_train = keras.utils.to_categorical(y_train, num_classes=num_classes) y_valid = 
keras.utils.to_categorical(y_valid, num_classes=num_classes) y_test = keras.utils.to_categorical(y_test, num_classes=num_classes) # val_method = "val_loss" # val_mode = "min" val_method = "val_acc" val_mode = "max" batch_size = 32 print(X_train.shape) print(X_valid.shape) print(X_test.shape) print(y_train.shape) print(y_valid.shape) print(y_test.shape) h = np.histogram(X_train) mean = np.mean(X_train) std = np.std(X_train) maximum = np.max(X_train) minimum = np.min(X_train) print(maximum, minimum, mean, std) h = np.histogram(X_valid) mean = np.mean(X_valid) std = np.std(X_valid) maximum = np.max(X_valid) minimum = np.min(X_valid) print(maximum, minimum, mean, std) early_stopping = EarlyStopping(monitor=val_method, min_delta=0, patience=2, verbose=1, mode=val_mode) callbacks_list = [early_stopping] latent_size = (X_train.shape[1], X_train.shape[2]) dnn = DNN(num_classes, latent_size) DNN_style_history = dnn.style_classifier_DNN.fit(X_train, y_train, batch_size=batch_size, epochs=100,shuffle=True, validation_data=[X_valid, y_valid], callbacks=callbacks_list) # dnn.plot_history(DNN_style_history, l2_val, exp_folder, latent_size, x_or_i) eval_result = dnn.style_classifier_DNN.evaluate(X_test, y_test) print("eval_result: ", eval_result) preds = dnn.style_classifier_DNN.predict(X_test) preds = np.argmax(preds, axis=1) test = np.argmax(y_test, axis=1) dnn.get_results(preds, test, "DNN-style", x_or_i+"-vector") return eval_result[0], eval_result[1] if x_or_i == "x": Z = [512, 400, 300, 200, 100, 50, 20, 10, 5] recon_size = 512 if x_or_i == "i": Z = [400, 300, 200, 100, 50, 20, 10, 5] recon_size = 400 RES = [] for z in Z: loss, acc = run_main(z, x_or_i, dataset, l2_val) enc_string = str(loss)+","+str(acc) RES.append(enc_string) print(dataset, "sweep", exp, x_or_i) for res in RES: print(res)
import json
from functools import partial, reduce
from collections import namedtuple
from urllib.parse import unquote
from itertools import groupby
import boto3
import time
import re

s3 = boto3.resource('s3')

pipe = lambda *args: lambda x: reduce(lambda a, fn: fn(a), args, x)

S3Object = namedtuple('S3MetaData', ('bucket', 'key'))


def get_records(event: dict) -> list:
    return event['Records']


def get_list(event: list) -> list:
    return [el['s3'] for el in event]


def get_s3metadata(s3meta: list) -> S3Object:
    return [S3Object(el['bucket']['name'], unquote(el['object']['key'])) for el in s3meta]


def load_file(m: list) -> str:
    try:
        obj = s3.Object(m[0].bucket, m[0].key)
        return obj.get()['Body'].read().decode('utf-8')
    except Exception as e:
        print(e)
        return []


def split_str(data: str) -> list:
    return data.split('\n')


def remove_new_lines(data: list) -> list:
    return [line for line in data if line]


def decode_json(data: list) -> list:
    return [json.loads(line) for line in data]


def get_valid_filename(s):
    s = str(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', s).lower()[:50]


def take_props(data: list) -> list:
    by_types = []
    for el in data:
        if el['body']['t'] == 'event':
            by_types.append(
                (el['body']['tid'].lower(), el['body']['ds'], el['body']['t'],
                 get_valid_filename(el['body']['ec']), el)
            )
        else:
            by_types.append(
                (el['body']['tid'].lower(), el['body']['ds'], el['body']['t'], 'all', el)
            )
    return by_types


def group_by_ds(data: list) -> list:
    return [
        (tid, ds, event, event_type, list(dt for tid, ds, ev, et, dt in data))
        for tid, g1 in groupby(data, key=lambda x: x[0])
        for ds, g2 in groupby(g1, key=lambda x: x[1])
        for event, g3 in groupby(g2, key=lambda x: x[2])
        for event_type, data in groupby(g3, key=lambda x: x[3])
    ]


def sort_data(data: list) -> list:
    return sorted(data, key=lambda t: (t[0], t[1], t[2], t[3]))


def folder_name_events(*args):
    return f'system_source={args[0]}/tracking_id={args[1]}/data_source={args[2]}/event_type={args[3]}_{args[4]}/{args[5]}'


def folder_name_all(*args):
    return f'system_source={args[0]}/tracking_id={args[1]}/data_source={args[2]}/event_type={args[3]}/{args[4]}'


def construct_keys(event: dict, data: list) -> list:
    keys = pipe(
        sns_adapter,
        get_list,
        get_s3metadata,
    )(event)
    bucket = keys[0].bucket
    folders = keys[0].key.split('/')
    base_folders = "/".join(folders[1:2])
    partition_folders = "/".join(folders[2:6])
    with_folder = []
    for tid, ds, event, event_type, body in data:
        if event_type != 'all':
            with_folder.append(
                (bucket, tid, folder_name_events(
                    base_folders, tid, ds, event, event_type, partition_folders
                ), ds, event, event_type, body)
            )
        else:
            with_folder.append(
                (bucket, tid, folder_name_all(
                    base_folders, tid, ds, event, partition_folders
                ), ds, event, event_type, body)
            )
    return with_folder


def construct_files(data, ts=time.strftime("%Y-%m-%dT%H:%M:%S%z", time.gmtime())):
    bucket, tid, folder, ds, event, event_type, body = data
    key = f'{tid}-{event}_{event_type}-{ts}' if event_type != 'all' else f'{tid}-{event}-{ts}'
    body_json = [json.dumps(record) for record in body]
    new_line_delimited = '\n'.join(body_json)
    return s3.Object(bucket, 'processed/' + folder + '/' + key).put(Body=new_line_delimited)


def save_to_s3(data: list, ts=time.strftime("%Y-%m-%dT%H:%M:%S%z", time.gmtime())) -> str:
    try:
        operations = [construct_files(slice) for slice in data]
        return 'success'
    except Exception as e:
        print('it comes from the exception')
        print(e)
        return e


def sns_adapter(event):
    records = event['Records']
    messages = [record['Sns']['Message'] for record in records]
    try:
        decoded = [json.loads(message) for message in messages]
        records_list = [record['Records'] for record in decoded]
        flat_records_list = [item for sublist in records_list for item in sublist]
        return flat_records_list
    except Exception as e:
        print(e)
        return []


def handler(event, ctx):
    data = pipe(
        sns_adapter,
        get_list,
        get_s3metadata,
        load_file,
        split_str,
        remove_new_lines,
        decode_json,
        take_props,
        sort_data,
        group_by_ds,
        partial(construct_keys, event),
    )(event)
    try:
        return save_to_s3(data)
    except Exception as e:
        print(e)
        return e
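# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the Lambda itself): the handler above expects
# an SNS notification whose Message field is a JSON-encoded S3 event. The bucket
# and key names below are made up, and only the pure parsing stages are run,
# since load_file() needs real S3 access.
if __name__ == '__main__':
    sample_event = {
        'Records': [{
            'Sns': {
                'Message': json.dumps({
                    'Records': [{
                        's3': {
                            'bucket': {'name': 'example-bucket'},
                            'object': {'key': 'raw/2021/01/01/part-000.json'},
                        }
                    }]
                })
            }
        }]
    }
    metadata = pipe(sns_adapter, get_list, get_s3metadata)(sample_event)
    print(metadata)
    # [S3MetaData(bucket='example-bucket', key='raw/2021/01/01/part-000.json')]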
import datetime
import time

from sawtooth_sdk.processor.handler import TransactionHandler
from sawtooth_sdk.processor.exceptions import InvalidTransaction

from simple_supply_addressing import addresser
from simple_supply_protobuf import payload_pb2
from simple_supply_tp.payload import BevPayload
from simple_supply_tp.state import SimpleSupplyState

SYNC_TOLERANCE = 60 * 5
MAX_LAT = 90 * 1e6
MIN_LAT = -90 * 1e6
MAX_LNG = 180 * 1e6
MIN_LNG = -180 * 1e6


class SimpleSupplyHandler(TransactionHandler):

    @property
    def family_name(self):
        return addresser.FAMILY_NAME

    @property
    def family_versions(self):
        return [addresser.FAMILY_VERSION]

    @property
    def namespaces(self):
        return [addresser.NAMESPACE]

    def apply(self, transaction, context):
        header = transaction.header
        payload = BevPayload(transaction.payload)
        state = SimpleSupplyState(context)

        _validate_timestamp(payload.timestamp)

        if payload.action == payload_pb2.BevPayload.CREATE_ELECTION:
            _create_election(
                state=state,
                public_key=header.signer_public_key,
                payload=payload)
        elif payload.action == payload_pb2.BevPayload.CREATE_VOTING_OPTION:
            _create_voting_option(
                state=state,
                public_key=header.signer_public_key,
                payload=payload)
        elif payload.action == payload_pb2.BevPayload.CREATE_POLL_REGISTRATION:
            _create_poll_registration(
                state=state,
                public_key=header.signer_public_key,
                payload=payload)
        elif payload.action == payload_pb2.BevPayload.CREATE_VOTER:
            _create_voter(
                state=state,
                public_key=header.signer_public_key,
                payload=payload)
        elif payload.action == payload_pb2.BevPayload.CREATE_VOTE:
            _create_vote(
                state=state,
                public_key=header.signer_public_key,
                payload=payload)
        elif payload.action == payload_pb2.BevPayload.UPDATE_VOTE:
            _update_vote(
                state=state,
                public_key=header.signer_public_key,
                payload=payload)
        elif payload.action == payload_pb2.BevPayload.UPDATE_ELECTION:
            _update_election(
                state=state,
                public_key=header.signer_public_key,
                payload=payload)
        elif payload.action == payload_pb2.BevPayload.UPDATE_VOTER:
            _update_voter(
                state=state,
                public_key=header.signer_public_key,
                payload=payload)
        elif payload.action == payload_pb2.BevPayload.UPDATE_VOTING_OPTION:
            _update_voting_option(
                state=state,
                public_key=header.signer_public_key,
                payload=payload)
        elif payload.action == payload_pb2.BevPayload.UPDATE_POLL_REGISTRATION:
            _update_poll_registration(
                state=state,
                public_key=header.signer_public_key,
                payload=payload)
        else:
            raise InvalidTransaction('Unhandled action')


def _create_election(state, public_key, payload):
    if state.get_voter(public_key) is None:
        raise InvalidTransaction('Voter with the public key {} does '
                                 'not exist'.format(public_key))
    state.set_election(
        election_id=payload.data.election_id,
        name=payload.data.name,
        description=payload.data.description,
        start_timestamp=payload.data.start_timestamp,
        end_timestamp=payload.data.end_timestamp,
        results_permission=payload.data.results_permission,
        can_change_vote=payload.data.can_change_vote,
        can_show_realtime=payload.data.can_show_realtime,
        admin_id=payload.data.admin_id,
        status=payload.data.status,
        timestamp=payload.timestamp
    )


def _create_voting_option(state, public_key, payload):
    if state.get_voter(public_key) is None:
        raise InvalidTransaction('Agent with the public key {} does '
                                 'not exist'.format(public_key))
    state.set_voting_option(
        voting_option_id=payload.data.voting_option_id,
        name=payload.data.name,
        description=payload.data.description,
        election_id=payload.data.election_id,
        status=payload.data.status
    )


def _create_poll_registration(state, public_key, payload):
    if state.get_voter(public_key) is None:
        raise InvalidTransaction('Agent with the public key {} does '
                                 'not exist'.format(public_key))
    state.set_poll_registration(
        voter_id=payload.data.voter_id,
        name=payload.data.name,
        election_id=payload.data.election_id,
        status=payload.data.status
    )


def _create_voter(state, public_key, payload):
    if state.get_voter(public_key):
        raise InvalidTransaction('Voter with the public key {} already '
                                 'exists'.format(public_key))
    state.set_voter(
        voter_id=payload.data.voter_id,
        public_key=payload.data.public_key,
        name=payload.data.name,
        created_at=payload.data.created_at,
        type=payload.data.type)


def _update_voter(state, public_key, payload):
    if state.get_voter(public_key) is None:
        raise InvalidTransaction('Voter with the public key {} does '
                                 'not exists'.format(public_key))
    state.update_voter(
        voter_id=payload.data.voter_id,
        public_key=payload.data.public_key,
        name=payload.data.name,
        created_at=payload.data.created_at,
        type=payload.data.type)


def _create_vote(state, public_key, payload):
    if state.get_voter(public_key) is None:
        raise InvalidTransaction('Voter with the public key {} does '
                                 'not exist'.format(public_key))
    state.set_vote(
        vote_id=payload.data.vote_id,
        timestamp=payload.data.timestamp,
        voter_id=payload.data.voter_id,
        election_id=payload.data.election_id,
        voting_option_id=payload.data.voting_option_id
    )


def _update_vote(state, public_key, payload):
    if state.get_voter(public_key) is None:
        raise InvalidTransaction('Voter with the public key {} does '
                                 'not exist'.format(public_key))
    state.update_vote(
        vote_id=payload.data.vote_id,
        timestamp=payload.data.timestamp,
        voting_option_id=payload.data.voting_option_id
    )


def _update_election(state, public_key, payload):
    if state.get_voter(public_key) is None:
        raise InvalidTransaction('Voter with the public key {} does '
                                 'not exist'.format(public_key))
    state.update_election(
        election_id=payload.data.election_id,
        name=payload.data.name,
        description=payload.data.description,
        start_timestamp=payload.data.start_timestamp,
        end_timestamp=payload.data.end_timestamp,
        results_permission=payload.data.results_permission,
        can_change_vote=payload.data.can_change_vote,
        can_show_realtime=payload.data.can_show_realtime,
        admin_id=payload.data.admin_id,
        status=payload.data.status,
        timestamp=payload.timestamp
    )


def _update_voting_option(state, public_key, payload):
    if state.get_voter(public_key) is None:
        raise InvalidTransaction('Voter with the public key {} does '
                                 'not exist'.format(public_key))
    state.update_voting_option(
        voting_option_id=payload.data.voting_option_id,
        name=payload.data.name,
        description=payload.data.description,
        election_id=payload.data.election_id,
        status=payload.data.status
    )


def _update_poll_registration(state, public_key, payload):
    if state.get_voter(public_key) is None:
        raise InvalidTransaction('Voter with the public key {} does '
                                 'not exist'.format(public_key))
    state.update_poll_registration(
        voter_id=payload.data.voter_id,
        name=payload.data.name,
        election_id=payload.data.election_id,
        status=payload.data.status
    )


def _validate_timestamp(timestamp):
    """Validates that the client submitted timestamp for a transaction is not
    greater than current time, within a tolerance defined by SYNC_TOLERANCE

    NOTE: Timestamp validation can be challenging since the machines that are
    submitting and validating transactions may have different system times
    """
    dts = datetime.datetime.utcnow()
    current_time = round(time.mktime(dts.timetuple()) + dts.microsecond / 1e6)
    if (timestamp - current_time) > SYNC_TOLERANCE:
        raise InvalidTransaction(
            'Timestamp must be less than local time.'
            ' Expected {0} in ({1}-{2}, {1}+{2})'.format(
                timestamp, current_time, SYNC_TOLERANCE))
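# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the transaction processor): _validate_timestamp
# above only rejects timestamps that lead the validator's clock by more than
# SYNC_TOLERANCE (5 minutes); timestamps in the past are always accepted. A
# hypothetical helper with the same comparison:
if __name__ == '__main__':
    def _is_within_tolerance(client_ts, validator_now):
        return (client_ts - validator_now) <= SYNC_TOLERANCE

    now = round(time.time())
    assert _is_within_tolerance(now, now)                           # "now" passes
    assert _is_within_tolerance(now - 3600, now)                    # an hour in the past passes
    assert not _is_within_tolerance(now + SYNC_TOLERANCE + 1, now)  # too far ahead is rejected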
<gh_stars>0 import os import cv2 import sys import json import copy import collections import numpy as np from tqdm import tqdm import paddle from paddle.io import Dataset sys.path.insert(0, "../") class DocVQAExample(object): def __init__(self, question, doc_tokens, doc_boxes=[], answer=None, labels=None, image=None): self.question = question self.doc_tokens = doc_tokens self.doc_boxes = doc_boxes self.image = image self.answer = answer self.labels = labels class DocVQAFeatures(object): """A single set of features of data.""" def __init__(self, example_index, input_ids, input_mask, segment_ids, boxes=None, label=None): self.example_index = example_index self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.boxes = boxes self.label = label class DocVQA(Dataset): def __init__(self, args, tokenizer, label2id_map, max_seq_len=512, max_query_length=20, max_doc_length=512, max_span_num=1): super(DocVQA, self).__init__() self.tokenizer = tokenizer self.label2id_map = label2id_map self.max_seq_len = max_seq_len self.max_query_length = max_query_length self.max_doc_length = max_doc_length self.max_span_num = max_span_num self.sample_list = None self.args = args self.docvqa_inputs = self.docvqa_input() def check_is_max_context(self, doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def convert_examples_to_features(self, examples, tokenizer, label_map, max_seq_length, max_span_num, max_doc_length, max_query_length): if "[CLS]" in self.tokenizer.get_vocab(): start_token = "[CLS]" end_token = "[SEP]" else: start_token = "<s>" end_token = "</s>" features = [] total = len(examples) for (example_index, example) in enumerate(examples): query_tokens = tokenizer.tokenize(example.question) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] all_doc_tokens = example.doc_tokens all_doc_boxes_tokens = example.doc_boxes cls_token_box = [0, 0, 0, 0] sep_token_box = [1000, 1000, 1000, 1000] pad_token_box = [0, 0, 0, 0] ques_token_box = [0, 0, 0, 0] # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. 
# To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple("DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += length spans_input_ids = [] spans_input_mask = [] spans_segment_ids = [] spans_boxes_tokens = [] for (doc_span_index, doc_span) in enumerate(doc_spans): if doc_span_index == max_span_num: break tokens = [] boxes_tokens = [] token_is_max_context = {} segment_ids = [] tokens.append(start_token) boxes_tokens.append(cls_token_box) segment_ids.append(0) for token in query_tokens: tokens.append(token) boxes_tokens.append(ques_token_box) segment_ids.append(0) tokens.append(end_token) boxes_tokens.append(sep_token_box) segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i is_max_context = self.check_is_max_context( doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) boxes_tokens.append(all_doc_boxes_tokens[split_token_index]) segment_ids.append(0) tokens.append(end_token) boxes_tokens.append(sep_token_box) segment_ids.append(0) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) boxes_tokens.append(pad_token_box) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(boxes_tokens) == max_seq_length spans_input_ids.append(input_ids) spans_input_mask.append(input_mask) spans_segment_ids.append(segment_ids) spans_boxes_tokens.append(boxes_tokens) # Padding # padding spans # max_span_num: max_seg_num # spans_input_ids: the tokens in each segment if len(spans_input_ids) > max_span_num: spans_input_ids = spans_input_ids[0:max_span_num] spans_input_mask = spans_input_mask[0:max_span_num] spans_segment_ids = spans_segment_ids[0:max_span_num] spans_boxes_tokens = spans_boxes_tokens[0:max_span_num] while len(spans_input_ids) < max_span_num: tokens = [] boxes_tokens = [] segment_ids = [] tokens.append(start_token) boxes_tokens.append(cls_token_box) segment_ids.append(0) for token in query_tokens: tokens.append(token) boxes_tokens.append(ques_token_box) segment_ids.append(0) tokens.append(end_token) boxes_tokens.append(sep_token_box) segment_ids.append(0) tokens.append(end_token) boxes_tokens.append(sep_token_box) segment_ids.append(0) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) boxes_tokens.append(pad_token_box) spans_input_ids.append(input_ids) spans_input_mask.append(input_mask) spans_segment_ids.append(segment_ids) spans_boxes_tokens.append(boxes_tokens) # padding labels labels = example.labels sep_id = tokenizer.convert_tokens_to_ids(end_token) labels = ["O"] * (spans_input_ids[0].index(sep_id) + 1) + labels if len(labels) > 512: labels = labels[:512] if len(labels) < 512: labels += ["O"] 
* (512 - len(labels)) assert len(spans_input_ids[0]) == len(labels) label_ids = [] for lid, l in enumerate(labels): if l not in label_map: label_ids.append(0) else: label_ids.append(label_map[l]) feature = DocVQAFeatures( example_index=example_index, input_ids=spans_input_ids, input_mask=spans_input_mask, segment_ids=spans_segment_ids, boxes=spans_boxes_tokens, label=label_ids, ) features.append(feature) return features def create_examples(self, data, is_test=False): """Creates examples for the training and dev sets.""" examples = [] for sample in tqdm(data, total=len(data)): question = sample["question"] doc_tokens = sample["document"] doc_boxes = sample["document_bbox"] labels = sample['labels'] if not is_test else [] x_min, y_min = min(doc_boxes, key=lambda x: x[0])[0], min( doc_boxes, key=lambda x: x[2])[2] x_max, y_max = max(doc_boxes, key=lambda x: x[1])[1], max( doc_boxes, key=lambda x: x[3])[3] width = x_max - x_min height = y_max - y_min if max(width, height) < 1000: scale_x = 1 scale_y = 1 else: scale_x = 1000 / max(width, height) scale_y = 1000 / max(width, height) scaled_doc_boxes = [[ round((b[0] - x_min) * scale_x), round((b[2] - y_min) * scale_y), round((b[1] - x_min) * scale_x), round((b[3] - y_min) * scale_y) ] for b in doc_boxes] for box, oribox in zip(scaled_doc_boxes, doc_boxes): if box[0] < 0: print(box, oribox) if box[2] - box[0] < 0: print(box, oribox) if box[3] - box[1] < 0: print(box, oribox) for pos in box: if pos > 1000: print(width, height, box, oribox) example = DocVQAExample(question=question, doc_tokens=doc_tokens, doc_boxes=scaled_doc_boxes, labels=labels) examples.append(example) return examples def docvqa_input(self): data = [] if self.args.do_train: dataset = self.args.train_file elif self.args.do_test: dataset = self.args.test_file with open(dataset, 'r', encoding='utf8') as f: for index, line in enumerate(f): data.append(json.loads(line.strip())) # read the examples from train/test xlm files examples = self.create_examples(data, is_test=self.args.do_test) features = self.convert_examples_to_features( examples, self.tokenizer, self.label2id_map, max_seq_length=self.max_seq_len, max_doc_length=self.max_doc_length, max_span_num=self.max_span_num, max_query_length=self.max_query_length) all_input_ids = paddle.to_tensor([f.input_ids for f in features], dtype="int64") all_input_mask = paddle.to_tensor([f.input_mask for f in features], dtype="int64") all_segment_ids = paddle.to_tensor([f.segment_ids for f in features], dtype="int64") all_bboxes = paddle.to_tensor([f.boxes for f in features], dtype="int64") all_labels = paddle.to_tensor([f.label for f in features], dtype="int64") self.sample_list = [ np.array(all_input_ids), np.array(all_input_mask), np.array(all_segment_ids), np.array(all_bboxes), np.array(all_labels) ] def __getitem__(self, idx): return self.sample_list[0][idx], self.sample_list[1][ idx], self.sample_list[2][idx], self.sample_list[3][ idx], self.sample_list[4][idx] def __len__(self, ): return self.sample_list[0].shape[0]
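# --- Illustrative sketch (not part of the original file) ---
# A minimal, dependency-free reconstruction of the document-span windowing used
# in the feature-conversion code above: a long token sequence is cut into chunks
# of at most `max_tokens_for_doc` tokens. Note that the loop above advances by
# the full chunk length, so the spans do not overlap even though the introductory
# comment mentions a `doc_stride`. The token counts below are toy values chosen
# only for demonstration.
import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def split_into_doc_spans(num_doc_tokens, max_tokens_for_doc):
    """Return the (start, length) windows that cover a document of the given size."""
    doc_spans = []
    start_offset = 0
    while start_offset < num_doc_tokens:
        length = min(num_doc_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(DocSpan(start=start_offset, length=length))
        if start_offset + length == num_doc_tokens:
            break
        start_offset += length  # advance past the whole chunk (no overlap)
    return doc_spans

if __name__ == "__main__":
    # A 10-token "document" split into windows of at most 4 tokens each.
    print(split_into_doc_spans(10, 4))  # [DocSpan(0, 4), DocSpan(4, 4), DocSpan(8, 2)]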
#coding:utf-8 # # id: bugs.core_4403 # title: Allow referencing cursors as record variables in PSQL # decription: # tracker_id: CORE-4403 # min_versions: ['3.0'] # versions: 3.0, 4.0 # qmid: None import pytest from firebird.qa import db_factory, isql_act, Action # version: 3.0 # resources: None substitutions_1 = [] init_script_1 = """ recreate table t1(id int primary key, x int, y int); recreate table t2(id int primary key, x int, y int); recreate table t3(id int primary key, x int, y int); commit; insert into t1 values(1, 10, 11); commit; insert into t2 values(2, 10, 22); commit; insert into t3 values(3, 10, 33); commit; set term ^; create or alter procedure sp_test(a_x int) returns(o_y int) as begin o_y = 2 * a_x; suspend; end ^ set term ;^ commit; """ db_1 = db_factory(sql_dialect=3, init=init_script_1) test_script_1 = """ set list on; set term ^; execute block returns( t1_id int, t1_x int, t1_y int ,t2_id int, t2_x int, t2_y int ,t3_id int, t3_x int, t3_y int ) as begin for select id, x, y from t1 as cursor c1 do begin for select id, x, y from t2 where x = :c1.x as cursor c2 do begin for select id, x, y from t3 where x = :c1.x as cursor c3 do begin t1_id = c1.id; t1_x = c1.x; t1_y = c1.y; t2_id = c2.id; t2_x = c2.x; t2_y = c2.y; t3_id = c3.id; t3_x = c3.x; t3_y = c3.y; suspend; end end end end ^ -- This should raise exception "attempted update of read-only column", sample has been taken from: -- sql.ru/forum/actualutils.aspx?action=gotomsg&tid=1158905&msg=17704102 execute block as begin for select x, y from t1 as cursor ce do ce.x = ce.y + 1; end ^ --/******************** --### 29.05.2015. TODO ### UNCOMMENT LATER, AFTER FIX CORE-4819. CURRENTLY IT LEADS FB TO HANG / CRASH. -- Uncomment 06.08.2018: execute block returns(old_y int, new_y int) as begin for select x, y from t1 as cursor ce do begin old_y = ce.y; execute procedure sp_test(ce.x) returning_values(ce.y); new_y = ce.y; suspend; end end ^ -- ********************/ set term ;^ commit; set list off; """ act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = """ T1_ID 1 T1_X 10 T1_Y 11 T2_ID 2 T2_X 10 T2_Y 22 T3_ID 3 T3_X 10 T3_Y 33 """ expected_stderr_1 = """ Statement failed, SQLSTATE = 42000 attempted update of read-only column Statement failed, SQLSTATE = 42000 attempted update of read-only column """ @pytest.mark.version('>=3.0,<4.0') def test_1(act_1: Action): act_1.expected_stdout = expected_stdout_1 act_1.expected_stderr = expected_stderr_1 act_1.execute() assert act_1.clean_expected_stderr == act_1.clean_stderr assert act_1.clean_expected_stdout == act_1.clean_stdout # version: 4.0 # resources: None substitutions_2 = [] init_script_2 = """ recreate table t1(id int primary key, x int, y int); recreate table t2(id int primary key, x int, y int); recreate table t3(id int primary key, x int, y int); commit; insert into t1 values(1, 10, 11); commit; insert into t2 values(2, 10, 22); commit; insert into t3 values(3, 10, 33); commit; set term ^; create or alter procedure sp_test(a_x int) returns(o_y int) as begin o_y = 2 * a_x; suspend; end ^ set term ;^ commit; """ db_2 = db_factory(sql_dialect=3, init=init_script_2) test_script_2 = """ set list on; set term ^; execute block returns( t1_id int, t1_x int, t1_y int ,t2_id int, t2_x int, t2_y int ,t3_id int, t3_x int, t3_y int ) as begin for select id, x, y from t1 as cursor c1 do begin for select id, x, y from t2 where x = :c1.x as cursor c2 do begin for select id, x, y from t3 where x = :c1.x as cursor c3 do begin t1_id = c1.id; t1_x = c1.x; 
t1_y = c1.y; t2_id = c2.id; t2_x = c2.x; t2_y = c2.y; t3_id = c3.id; t3_x = c3.x; t3_y = c3.y; suspend; end end end end ^ -- This should raise exception "attempted update of read-only column", sample has been taken from: -- sql.ru/forum/actualutils.aspx?action=gotomsg&tid=1158905&msg=17704102 execute block as begin for select x, y from t1 as cursor ce do ce.x = ce.y + 1; end ^ --/******************** --### 29.05.2015. TODO ### UNCOMMENT LATER, AFTER FIX CORE-4819. CURRENTLY IT LEADS FB TO HANG / CRASH. -- Uncomment 06.08.2018: execute block returns(old_y int, new_y int) as begin for select x, y from t1 as cursor ce do begin old_y = ce.y; execute procedure sp_test(ce.x) returning_values(ce.y); new_y = ce.y; suspend; end end ^ --********************/ set term ;^ commit; set list off; """ act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2) expected_stdout_2 = """ T1_ID 1 T1_X 10 T1_Y 11 T2_ID 2 T2_X 10 T2_Y 22 T3_ID 3 T3_X 10 T3_Y 33 """ expected_stderr_2 = """ Statement failed, SQLSTATE = 42000 attempted update of read-only column CE.X Statement failed, SQLSTATE = 42000 attempted update of read-only column CE.Y """ @pytest.mark.version('>=4.0') def test_2(act_2: Action): act_2.expected_stdout = expected_stdout_2 act_2.expected_stderr = expected_stderr_2 act_2.execute() assert act_2.clean_expected_stderr == act_2.clean_stderr assert act_2.clean_expected_stdout == act_2.clean_stdout
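# --- Illustrative sketch (not part of the original test) ---
# A minimal skeleton of the firebird-qa test pattern used in this file: create a
# database fixture with db_factory(), wrap an isql script in isql_act(), assign
# the expected output, execute, and compare the cleaned streams. The table,
# script, and expected output below are made-up placeholders, not part of the
# CORE-4403 ticket.
import pytest
from firebird.qa import db_factory, isql_act, Action

init_demo = """
    recreate table demo(id int primary key);
    commit;
"""
db_demo = db_factory(sql_dialect=3, init=init_demo)

script_demo = """
    set list on;
    select count(*) as cnt from demo;
"""
act_demo = isql_act('db_demo', script_demo, substitutions=[])

expected_stdout_demo = """
    CNT 0
"""

@pytest.mark.version('>=3.0')
def test_demo(act_demo: Action):
    act_demo.expected_stdout = expected_stdout_demo
    act_demo.execute()
    assert act_demo.clean_expected_stdout == act_demo.clean_stdout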
# (c) Copyright [2017] Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The main entry point script for this project. Experimenter takes as an input a specification of experiments, builds plan (just a set of experiments to run) and runs experiments one at a time. It accepts the following command line parameters:: $ python experimenter.py ACTION [parameters] * ``ACTION`` Action to perform. Valid actions are * **print-config** * **run** Build plan and run experiments. * **build** Only build plan. If file name specified, serialize to file. * **validate** Analyze plan - run several validation checks to make sure \ operating system is properly tuned (see :py:class:`~dlbs.validator.Validator` class for details) * Parameters * ``--config`` Configuration file (json) of an experiment. Will override values from default configuration. * ``--plan`` Pre-built plan of an experiment (json). If action is **build**, a file name to write plan to.\ If action is **run**, a file name to read plan from. * ``-P`` Parameters that override parameters in configuration file. For instance, ``-Pexp.phase='"inference"'``.\ Values must be json parsable (json.loads()). * ``-V`` Variables that override variables in configuration file in section "variables".\ These variables are used to generate different combinations of experiments.\ For instance: ``-Vexp.framework='["tensorflow", "caffe2"]'``. Values must be\ json parsable (json.loads()). * ``--log-level`` Python logging level. Valid values: "critical", "error", "warning", "info" and "debug" * ``--discard-default-config`` Do not load default configuration. * ``-E`` Extensions to add. Can be usefull to quickly customize experiments. Must be valid json\ parsable array element for "extension" array. Example: Load default configuration, pretty print it to a command line and exit. Without other arguments, it will print default configuration. Parameters and variables defined in configuration files will not be evaluated. The 'print-config' just prints what's inside configuration files i.e. parameters/variables passed via comamnd line arguments will not be included:: $ python experimenter.py print-config --log-level=debug Example: There are two types of variables. The first type is **parameter** variables or just parameter. These parameters do not contribute to generating different experiments and may be common to all experiments. It's possible to specify them on a command line. All values of such paarmeters must be json parsable (json.loads()):: $ python experimenter.py build --discard-default-config --log-level=debug \\ $ -Pstr.greeting='"Hello World!"' -Pint.value=3 \\ $ -Pfloat.value=3.4343 -Plist.value='["1", "2", "3"]' \\ $ -Plist.value2='[100,101,102]' Example: A minimal working example to run BVLC Caffe. Run one experiment and store results in a file. 
If you run multiple experiments, you really want to make sure that experiment log file is different for every experiment (assuming you run it from DLBS_ROOT/tutorials/dlcookbook):: $ export BENCH_ROOT=$( cd $( dirname "${BASH_SOURCE[0]}" ) && pwd ) $ export CUDA_CACHE_PATH=/dev/shm/cuda_cache $ . ${BENCH_ROOT}/../../scripts/environment.sh $ script=$DLBS_ROOT/python/dlbs/experimenter.py $ $ python experimenter.py run --log-level=debug \\ $ -Pexp.framework='"bvlc_caffe"' \\ $ -Pexp.env='"docker"' \\ $ -Pexp.gpus='0' \\ $ -Pexp.model='"alexnet"' \\ $ -Pexp.device_batch='"16"'\\ $ -Pexp.log_file='"${BENCH_ROOT}/${caffe.fork}_caffe/training.log"' """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import logging import argparse import json import copy from dlbs.builder import Builder from dlbs.launcher import Launcher from dlbs.utils import DictUtils from dlbs.utils import ConfigurationLoader from dlbs.validator import Validator from dlbs.processor import Processor from dlbs.help.helper import Helper from dlbs.sysinfo.systemconfig import SysInfo class Experimenter(object): """Class that generates configurations and runs experiments""" ACTIONS = ['print-config', 'run', 'build', 'validate'] def __init__(self): self.__validation = True # Validate config before running benchmarks self.__action = None # Action to perform (build, run, ...) self.__config_file = None # Configuration file to load self.__progress_file = None # A JSON file with current progress self.__config = {} # Loaded configuration self.__param_info = {} # Parameter meta-info such as type and value domain self.__plan_file = None # File with pre-built plan self.__plan = [] # Loaded or generated plan self.__params = {} # Override env variables from files self.__variables = {} # Override variables from files self.__initialized = False # Experimenter can be initialized only once # Dirty hacks for var in ('CUDA_CACHE_PATH', 'http_proxy', 'https_proxy'): DictUtils.ensure_exists(os.environ, var, '') DictUtils.ensure_exists( os.environ, 'DLBS_ROOT', os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../') ) @property def validation(self): """Do we need to perform validation.""" return self.__validation @validation.setter def validation(self, validation): """Set validation.""" self.__validation = validation @property def action(self): """Get current action.""" return self.__action @action.setter def action(self, action): """Set current action.""" if action not in Experimenter.ACTIONS: raise ValueError('Invalid value for action (%s). 
Must be one of %s' % (action, str(Experimenter.ACTIONS))) self.__action = action @property def config_file(self): """Get configuration file.""" return self.__config_file @config_file.setter def config_file(self, config_file): """Set configuration file.""" self.__config_file = config_file @property def config(self): """Get configuration.""" return self.__config @config.setter def config(self, config): """Set configuration.""" self.__config = config @property def param_info(self): """Get parameters info dictionary.""" return self.__param_info @param_info.setter def param_info(self, param_info): """Set parameters info dictionary.""" self.__param_info = param_info @property def plan_file(self): """Set plan file.""" return self.__plan_file @plan_file.setter def plan_file(self, plan_file): """Get plan file.""" self.__plan_file = plan_file @property def plan(self): """Get plan.""" return self.__plan @plan.setter def plan(self, plan): """Set plan.""" self.__plan = plan @property def params(self): """Get parameters.""" return self.__params @property def variables(self): """Get variables.""" return self.__variables def init(self, **kwargs): """Initializes experimenter. Args: **kwargs (dict): Optional initialization parameters: - action (str): Action to perform. - config (str): A user-provided configuration file. - plan (str): A file for generated benchmark plan. - no_validation (bool): If true, do not perform validation - progress_file (str): A path to progress file (if not None, enables progress reporting). - params (dict): User defined parameters. - vars (dict): User defined variables. - discard_default_config (bool): If True, do not load standard DLBS config. - extensions (dict): User provided extensions. User provided parameters (`params`), variables (`vars`) and extensions (`extensions`) overwrite values defined in user configuration files (`config`) if it is present. Information defined in a uses-provided configuration file (`config`) overwrites standard DLBS configuration. """ if self.__initialized: raise RuntimeError("Experimenter can only be initialized once.") self.action = DictUtils.get(kwargs, 'action', 'run') self.config_file = DictUtils.get(kwargs, 'config', None) self.plan_file = DictUtils.get(kwargs, 'plan', None) self.validation = not DictUtils.get(kwargs, 'no_validation', False) self.__progress_file = DictUtils.get(kwargs, 'progress_file', None) # Get parameters and variables from a command line/user-provided self.params.update(DictUtils.get(kwargs, 'params', {})) self.variables.update(DictUtils.get(kwargs, 'vars', {})) # Load default configuration if not DictUtils.get(kwargs, 'discard_default_config', False): logging.debug("Loading default configuration") _, self.config, self.param_info = ConfigurationLoader.load( os.path.join(os.path.dirname(__file__), 'configs') ) # Load configurations specified on a command line self.load_configuration() # Add extensions from command line DictUtils.ensure_exists(self.config, 'extensions', []) self.config['extensions'].extend(DictUtils.get(kwargs, 'extensions', [])) # All's done self.__initialized = True def load_configuration(self): """Loads configuration specified by a user on a command line. At this moment, DLBS has already loaded standard configuration (if `discard_default_config` flag is not present). DLBS will try to load user configuration from `config` file (if not None) overwriting default parameters. Then, it will try to load user provided parameters (`params`, `vars` and `extensions`) that will overwrite existing configuration. 
If `plan` file is present, it will be loaded if `action` is `run`. """ if self.config_file is not None: logging.debug('Loading configuration from: %s', self.config_file) with open(self.config_file) as file_obj: user_config = json.load(file_obj) # Update parameter information from user configuration. ConfigurationLoader.update_param_info(self.param_info, user_config, is_user_config=True) # Update existing benchmark configuration. ConfigurationLoader.update(self.config, ConfigurationLoader.remove_info(user_config)) if self.plan_file is not None and self.action == 'run': logging.debug('Loading plan from: %s', self.plan_file) with open(self.plan_file) as plan_file: self.plan = json.load(plan_file) def execute(self): """Executed requested action.""" if self.action == 'print-config': json.dump(self.config, sys.stdout, indent=4, sort_keys=True) print('') elif self.action == 'build': self.build_plan(serialize=True) elif self.action == 'run': self.build_plan() logging.info("Plan was built with %d experiments", len(self.plan)) Processor(self.param_info).compute_variables(self.plan) if self.validation: validator = Validator(self.plan) validator.validate() if not validator.plan_ok: validator.report() logging.warn("---------------------------------------------------------------------------") logging.warn("- Benchmark plan has not been validated. See reason (s) above. -") logging.warn("- If you believe validator is wrong (what can very well be the case), -") logging.warn("- rerun experimenter with `--no-validation` flag e.g.: -") logging.warn("- python ./python/dlbs/experimenter.py run --no-validation ... -") logging.warn("---------------------------------------------------------------------------") else: logging.info("Benchmark plan has been validated") if not self.validation or validator.plan_ok: Launcher.run(self.plan, self.__progress_file) elif self.action == 'validate': self.build_plan() Processor(self.param_info).compute_variables(self.plan) validator = Validator(self.plan) validator.validate() validator.report() def build_plan(self, serialize=False): """Builds plan combining configuration, parameters and variables.""" self.plan = Builder.build(self.config, self.params, self.variables) if serialize: if self.plan_file: DictUtils.dump_json_to_file(self.plan, self.plan_file) else: json.dump(self.plan, sys.stdout, indent=4) print ('') def parse_arguments(): """Parse command line arguments Returns: dict: Dictionary with command line arguments. """ parser = argparse.ArgumentParser() parser.add_argument('action', type=str, help="Action to perform. Valid actions: 'print-config', 'run', 'build' and 'analyze-plan'.") parser.add_argument('--config', required=False, type=str, help="Configuration file (json) of an experiment. Will override values from " "default configuration.") parser.add_argument('--plan', required=False, type=str, help="Pre-built plan of an experiment (json file). If action is 'build', a file name to write " "plan to. If action is 'run', a file name to read plan from.") parser.add_argument('--progress_file', '--progress-file', required=False, type=str, default=None, help="A JSON file that experimenter will be updating on its progress. " "If not present, no progress info will be available. " "Put it somewhere in /dev/shm") parser.add_argument('-P', action='append', required=False, default=[], help="Parameters that override parameters in configuration file. " "For instance, -Pexp.phase=2. 
Values must be json parsable (json.loads()).") parser.add_argument('-V', action='append', required=False, default=[], help="Variables that override variables in configuration file in section 'variables'. " "These variables are used to generate different combinations of experiments. " "For instance: -Vexp.framework='[\"tensorflow\", \"caffe2\"]'. " "Values must be json parsable (json.loads()).") parser.add_argument('--log_level', '--log-level', required=False, default='info', help='Python logging level. Valid values: "critical", "error", "warning", "info" and "debug"') parser.add_argument('--discard_default_config', '--discard-default-config', required=False, default=False, action='store_true', help='Do not load default configuration.') parser.add_argument('--no_validation', '--no-validation', required=False, default=False, action='store_true', help='Do not perform config validation before running benchmarks.') parser.add_argument('-E', action='append', required=False, default=[], help="Extensions to add. Can be useful to quickly customize experiments. " "Must be valid json parsable array element for 'extension' array.") args = parser.parse_args() return vars(args) def parse_json_arguments(args): """Parse parameters, variables and extensions. Args: args (dict): Dictionary of command line arguments returned by `parse_arguments`. Is not modified. Returns: A tuple of (params, variables, extensions): - `params` is a dictionary of parameters (all params in args['P']) - `variables` is a dictionary of variables (all vars in args['V']) - `extensions` is a list of dictionaries (all extensions in in args['E']) """ for param in ('P', 'V', 'E'): DictUtils.ensure_exists(args, param, []) params, variables, extensions = ({}, {}, []) DictUtils.add(params, args['P'], pattern='(.+?(?=[=]))=(.+)', must_match=True) DictUtils.add(variables, args['V'], pattern='(.+?(?=[=]))=(.+)', must_match=True) for extension in args['E']: try: extensions.append(json.loads(extension)) except Exception as err: logging.warn("Found non-json parsable extension: %s", extension) raise err return params, variables, extensions def update_arguments(args, json_args): """ Update `args` with data from `json_args`. Args: args (dict): Dictionary of command line arguments: - Keys 'P', 'V' and 'E' are removed. - Keys 'params', 'vars' and 'extensions' are created with values from `json_args`. json_args (tuple): A tuple returned by `parse_json_arguments` - (params, variables, extensions) Returns: dict: Updated copy of `args`. 
""" assert isinstance(json_args, tuple) and len(json_args) == 3, "Invalid type of a function argument" args_copy = copy.deepcopy(args) for param in ('P', 'V', 'E'): del args_copy[param] for idx, param in enumerate(['params', 'vars', 'extensions']): args_copy[param] = copy.deepcopy(json_args[idx]) return args_copy def init_logger(log_level): """Initialize logger.""" if log_level is None: return log_level = logging.getLevelName(log_level.upper()) logging.debug("Initializing logger to level %s", log_level) root = logging.getLogger() root.setLevel(log_level) handler = logging.StreamHandler(sys.stdout) handler.setLevel(log_level) root.addHandler(handler) if __name__ == '__main__': if len(sys.argv) < 2: raise ValueError("Missing mandatory parameter `action`.") if sys.argv[1] == 'help': Helper.main() elif sys.argv[1] == 'sysinfo': print(json.dumps(SysInfo().collect(), indent=2)) else: args = parse_arguments() init_logger(args['log_level']) args = update_arguments(args, parse_json_arguments(args)) experimenter = Experimenter() experimenter.init(**args) experimenter.execute()
from mental_models import utils from itertools import combinations from collections import defaultdict class AutoMap(object): def __init__(self, text=None, nlp=None, delete_list=None): nlp = nlp if not nlp: nlp = utils.nlp_en self.raw_text = text.strip()\ .replace('\n', '')\ .replace('.', '')\ .replace(',', '')\ .replace('(', '')\ .replace(')', '') self.text = nlp(self.raw_text) self.delete_list = delete_list if not delete_list: # self.delete_list = set(None) ? self.delete_list = set(nlp.Defaults.stop_words) self.text_concepts =\ [token for token in self.text if not (token.text.lower() in self.delete_list or token.is_punct or token.is_space)] self.text_concepts_length = len(self.text_concepts) def get_statements(self): # kinda wordy, lets take out self get_higher_or_text_concept = self.get_higher_or_text_concept def replace_with_concepts(concepts): for window_size in concepts: for index in range(len(concepts[window_size])): # here we update the text to a concept concepts[window_size][index] =\ (get_higher_or_text_concept( concepts[window_size][index][0]), get_higher_or_text_concept( concepts[window_size][index][1])) return concepts response = {} direct = defaultdict(list) rhetorical = defaultdict(list) offset_index = 1 word_index = 0 words_with_offsets =\ [word_offset for word_offset in zip( self.text_concepts, range(self.text_concepts_length) )] all_combinations_with_offsets =\ [(pair[0][word_index], pair[1][word_index], pair[1][offset_index] - pair[0][offset_index]) for pair in combinations(words_with_offsets, 2)] for concept_a, concept_b, offset in all_combinations_with_offsets: direct[offset].append((concept_a, concept_b)) # now we construct the rhetorical version by throwing out any pair # that includes a stopword (.is_stop). Since we don't use the delete_list # in practice I just check .is_stop for window_size in direct.keys(): for item in direct[window_size]: rhetorical[window_size] =\ [item for item in direct[window_size] if not (item[0].is_stop or item[1].is_stop)] # now we need to replace the text with its concept across both versions # note: I could optimize this by doing this directly in the step above response['direct'] = replace_with_concepts(direct) response['rhetorical'] = replace_with_concepts(rhetorical) return response def get_higher_or_text_concept(self, token): ret = token.text if not token.is_stop: #synset = token._.wordnet.synsets()[0] synset_lookup = token._.wordnet.synsets() if synset_lookup: synset = synset_lookup[0] hypernyms = synset.hypernyms() if not hypernyms: hypernyms = synset.root_hypernyms() if hypernyms: ret = hypernyms[0].name() return ret def get_concepts(self, want_higher_concepts=False, want_text_concepts=False, include_text_concepts=False): def have_a_higher_concept_or_text_concept(is_stop, include_text_concepts): ret = False if not is_stop: ret = True if is_stop and include_text_concepts: ret = True return ret # kinda wordy, lets take out self get_higher_or_text_concept = self.get_higher_or_text_concept response = {} higher_concepts = None if want_higher_concepts: # here I include filter logic that surfaces any text concepts # that sare spaCy stop words; this way some words come to the surface # beacuse otherwise WordNet has a higher concept for everything print("in higher concepts") higher_concepts = [ get_higher_or_text_concept(token) for token in self.text_concepts if have_a_higher_concept_or_text_concept(token.is_stop, include_text_concepts)] # package up data into the response object ... 
        if want_text_concepts:
            response['text_concepts'] = self.text_concepts

        if want_higher_concepts:
            response['higher_concepts'] = higher_concepts

        return response
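# --- Illustrative sketch (not part of the AutoMap class) ---
# A toy, spaCy-free version of the pairing step in get_statements(): every
# ordered pair of tokens is recorded under the distance (offset) separating
# them, which is what the window-size keyed dictionaries above hold. Plain
# strings stand in for spaCy tokens here.
from collections import defaultdict
from itertools import combinations

def pairs_by_offset(tokens):
    pairs = defaultdict(list)
    for (i, a), (j, b) in combinations(list(enumerate(tokens)), 2):
        pairs[j - i].append((a, b))
    return pairs

if __name__ == "__main__":
    result = pairs_by_offset(["mental", "models", "shape", "decisions"])
    for offset in sorted(result):   # offset 1 = adjacent words, offset 3 = first/last pair
        print(offset, result[offset])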
# chapter04_02
# For loops

# for <item> in <collection (tuple, list, dict, etc.)>:
#     (loop body)   -- general form

for v1 in range(10): # from 0 up to 9
    print('v1 is :', v1)

for v2 in range(1, 11): # from 1 to 10
    print('v2 is', v2)

for v3 in range(1, 11, 2): # from 1 to 10 in steps of 2
    print('v3 is', v3)

# Sum of 1 ~ 1000
sum1 = 0
for v in range(1, 1001):
    sum1 += v
# Even when the sum function would do, checking with a for loop is useful while debugging
print('1 ~ 1000 sum : ' , sum1)
print()
print('1 ~ 1000 sum: ' , sum(range(1, 1001))) # easy with the built-in sum function
# range(a, b): if a and b are integers, it yields that sequence of integers
print('1 ~ 1000 4 :', sum(range(1, 1001, 4)))

# Iterables = objects that can be iterated over -> they provide __iter__
# string, list, tuple, set, dict all work
# functions that return iterables: range, reversed, enumerate, filter, map, zip

# Lists work
names = ['Lee', 'Park', 'Cap', 'Kim']
for name in names:
    print('You are :', name)

lotto = [11, 19, 21, 28, 36]
for n in lotto:
    print('Num :', n)

# Strings work
word = "beautiful"
for s in word:
    print('word :', s)

# Dicts work
my_info = {
    "name" : 'Lee',
    "Age" : 30,
    "City" : 'San'
}

for k in my_info: # iterating over a dict yields keys, not values
    print('key:', k) # to get the value, use my_info[k] instead of k
    print('value:', my_info.get(k)) # the get method works just as easily

for v in my_info.values(): # even simpler with the values() method (best)
    print('value:', v)

##############################################################
# Using if and for together
name = 'FineaPplE'
for n in name:
    if n.isupper():
        print(n)
    else:
        print(n.upper())
# worth memorizing these string methods

# break = the rest of the loop is not executed
# sequential search in algorithms (scanning all n items of a list to find a value)
# skipping the unneeded remaining iterations saves work
numbers = [14, 3, 4, 7, 10, 24, 4, 1, 11, 34]
for num in numbers:
    if num == 4:
        print('Found: 4!')
        break
    else:
        print('Not found :', num)

# continue: do not stop the loop, jump back to the start of the next iteration
lt = ["1", 2, 5, True, 4.1, complex(4)]
for v in lt:
    if type(v) is bool: # use `is` when comparing types
        continue
    print("current type:", v, type(v))
## think about when break and continue are appropriate

# for - else
numbers = [14, 3, 4, 7, 10, 24, 4, 1, 11, 34]
for num in numbers:
    if num == 24:
        print("Found : 24")
        break
else:
    print('Not found:24')
# -->> if the for loop ends via break, the else block is not executed

# Practice: reverse
for i in range(2, 10):
    for j in range(1, 10):
        print('{:4d}'.format(i * j), end='')
    print()

# Conversions
nick = 'Bongman'
print('Reversed', reversed(nick)) # prints the reversed-object itself, not the letters
print('List', list(reversed(nick)))
print('tuple', tuple(reversed(nick)))
print('Set', set(reversed(nick))) # no ordering - a property of set
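# --- Extra example (not part of the original lesson) ---
# Two of the iterable-returning functions listed above that the lesson does not
# demonstrate: enumerate() pairs each item with its index, and zip() walks two
# sequences in step. The sample names and scores are made up.
members = ['Lee', 'Park', 'Kim']
scores = [90, 85, 77]

for index, member in enumerate(members):   # (index, value) pairs
    print('no.{} : {}'.format(index + 1, member))

for member, score in zip(members, scores): # iterate both lists together
    print('{} -> {}'.format(member, score))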
<reponame>liangleslie/core """Support for ONVIF Cameras with FFmpeg as decoder.""" from __future__ import annotations from haffmpeg.camera import CameraMjpeg from onvif.exceptions import ONVIFError import voluptuous as vol from yarl import URL from homeassistant.components import ffmpeg from homeassistant.components.camera import Camera, CameraEntityFeature from homeassistant.components.ffmpeg import CONF_EXTRA_ARGUMENTS, get_ffmpeg_manager from homeassistant.components.stream import ( CONF_RTSP_TRANSPORT, CONF_USE_WALLCLOCK_AS_TIMESTAMPS, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import HTTP_BASIC_AUTHENTICATION from homeassistant.core import HomeAssistant from homeassistant.helpers import config_validation as cv, entity_platform from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream from homeassistant.helpers.entity_platform import AddEntitiesCallback from .base import ONVIFBaseEntity from .const import ( ABSOLUTE_MOVE, ATTR_CONTINUOUS_DURATION, ATTR_DISTANCE, ATTR_MOVE_MODE, ATTR_PAN, ATTR_PRESET, ATTR_SPEED, ATTR_TILT, ATTR_ZOOM, CONF_SNAPSHOT_AUTH, CONTINUOUS_MOVE, DIR_DOWN, DIR_LEFT, DIR_RIGHT, DIR_UP, DOMAIN, GOTOPRESET_MOVE, LOGGER, RELATIVE_MOVE, SERVICE_PTZ, STOP_MOVE, ZOOM_IN, ZOOM_OUT, ) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the ONVIF camera video stream.""" platform = entity_platform.async_get_current_platform() # Create PTZ service platform.async_register_entity_service( SERVICE_PTZ, { vol.Optional(ATTR_PAN): vol.In([DIR_LEFT, DIR_RIGHT]), vol.Optional(ATTR_TILT): vol.In([DIR_UP, DIR_DOWN]), vol.Optional(ATTR_ZOOM): vol.In([ZOOM_OUT, ZOOM_IN]), vol.Optional(ATTR_DISTANCE, default=0.1): cv.small_float, vol.Optional(ATTR_SPEED, default=0.5): cv.small_float, vol.Optional(ATTR_MOVE_MODE, default=RELATIVE_MOVE): vol.In( [ CONTINUOUS_MOVE, RELATIVE_MOVE, ABSOLUTE_MOVE, GOTOPRESET_MOVE, STOP_MOVE, ] ), vol.Optional(ATTR_CONTINUOUS_DURATION, default=0.5): cv.small_float, vol.Optional(ATTR_PRESET, default="0"): cv.string, }, "async_perform_ptz", ) device = hass.data[DOMAIN][config_entry.unique_id] async_add_entities( [ONVIFCameraEntity(device, profile) for profile in device.profiles] ) return True class ONVIFCameraEntity(ONVIFBaseEntity, Camera): """Representation of an ONVIF camera.""" _attr_supported_features = CameraEntityFeature.STREAM def __init__(self, device, profile): """Initialize ONVIF camera entity.""" ONVIFBaseEntity.__init__(self, device, profile) Camera.__init__(self) self.stream_options[CONF_RTSP_TRANSPORT] = device.config_entry.options.get( CONF_RTSP_TRANSPORT ) self.stream_options[ CONF_USE_WALLCLOCK_AS_TIMESTAMPS ] = device.config_entry.options.get(CONF_USE_WALLCLOCK_AS_TIMESTAMPS, False) self._basic_auth = ( device.config_entry.data.get(CONF_SNAPSHOT_AUTH) == HTTP_BASIC_AUTHENTICATION ) self._stream_uri = None @property def name(self) -> str: """Return the name of this camera.""" return f"{self.device.name} {self.profile.name}" @property def unique_id(self) -> str: """Return a unique ID.""" if self.profile.index: return f"{self.device.info.mac or self.device.info.serial_number}_{self.profile.index}" return self.device.info.mac or self.device.info.serial_number @property def entity_registry_enabled_default(self) -> bool: """Return if the entity should be enabled when first added to the entity registry.""" return self.device.max_resolution == self.profile.video.resolution.width async def stream_source(self): 
"""Return the stream source.""" return self._stream_uri async def async_camera_image( self, width: int | None = None, height: int | None = None ) -> bytes | None: """Return a still image response from the camera.""" image = None if self.device.capabilities.snapshot: try: image = await self.device.device.get_snapshot( self.profile.token, self._basic_auth ) except ONVIFError as err: LOGGER.error( "Fetch snapshot image failed from %s, falling back to FFmpeg; %s", self.device.name, err, ) if image is None: return await ffmpeg.async_get_image( self.hass, self._stream_uri, extra_cmd=self.device.config_entry.options.get(CONF_EXTRA_ARGUMENTS), width=width, height=height, ) return image async def handle_async_mjpeg_stream(self, request): """Generate an HTTP MJPEG stream from the camera.""" LOGGER.debug("Handling mjpeg stream from camera '%s'", self.device.name) ffmpeg_manager = get_ffmpeg_manager(self.hass) stream = CameraMjpeg(ffmpeg_manager.binary) await stream.open_camera( self._stream_uri, extra_cmd=self.device.config_entry.options.get(CONF_EXTRA_ARGUMENTS), ) try: stream_reader = await stream.get_reader() return await async_aiohttp_proxy_stream( self.hass, request, stream_reader, ffmpeg_manager.ffmpeg_stream_content_type, ) finally: await stream.close() async def async_added_to_hass(self): """Run when entity about to be added to hass.""" uri_no_auth = await self.device.async_get_stream_uri(self.profile) url = URL(uri_no_auth) url = url.with_user(self.device.username) url = url.with_password(self.device.password) self._stream_uri = str(url) async def async_perform_ptz( self, distance, speed, move_mode, continuous_duration, preset, pan=None, tilt=None, zoom=None, ) -> None: """Perform a PTZ action on the camera.""" await self.device.async_perform_ptz( self.profile, distance, speed, move_mode, continuous_duration, preset, pan, tilt, zoom, )
<gh_stars>0 # -*- coding: utf-8 -*- from sklearn.datasets import fetch_olivetti_faces from sklearn.model_selection import train_test_split from os import mkdir, listdir, getcwd from os.path import join, exists from cv2 import imwrite from shutil import rmtree from torchvision.datasets import ImageFolder from torchvision.transforms import RandomHorizontalFlip from torchvision.transforms import Compose, ToTensor from torchvision.transforms import Grayscale from torch.utils.data import DataLoader from torch.nn import Module, Conv2d, Dropout2d, Linear from torch.nn.functional import relu, max_pool2d from torch.nn.functional import log_softmax, nll_loss from torch import flatten, manual_seed, device, save from torch import no_grad from torch.optim.lr_scheduler import StepLR from torch.optim.adadelta import Adadelta from torch.cuda import is_available from argparse import ArgumentParser from keras.preprocessing.image import load_img, img_to_array from keras.preprocessing.image import ImageDataGenerator from imutils.paths import list_images from matplotlib.pyplot import plot, legend, show, savefig from matplotlib.pyplot import suptitle, subplots class Net(Module): def __init__(self): super(Net, self).__init__() self.conv1 = Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=1) self.conv2 = Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1) self.dropout1 = Dropout2d(p=0.5) self.fc1 = Linear(in_features=12544, out_features=128) self.fc2 = Linear(in_features=128, out_features=40) def forward(self, x): x = self.conv1(x) x = relu(x) x = max_pool2d(x, 2) x = self.conv2(x) x = relu(x) x = max_pool2d(x, 2) x = flatten(x, 1) x = self.fc1(x) x = relu(x) x = self.dropout1(x) x = self.fc2(x) out = log_softmax(x, dim=1) return out def train(argument_object, model, dev, train_loader, optimizer, epoch): """ Args: argument_object (Namespace): Network params model (Net): CNN model dev (device): If CUDA, enables GPU, CPU otherwise train_loader (DataLoader): Train dataset optimizer (Adadelta): Adadelta object epoch (int): Iteration number """ model.train() run_loss = 0 for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(dev), target.to(dev) optimizer.zero_grad() output = model(data) loss = nll_loss(output, target) loss.backward() optimizer.step() run_loss += loss.item() if batch_idx % argument_object.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\t' 'Loss: {:.6f}'.format(epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) return run_loss def test(model, dev, test_loader): """ Args: model (Net): CNN model dev (device): If CUDA, enables GPU, CPU otherwise test_loader (DataLoader): Test dataset """ model.eval() test_loss = 0 correct = 0 with no_grad(): for data, target in test_loader: data, target = data.to(dev), target.to(dev) output = model(data) test_loss += nll_loss(output, target, reduction='sum').item() pred = output.argmax(dim=1, keepdim=True) correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) print('\nTest set: Average loss: {:.4f}, ' 'Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) return test_loss def check_save(size, to_dir): """Assert save_image method works properly. Args: size (int): Size of (train-validation-test) set to_dir (string): Either train, validation or test directory. 
""" assert (size > 0) check_size = 0 entries = listdir(path=to_dir) for entry in entries: images = listdir(path=join(to_dir, entry)) check_size += len(images) assert (check_size == size) def save_image(size, label, to_dir, x): """ Args: size (int): Size of (train-validation-test) set label (string): Class names to_dir (string): Either train, validation or test directory. x (ndarray): image data """ current = str(label[0]) count = 0 for i in range(size): if current != str(label[i]): current = str(label[i]) if len(str(current)) == 1: current = '0' + str(current) label_dir = join(to_dir, current) if not exists(path=label_dir): mkdir(path=label_dir) name = join(label_dir, str(str(label[i]) + '_' + str(count) + '.png')) imwrite(filename=name, img=x[i] * 255) count += 1 check_save(size=size, to_dir=to_dir) def generate_and_save(data_gen, set_path, set_dir, gen_num, save_format=".png", save_prefix="gen_image"): """Populate images using data augmentation. Args: data_gen (ImageDataGenerator): Generator object set_path (str): set_dir (list): gen_num (int): save_format (str): save_prefix (str): """ for label in set_dir: if label != ".DS_Store": # Mac-os specific problem path = join(set_path, label) images = listdir(path=path) for img in images: if img != '.DS_Store': # Mac-os specific problem count_img = 1 img_path = join(path, img) loaded_img = load_img(path=img_path, color_mode="grayscale") array_img = img_to_array(img=loaded_img) current_img = array_img.reshape((1,) + array_img.shape) for _ in data_gen.flow(x=current_img, batch_size=1, save_to_dir=path, save_prefix=save_prefix, save_format=save_format): count_img += 1 if count_img > gen_num: break def create_folder(write_to_file): """Create train and test folders under data folder. If data folder is existed, deletes and recreates it. Args: write_to_file (bool, optional): If true, save all images to the data folder. Returns: tuple: (train, test) both are the directory paths. """ dir_data = "data" dir_train = join(dir_data, "train") dir_test = join(dir_data, "test") if write_to_file: if exists(path=dir_data): rmtree(path=dir_data) mkdir(path=dir_data) mkdir(path=dir_train) mkdir(path=dir_test) print("All sets (train-test) are available under" " the data folder:\n\n{}\n".format(getcwd())) return dir_train, dir_test def design_data(x, y, test_size): """ Args: x (ndarray): images y (ndarray): target test_size (float): Dividing dataset based on the ratio, remaining part will be the test set. Returns: training data, training target, test data, test target """ separated_data = train_test_split(x, y, test_size=test_size, random_state=42) x_train = separated_data[0] x_test = separated_data[1] y_train = separated_data[2] y_test = separated_data[3] x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 return x_train, x_test, y_train, y_test def get_images(test_size): """ Args: test_size (float): Dividing dataset based on the ratio, remaining part will be the test set. Returns: tuple: (train and test images) """ olivetti = fetch_olivetti_faces() images = olivetti.images target = olivetti.target train_image, test_image, _, _ = train_test_split(images, target, test_size=test_size, random_state=42) return train_image, test_image def split_data(test_size, generate_data, write_to_file=True): """ Args: test_size (float): Dividing dataset based on the ratio, remaining part will be the test set. generate_data (bool, optional): If true, generate and save all images to the olivetti folder. 
write_to_file (bool, optional): If true, save all images to the data folder. Returns: dict: (training data, training target, test data, test target) """ x, y = fetch_olivetti_faces(return_X_y=True) x_train, x_test, y_train, y_test = design_data(x=x, y=y, test_size=test_size) dir_train, dir_test = create_folder(write_to_file=write_to_file) size_train = x_train.shape[0] size_test = x_test.shape[0] train_folder_size = 0 test_folder_size = 0 img_row, img_col = 64, 64 train_images, test_images = get_images(test_size=test_size) if write_to_file: save_image(size=size_train, label=y_train, to_dir=dir_train, x=train_images) save_image(size=size_test, label=y_test, to_dir=dir_test, x=test_images) train_folder_size = len(list(list_images(basePath=dir_train))) test_folder_size = len(list(list_images(basePath=dir_test))) print("\nTrain folder images: {}".format(train_folder_size)) print("Test folder images: {}".format(test_folder_size)) if generate_data: train_num = int(60000 / train_folder_size) test_num = int(10000 / test_folder_size) train_data_gen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) test_data_gen = ImageDataGenerator(rescale=1./255) example_img = train_images.reshape(train_images.shape[0], img_row, img_col, 1)[0] example_img = example_img.reshape((1,) + example_img.shape) count = 0 gen_number = 10 # generate 10 samples from the example image gen_images = [] gen_labels = [] for batch in train_data_gen.flow(x=example_img, batch_size=1): batch_reshaped = batch.reshape(batch.shape[1], batch.shape[2]) gen_images.append(batch_reshaped) gen_labels.append(y_train[0]) count += 1 if count > gen_number: break t_gen = "Generated Train Samples" f_gen = "gen_train_samples.png" display_generated_samples(2, 5, x=gen_images, y=gen_labels, t=t_gen, title="Id:{}", f_name=f_gen) print("\nTrain generation begins..") generate_and_save(data_gen=train_data_gen, set_path=dir_train, set_dir=listdir(path=dir_train), gen_num=train_num) train_generated = len(list(list_images(basePath=dir_train))) print("\nTrain generation end. Generated Train images: {}". format(train_generated)) print("\nTest generation begins..") generate_and_save(data_gen=test_data_gen, set_path=dir_test, set_dir=listdir(path=dir_test), gen_num=test_num) test_generated = len(list(list_images(basePath=dir_test))) print("\nTest generation end. Generated Test images: {}". 
format(test_generated)) transform = Compose(transforms=[Grayscale(num_output_channels=1), RandomHorizontalFlip(), ToTensor()]) train_dataset = ImageFolder(root=dir_train, transform=transform) test_dataset = ImageFolder(root=dir_test, transform=transform) data = {'train_dataset': train_dataset, 'training_label': y_train, 'test_dataset': test_dataset, 'test_label': y_test} return data def plot_loss(train_loss, test_loss): r"""Plot train and the test loss Args: train_loss (list): Training loss during epoch test_loss (list): Test loss during epoch """ plot(train_loss, label='Training loss') plot(test_loss, label='Test los') legend(frameon=False) savefig(fname="olivetti_loss.png", dpi=300) show() def display_generated_samples(n_row, n_col, x, y, t, title="Id:{}", fig_size=(6, 3), dpi=300, f_name="default.png"): """ Args: n_row (int): Row number n_col (int): Column number x (list): generated images y (list): labels t (str): Graph title title (str): Id title fig_size (tuple): figure size dpi (int): dots per inch f_name (str): file name """ fig, ax = subplots(nrows=n_row, ncols=n_col, figsize=fig_size, dpi=dpi) ax = ax.flatten() sample_num = n_row * n_col for i in range(sample_num): ax[i].imshow(X=x[i], cmap='gray') ax[i].set_xticks([]) ax[i].set_yticks([]) ax[i].set_title(title.format(y[i])) suptitle(t=t) savefig(f_name) def arguments(train_batch_size=64, test_batch_size=32, epochs=5, learning_rate=1.0, test_size=0.35, gamma=0.7, no_cuda=True, seed=1, log_interval=10, save_model=True, write_to_file=False, generate_data=False, log_dir='runs/olivetti_experiment'): """ Args: train_batch_size (int): Input batch size for training test_batch_size (int): Input batch size for testing epochs (int): Number of episodes for training learning_rate (float): Step size at each iteration test_size (float): Split ratio gamma (float): Learning rate step no_cuda (bool): If true, disables CUDA seed (int): Value of the random seed log_interval (int): Step to save batches save_model (bool): If true, saves model write_to_file (bool): If true, creates train/test dir generate_data (bool): If true, populate data log_dir (str): Tensorboard run location Returns: argument object """ parser = ArgumentParser(description="Olivetti Example") parser.add_argument('--train-batch-size', type=int, default=train_batch_size, metavar='N', help='input batch size for train' ' (default: {})'.format(train_batch_size)) parser.add_argument('--test-batch-size', type=int, default=test_batch_size, metavar='N', help='input batch size for test' ' (default: {})'.format(test_batch_size)) parser.add_argument('--epochs', type=int, default=epochs, metavar='N', help='number of epochs to train' ' (default: {})'.format(epochs)) parser.add_argument('--lr', type=float, default=learning_rate, metavar='LR', help='learning rate ' '(default: {})'.format(learning_rate)) parser.add_argument('--test-size', type=float, default=test_size, metavar='N', help='test size split ratio ' '(default: {})'.format(test_size)) parser.add_argument('--gamma', type=float, default=gamma, metavar='M', help='Learning rate step gamma ' '(default: {})'.format(gamma)) parser.add_argument('--no-cuda', action='store_true', default=no_cuda, help='disables CUDA training ' '(default: {})'.format(no_cuda)) parser.add_argument('--seed', type=int, default=seed, metavar='S', help='random seed (default: {})'.format(seed)) parser.add_argument('--log-interval', type=int, default=log_interval, metavar='N', help='how many batches to wait ' 'before logging training status ' '(default: 
{})'.format(log_interval)) parser.add_argument('--save-model', action='store_true', default=save_model, help='For Saving the current Model ' '(default: {})'.format(save_model)) parser.add_argument('--write-to-file', type=bool, metavar='F', default=write_to_file, help='Split dataset into the train and test directories') parser.add_argument('--generate-data', type=bool, metavar='G', default=generate_data, help='Populate data similar to the MNIST') parser.add_argument('--log-dir', action='store_true', default=log_dir, help='Tensorboard run location') argument_object = parser.parse_args() return argument_object def main(): args = arguments() manual_seed(seed=args.seed) use_cuda = not args.no_cuda and is_available() dev = device("cuda" if use_cuda else "cpu") kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {} data = split_data(test_size=0.35, generate_data=args.generate_data, write_to_file=args.write_to_file) train_dataset = data['train_dataset'] test_dataset = data['test_dataset'] train_loader = DataLoader(dataset=train_dataset, batch_size=args.train_batch_size, shuffle=True, **kwargs) test_loader = DataLoader(dataset=test_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs) model = Net().to(device=dev) optimizer = Adadelta(params=model.parameters(), lr=args.lr, rho=0.9, eps=1e-6, weight_decay=0) scheduler = StepLR(optimizer=optimizer, step_size=1, gamma=args.gamma) train_los, test_los = [], [] for epoch in range(1, args.epochs + 1): tr_los = train(argument_object=args, model=model, dev=dev, train_loader=train_loader, optimizer=optimizer, epoch=epoch) te_los = test(model=model, dev=dev, test_loader=test_loader) scheduler.step(epoch=epoch) train_los.append(tr_los/len(train_loader)) test_los.append(te_los) if args.save_model: save(obj=model.state_dict(), f="olivetti_cnn.h5") if args.epochs > 1: plot_loss(train_loss=train_los, test_loss=test_los) if __name__ == '__main__': main()
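# --- Illustrative shape check (not part of the training script) ---
# Where the hard-coded in_features=12544 of Net.fc1 comes from: a 1x64x64
# Olivetti face passes through two 3x3 convolutions and two 2x2 max-pools,
# ending at 64 channels of 14x14 feature maps, and 64 * 14 * 14 = 12544.
import torch
from torch.nn import Conv2d
from torch.nn.functional import max_pool2d, relu

def check_flatten_size():
    x = torch.zeros(1, 1, 64, 64)                       # one grayscale 64x64 face
    x = max_pool2d(relu(Conv2d(1, 32, 3, 1)(x)), 2)     # -> (1, 32, 31, 31)
    x = max_pool2d(relu(Conv2d(32, 64, 3, 1)(x)), 2)    # -> (1, 64, 14, 14)
    print(x.shape, torch.flatten(x, 1).shape)           # ... and (1, 12544)

if __name__ == "__main__":
    check_flatten_size()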
"""Simple module providing a quaternion class for manipulating rotations easily. Note: all angles are assumed to be specified in radians. Note: this is an entirely separate implementation from the PyOpenGL quaternion class. This implementation assumes that Numeric python will be available, and provides only those methods and helpers commonly needed for manipulating rotations. """ from math import * from OpenGLContext.arrays import * from OpenGLContext import utilities def fromXYZR( x,y,z, r ): """Create a new quaternion from a VRML-style rotation x,y,z are the axis of rotation r is the rotation in radians.""" x,y,z = utilities.normalise( (x,y,z) ) return Quaternion ( array( [ cos(r/2.0), x*(sin(r/2.0)), y*(sin(r/2.0)), z*(sin(r/2.0)), ]) ) def fromEuler( x=0,y=0,z=0 ): """Create a new quaternion from a 3-element euler-angle rotation about x, then y, then z """ if x: base = fromXYZR( 1,0,0,x) if y: base = base * fromXYZR( 0,1,0,y) if z: base = base * fromXYZR( 0,0,1,z) return base elif y: base = fromXYZR( 0,1,0,y) if z: base = base * fromXYZR( 0,0,1,z) return base else: return fromXYZR( 0,0,1,z) class Quaternion(object): """Quaternion object implementing those methods required to be useful for OpenGL rendering (and not many others)""" __slots__ = ('internal','__weakref__') def __init__ (self, elements = [1,0,0,0] ): """The initializer is a four-element array, w, x,y,z -- all elements should be doubles/floats the default values are those for a unit multiplication quaternion. """ elements = asarray( elements, 'd') length = sqrt( sum( elements * elements)) if length != 1: elements = elements/length self.internal = elements def __mul__( self, other ): """Multiply this quaternion by another quaternion, generating a new quaternion which is the combination of the rotations represented by the two source quaternions. Other is interpreted as taking place within the coordinate space defined by this quaternion. Alternately, if "other" is a matrix, return the dot-product of that matrix with our matrix (i.e. rotate the coordinate) """ if hasattr( other, 'internal' ): w1,x1,y1,z1 = self.internal w2,x2,y2,z2 = other.internal w = w1*w2 - x1*x2 - y1*y2 - z1*z2 x = w1*x2 + x1*w2 + y1*z2 - z1*y2 y = w1*y2 + y1*w2 + z1*x2 - x1*z2 z = w1*z2 + z1*w2 + x1*y2 - y1*x2 return self.__class__( array([w,x,y,z],'d')) else: return dot( self.matrix (), other ) def XYZR( self ): """Get a VRML-style axis plus rotation form of the rotation. Note that this is in radians, not degrees, and that the angle is the last, not the first item... 
(x,y,z,radians) """ w,x,y,z = self.internal try: aw = acos(w) except ValueError: # catches errors where w == 1.00000000002 aw = 0 scale = sin(aw) if not scale: return (0,1,0,0) return (x / scale, y / scale, z / scale, 2 * aw ) def inverse( self ): """Construct the inverse of this (unit) quaternion Quaternion conjugate is (w,-x,-y,-z), inverse of a quaternion is conjugate / length**2 (unit quaternion means length == 1) """ w,x,y,z = self.internal return self.__class__( array((w,-x,-y,-z),'d')) def matrix( self, dtype='f',inverse=False ): """Get a rotation matrix representing this rotation dtype -- specifies the result-type of the matrix, defaults to 'f' in order to match real-world precision of matrix operations in video cards inverse -- if True, calculate the inverse matrix for the quaternion """ w,x,y,z = self.internal if inverse: x,y,z = -x,-y,-z return array([ [ 1-2*y*y-2*z*z, 2*x*y+2*w*z, 2*x*z-2*w*y, 0], [ 2*x*y-2*w*z, 1-2*x*x-2*z*z, 2*y*z+2*w*x, 0], [ 2*x*z+2*w*y, 2*y*z-2*w*x, 1-2*x*x-2*y*y, 0], [ 0,0,0,1], ], dtype=dtype) def __getitem__( self, x ): return self.internal[x] def __len__( self ): return len( self.internal) def __repr__( self ): """Return a human-friendly representation of the quaternion Currently this representation is as an axis plus rotation (in radians) """ return """<%s XYZR=%s>"""%( self.__class__.__name__, list(self.XYZR())) def delta( self, other ): """Return the angle in radians between this quaternion and another. Return value is a positive angle in the range 0-pi representing the minimum angle between the two quaternion rotations. From code by <NAME> on the 3D game development algos list """ #first get the dot-product of the two vectors cosValue = sum(self.internal + other.internal) # now get the positive angle in range 0-pi return acos( cosValue ) def slerp( self, other, fraction = 0, minimalStep= 0.0001): """Perform fraction of spherical linear interpolation from this quaternion to other quaternion Algo is from: http://www.gamasutra.com/features/19980703/quaternions_01.htm """ fraction = float( fraction ) cosValue = float(sum(self.internal * other.internal)) # if the cosValue is negative, use negative target and cos values? # not sure why, it's just done this way in the sample code if cosValue < 0.0: cosValue = -cosValue target = -other.internal else: # TODO: figure out why other.internal[:] returns a 0-dim array! 
            target = other.internal[::]
        if (1.0 - cosValue) > minimalStep:
            # regular spherical linear interpolation
            angle = acos( cosValue )
            angleSin = sin( angle )
            sourceScale = sin( (1.0 - fraction) * angle ) / angleSin
            targetScale = sin( fraction * angle ) / angleSin
        else:
            sourceScale = 1.0 - fraction
            targetScale = fraction
        try:
            return self.__class__(
                (sourceScale * self.internal) + (targetScale * target)
            )
        except ValueError as err:
            # debugging aid retained from the original code
            print(sourceScale)
            print(self.internal)
            print(targetScale)
            print(target)
            raise

def test ():
    print('fromEuler')
    print(fromEuler( pi/2 ).XYZR())
    print(fromEuler( y = pi/2 ).XYZR())
    print(fromEuler( z = pi/2 ).XYZR())
    print(fromEuler( y = pi/2, z = pi/2 ).matrix())
    rot = fromEuler( y = pi/2, z = pi/2 ).XYZR()
    print(fromXYZR( *rot ).matrix())
    print(fromEuler( y = pi/2, z = pi/2 ))
    first = fromXYZR( 0,1,0,0 )
    second = fromXYZR( 0,1,0,pi )
    for fraction in arange( 0.0, 1.0, .01 ):
        print(first.slerp( second, fraction ))
    first = fromXYZR( 0,1,0,0 )
    second = first.inverse()
    assert allclose( first.internal, second.internal ), (first, second)
    first = fromXYZR( 0,1,0,pi/2 )
    second = first.inverse()
    expected = fromXYZR( 0,1,0,-pi/2 )
    assert allclose( second.internal, expected.internal ), (second, expected)

if __name__ == "__main__":
    test()
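# --- Illustrative sketch (separate from the Quaternion class above) ---
# The same axis-angle construction used by fromXYZR() (w = cos(r/2),
# (x, y, z) = axis * sin(r/2)) and the Hamilton product from Quaternion.__mul__,
# written against plain numpy so it can run without OpenGLContext.
from math import cos, sin, pi, sqrt
import numpy as np

def quat_from_axis_angle(x, y, z, r):
    n = sqrt(x * x + y * y + z * z)
    x, y, z = x / n, y / n, z / n
    return np.array([cos(r / 2.0), x * sin(r / 2.0), y * sin(r / 2.0), z * sin(r / 2.0)])

def quat_mul(a, b):
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return np.array([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,
        w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,
    ])

if __name__ == "__main__":
    qy = quat_from_axis_angle(0, 1, 0, pi / 2)   # 90 degrees about Y
    qz = quat_from_axis_angle(0, 0, 1, pi / 2)   # 90 degrees about Z
    print(quat_mul(qy, qz))                      # combined rotation, as in fromEuler(y=..., z=...)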
<filename>Vocoder_train.py<gh_stars>0 #encoding:utf-8 import random import numpy as np import glob import os import itertools import time import torch import torch.nn as nn import torch.optim as optim import torch.utils.data as data import torchvision from torchvision import models,transforms import torchvision.utils as vutils import torch.nn.init as init from torch.autograd import Function import torch.nn.functional as F import torchaudio from module.dataset import * from module.vocoder import * #乱数のシードを設定 これにより再現性を確保できる manualSeed = 999 print("Random Seed: ", manualSeed) random.seed(manualSeed) torch.manual_seed(manualSeed) #データセットの、各データへのパスのフォーマット make_datapath_listへの引数 dataset_path = "./dataset/train/domainA/**/*.wav" #学習過程を見るための、サンプル音声のパス(フォーマットではなく普通のパスとして指定) sample_audio_path = "./dataset/train/domainA/jvs002_0.wav" #結果を出力するためのディレクトリ output_dir = "./output/" #使用するデバイス device = "cuda:0" #バッチサイズ batch_size = 16 #イテレーション数 total_iterations = 250000 #学習率 lr = 4e-4 #学習率をdecay_iterイテレーションごとにdecay_rate倍する lr_decay_iter = 50000 lr_decay_rate = 0.5 #何イテレーションごとに学習結果を出力するか output_iter = 5000 #出力用ディレクトリがなければ作る os.makedirs(output_dir, exist_ok=True) #データセットの読み込み、データセット作成 path_list = make_datapath_list(dataset_path) train_dataset = Audio_Dataset_for_Vocoder(file_list=path_list, extract_frames=24) dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=os.cpu_count(), #num_workerごとにシードを設定 これがないと各num_workerにおいて乱数が似たような値を返してしまう worker_init_fn=lambda worker_id: torch.manual_seed(manualSeed + worker_id) ) print("dataset size: {}".format(len(path_list))) #GPUが使用可能かどうか確認 device = torch.device(device if torch.cuda.is_available() else "cpu") print("device:",device) #ネットワークを初期化するための関数 def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.xavier_uniform_(m.weight.data, gain=1.0) if m.bias is not None: m.bias.data.fill_(0.01) elif classname.find('Linear') != -1: nn.init.xavier_uniform_(m.weight.data, gain=1.0) if m.bias is not None: m.bias.data.fill_(0.01) #Vocoderのインスタンスを生成 vocoder = Vocoder() #ネットワークをデバイスに移動 vocoder = vocoder.to(device) #ネットワークの初期化 vocoder.apply(weights_init) #optimizerをGeneratorとDiscriminatorに適用 beta1 = 0.9 beta2 = 0.999 optimizer = optim.Adam(vocoder.parameters(), lr=lr, betas=(beta1, beta2)) #Vocoderの学習過程を見るためのサンプル音声をロード、スペクトログラムを生成 sample_waveform, _ = torchaudio.load(sample_audio_path) sample_waveform = sample_waveform.squeeze(dim=0) sample_spectrogram = torchaudio.transforms.Spectrogram(n_fft=254, hop_length=128)(sample_waveform) #GriffinLimによって生成したwaveform(vocoderによる生成結果との比較用) sample_griffinlim_waveform = torchaudio.transforms.GriffinLim(n_fft=254, n_iter=256, hop_length=128)(sample_spectrogram) #学習開始 #学習過程を追うための変数 losses = [] #現在のイテレーション回数 now_iteration = 0 print("Start Training") #学習開始時刻を保存 t_epoch_start = time.time() #エポックごとのループ itertools.count()でカウンターを伴う無限ループを実装可能 for epoch in itertools.count(): #ネットワークを学習モードにする vocoder.train() #データセットA, Bからbatch_size枚ずつ取り出し学習 for (waveform_quantized, spectrogram) in dataloader: #waveform_quantized : torch.Size([frame*hop_length+1]) #spectrogram : torch.Size([frequency, frame]) #学習率の減衰の処理 if((now_iteration%lr_decay_iter==0) and (not now_iteration==0)): optimizer.param_groups[0]['lr'] *= lr_decay_rate #deviceに転送 waveform_quantized = waveform_quantized.to(device) spectrogram = spectrogram.to(device) #------------------------- #Vocoderの学習 #------------------------- spectrogram = spectrogram.transpose(1, 2) predicted = 
vocoder(waveform_quantized[:, :-1], spectrogram) loss = F.cross_entropy(predicted.transpose(1, 2), waveform_quantized[:, 1:]) #溜まった勾配をリセット optimizer.zero_grad() #傾きを計算 loss.backward() #gradient explosionを避けるため勾配を制限 nn.utils.clip_grad_norm_(vocoder.parameters(), max_norm=1.0, norm_type=2.0) #Generatorのパラメーターを更新 optimizer.step() #グラフへの出力用 losses.append(loss.item()) #学習状況をstdoutに出力 if now_iteration % 10 == 0: print(f"[{now_iteration}/{total_iterations}] Loss/vocoder:{loss:.5f}") #学習状況をファイルに出力 if((now_iteration%output_iter==0) or (now_iteration+1>=total_iterations)): out_dir = os.path.join(output_dir, f"iteration{now_iteration}") #出力用ディレクトリがなければ作る os.makedirs(out_dir, exist_ok=True) #ここまでの学習にかかった時間を出力 t_epoch_finish = time.time() total_time = t_epoch_finish - t_epoch_start with open(os.path.join(out_dir,"time.txt"), mode='w') as f: f.write("total_time: {:.4f} sec.\n".format(total_time)) #学習済みモデル(CPU向け)を出力 vocoder.eval() torch.save(vocoder.to('cpu').state_dict(), os.path.join(out_dir, "vocoder_trained_model_cpu.pth")) vocoder.to(device) vocoder.train() #lossのグラフ(対数スケール)を出力 plt.clf() plt.figure(figsize=(10, 5)) plt.title("Vocoder Loss During Training") plt.plot(losses, label="loss") plt.xlabel("iterations") plt.ylabel("Loss") plt.legend() plt.grid() plt.savefig(os.path.join(out_dir, "loss.png")) plt.close() #推論を実行、結果を保存する #推論を実行 vocoder.eval() sample_generated_waveform = vocoder.generate(sample_spectrogram[None, ...].transpose(1, 2).to(device)) vocoder.train() #結果を保存する torchaudio.save(os.path.join(out_dir, "sample_audio.wav"), sample_waveform[None, ...], sample_rate=16000) torchaudio.save(os.path.join(out_dir, "sample_generated_audio.wav"), sample_generated_waveform[None, ...], sample_rate=16000) #比較用として、GriffinLimによって生成したwavも出力する torchaudio.save(os.path.join(out_dir, "sample_griffinlim_audio.wav"), sample_griffinlim_waveform[None, ...], sample_rate=16000) #音声を、波形とスペクトログラム2つの観点で比較するためのグラフを出力する waveform_list = [ (sample_waveform, "original_waveform"), (sample_generated_waveform, "waveform generated by Vocoder"), (sample_griffinlim_waveform, "waveform generated by GriffinLim"), ] spectrogram_list = [ (sample_spectrogram, "spectrogram"), ] output_comparison_graph( save_path = os.path.join(out_dir, "comparison.png"), waveform_list=waveform_list, #waveform_list : (torch.size([frame]), graph_title)を要素に持つlist spectrogram_list=spectrogram_list, #spectrogram_list : (torch.Size([frequency, frame]), graph_title)を要素に持つlist sampling_rate=16000, #サンプリングレート ) now_iteration += 1 #イテレーション数が上限に達したらループを抜ける if(now_iteration>=total_iterations): break #イテレーション数が上限に達したらループを抜ける if(now_iteration>=total_iterations): break
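# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the loss above trains
# the vocoder autoregressively -- samples [0, T-1] go in, samples [1, T] are
# the targets.  The snippet below isolates that shifted input/target pattern
# with a dummy embedding standing in for the real Vocoder; the 256 class
# count is an assumption for this example, while the length 24*128+1 matches
# extract_frames=24 and hop_length=128 used above.
import torch
import torch.nn as nn
import torch.nn.functional as F

_num_classes = 256                                         # assumed quantization levels
_dummy_model = nn.Embedding(_num_classes, _num_classes)    # stand-in for the Vocoder
_wave = torch.randint(0, _num_classes, (4, 24 * 128 + 1))  # (batch, frame*hop_length+1)
_logits = _dummy_model(_wave[:, :-1])                      # inputs: all but the last sample
_loss = F.cross_entropy(_logits.transpose(1, 2),           # (batch, classes, time)
                        _wave[:, 1:])                      # targets: one sample ahead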
from pykeepass import PyKeePass class MoreThanOneServersGroupError(Exception): """if there is more than one servers group outside recycle bin in KP""" class NoServerGroupError(Exception): """No server group was found to get data.""" class ServersGroupPresentError(Exception): """If first run, there should be no active server folders.""" class KPManager(object): def __init__(self, kp_db_path, password, testing=False): self.kp_db_path = kp_db_path self.password = password self.conn = self.init_connection() self.server_group = None self.rubbish = self.find_recycle_bin() self.testing = testing if self.testing: self.test_group = self.testing_configuration() def init_connection(self): try: kp = PyKeePass(self.kp_db_path, password=self.password) except IOError: raise except Exception as e: raise else: return kp def find_recycle_bin(self): rubbish = self.conn.find_groups_by_name("Recycle Bin", first=True) return rubbish def first_run_check(self): server_group = self.filter_trashed_server_groups( self.conn.find_groups_by_name("servers"), first_run=True ) def filter_trashed_server_groups(self, server_groups, first_run=False): server_groups_new = [] for server_group in server_groups: if server_group.parentgroup.uuid == self.rubbish.uuid: continue server_groups_new.append(server_group) if len(server_groups_new) == 1: if first_run: raise ServersGroupPresentError("Delete old servers group") return server_groups_new[0] if len(server_groups_new) == 0: return None raise MoreThanOneServersGroupError( "More than one 'servers' group in KP. Delete old groups." ) def find_or_create_server_group(self, find_only=False): server_group = self.filter_trashed_server_groups( self.conn.find_groups_by_name("servers") ) if server_group: self.server_group = server_group else: if find_only: raise NoServerGroupError("No server group.") server_group = self.conn.add_group(self.conn.root_group, "servers") self.server_group = server_group def testing_configuration(self): test_group = self.conn.add_group(self.conn.root_group, "test") return test_group def get_data_from_servers_group(self, ssh_conf): conns = [] for i in self.server_group.entries: if ssh_conf: # TODO what if does not have title? 
key_val = i.title, str(i.password) else: key_val = (i.username + "@" + i.url + ":" + "22", str(i.password)) conns.append(key_val) return conns def build_connection_map_from_entries(self, ssh_conf=False): return dict(self.get_data_from_servers_group(ssh_conf)) def parse_and_create_entry_map(self, key_val_pair): key, pwd = key_val_pair usr, url_port = key.split("@") url, port = url_port.split(":") return { "destination_group": self.test_group if self.testing else self.server_group, "title": url.split(".")[0], "username": usr, "password": <PASSWORD>, "url": url } def add_entry(self, entry): self.conn.add_entry(**entry) def add_all(self, dict_obj): self.find_or_create_server_group() for entry in dict_obj.iteritems(): entry = self.parse_and_create_entry_map(entry) self.add_entry(entry) def add_all_ssh(self, host_new_pwd, ssh_config_obj): self.find_or_create_server_group() for host, obj in ssh_config_obj.iteritems(): if host in host_new_pwd: try: host_name = obj["HostName"] except KeyError: host_name = host entry = { "destination_group": self.server_group, "title": host, "username": obj["User"], "password": host_new_pwd[host], "url": host_name, "notes": obj["IdentityFile"] } self.add_entry(entry) def delete_group(self, group): self.conn.delete_group(group) def delete_server_group(self): self.conn.delete_group(self.server_group) def save_changes(self): self.conn.save() if __name__ == "__main__": from config import config connections = { '<EMAIL>:22': 'IqIQSgg5aUZEPv82%z87e8RHY', '<EMAIL>.in.uptime.at:22': 'fJNiDJZd6VXQo!iyj5%BRMh37', '<EMAIL>:22': 'G*hK42^t*Q(CjZmMx0firycou', '<EMAIL>:22': 'jHA^f)TkR96uE*W9Y4Ja^)8yJ' } kp = KPManager(config.keepass_db_path, config.keepass_pwd) x = kp.conn.find_entries_by_title("aaa")[0] y = x.password print y def escape_single_quotes(pwd): res = pwd.split("'") if len(res) == 1: return False, pwd if res[0] is '' and res[-1] is '': escaped = "'\\''".join(res)[1:-1] elif res[0] is '' and res[-1] is not '': escaped = "'\\''".join(res)[1:] + "'" elif res[-1] is '' and res[0] is not '': escaped = "'" + "'\\''".join(res)[:-1] else: escaped = "'" + "'\\''".join(res) + "'" return True, escaped y = escape_single_quotes(y) print y cmd = "passwd <<< %s$'\\n''%s'$'\\n''%s'" % (y, "andrej", "andrej123") print cmd
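# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): keys produced by
# build_connection_map_from_entries(ssh_conf=False) have the form
# "user@host:port".  The standalone helper below shows how such a key splits
# back apart, mirroring the parsing in parse_and_create_entry_map without
# touching KeePass; the sample value is invented for this example.
def _split_connection_key(key):
    # "user@host:port" -> (user, host, port)
    usr, url_port = key.split("@")
    url, port = url_port.split(":")
    return usr, url, port

assert _split_connection_key("deploy@db01.example.org:22") == (
    "deploy", "db01.example.org", "22")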
import sys from db_handler import DBHandler from osm_handler import OSMHandler def get_args(): import argparse p = argparse.ArgumentParser(description="Data preparation for Miami's OSM Building import") p.add_argument('-setup', '--setup', help='Set up Postgres DB.', action='store_true') p.add_argument('-bd', '--buildings_download', help='Download Buildings from OSM', action="store_true") p.add_argument('-ad', '--address_download', help='Download Addresses from OSM', action="store_true") p.add_argument('-rd', '--roads_download', help='Download highway=* from OSM', action="store_true") p.add_argument('-b', '--bbox', help='BBOX for OSM download (min_lat, min_long, max_lat, max_long). Whole extent of Large buildings is used if left empty') p.add_argument('-msi', '--move_self_intersect', help='Moves self intersecting buildings to manual bucket.', action="store_true") p.add_argument('-de', '--delete_err', help='Removes erroneous buildings ', action='store_true') p.add_argument('-mi', '--move_intersect', help='Moves buildings that share common border to manual bucket', action="store_true") p.add_argument('-d', '--dsn', help='Dsn for database connection.') p.add_argument('-i', '--intersect', help='Performs intersection of Large Buildings and OSM buildings.', action="store_true") p.add_argument('-v', '--vacuum', help='Vacuums Postgres DB.', action="store_true") p.add_argument('-ca', '--check_address', help='Checks whether buildings to upload overlap with existing OSM addresses.', action="store_true") p.add_argument('-crr', '--check_road_rail', help='Checks whether buildings to upload overlap with OSM highway=* or railway=*.', action="store_true") p.add_argument('-idx', '--index_data', help='Creates indexes on several tables.', action="store_true") p.add_argument('-a', '--assign_address', help='Assigns an address to buildings with only 1 overlapping address point.', action="store_true") p.add_argument('-r', '--report', help='Prints out a quick report.', action="store_true") return p.parse_args() if __name__ == "__main__": args = vars(get_args()) setup = args["setup"] building_download = args["buildings_download"] address_download = args["address_download"] roads_download = args["roads_download"] delete_err = args["delete_err"] move_self_intersect = args["move_self_intersect"] move_intersect = args["move_intersect"] dsn = args["dsn"] intersect = args["intersect"] address = args["assign_address"] check_address = args["check_address"] check_road_rail = args["check_road_rail"] vacuum = args["vacuum"] report = args["report"] index = args["index_data"] bbox = args["bbox"] db = DBHandler(dsn) osm = OSMHandler(bbox) if setup: print 'Setting up the database.' db.setup_db() if building_download: print 'Querying OverpassAPI for buildings.' buildings = osm.query_buildings() print 'Uploading OSM buildings to Postgres...' db.upload_osm(buildings, 'osm_buildings') if address_download: print 'Querying OverpassAPI for addresses.' addresses = osm.query_address() print 'Uploading OSM addresses to Postgres...' db.upload_osm(addresses, 'osm_addresses') if roads_download: print 'Querying OverpassAPI for highway=* and railway=*.' roads = osm.query_roads() print 'Uploading OSM highway=* and railway=* to Postgres...' db.upload_osm(roads, 'osm_highway_railway') if vacuum: print 'Updating DB stats.' db.update_stats() if index: print 'Creating multiple indexes.' db.create_index() if delete_err: print 'Removing faulty buildings.' db.delete_err_buildings() if intersect: print 'Intersecting OSM buildings with Large buildings. 
Populating tables for overlapping and non-overlapping buildings.' db.do_intersection() if move_self_intersect: print 'Checking self intersecting buildings and moving them to manual bucket.' db.move_self_intersect() if move_intersect: print 'Moving buildings that share common border to manual bucket' db.move_intersect() if address: print 'Assigning addresses to buildings.' db.update_address() if check_address: print 'Checking OSM addresses in the proximity of buildings.' db.check_and_move('address') if check_road_rail: print 'Checking buildings overlapping with highway/railway.' db.check_and_move('road/rail') if report: db.print_report() print 'Closing DB connection.' db.close_db_conn()
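# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original script): a typical run
# chains the flags defined above -- set up the schema, download OSM data for
# a bounding box, then intersect and QA.  The script name "prepare.py", the
# DSN and the BBOX string format are placeholders assumed for this example.
#
#   python prepare.py --setup --dsn "dbname=miami user=osm"
#   python prepare.py --buildings_download --address_download --roads_download \
#       --bbox "25.70,-80.32,25.86,-80.12" --dsn "dbname=miami user=osm"
#   python prepare.py --intersect --move_self_intersect --check_address --report \
#       --dsn "dbname=miami user=osm"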
import abc import logging import os import random import tempfile import threading from streamlink.compat import is_py3, is_win32 if is_win32: from ctypes import windll, cast, c_ulong, c_void_p, byref log = logging.getLogger(__name__) _lock = threading.Lock() _id = 0 ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) class NamedPipeBase(ABC): def __init__(self): global _id with _lock: _id += 1 self.name = "streamlinkpipe-{0}-{1}-{2}".format(os.getpid(), _id, random.randint(0, 9999)) log.info("Creating pipe {0}".format(self.name)) self._create() @abc.abstractmethod def _create(self): # type: () -> None raise NotImplementedError @abc.abstractmethod def open(self): # type: () -> None raise NotImplementedError @abc.abstractmethod def write(self, data): # type: () -> int raise NotImplementedError @abc.abstractmethod def close(self): # type: () -> None raise NotImplementedError class NamedPipePosix(NamedPipeBase): mode = "wb" permissions = 0o660 fifo = None def _create(self): self.path = os.path.join(tempfile.gettempdir(), self.name) os.mkfifo(self.path, self.permissions) def open(self): self.fifo = open(self.path, self.mode) def write(self, data): return self.fifo.write(data) def close(self): if self.fifo is not None: self.fifo.close() self.fifo = None os.unlink(self.path) class NamedPipeWindows(NamedPipeBase): bufsize = 8192 pipe = None PIPE_ACCESS_OUTBOUND = 0x00000002 PIPE_TYPE_BYTE = 0x00000000 PIPE_READMODE_BYTE = 0x00000000 PIPE_WAIT = 0x00000000 PIPE_UNLIMITED_INSTANCES = 255 INVALID_HANDLE_VALUE = -1 @staticmethod def _get_last_error(): error_code = windll.kernel32.GetLastError() raise OSError("Named pipe error code 0x{0:08X}".format(error_code)) def _create(self): if is_py3: create_named_pipe = windll.kernel32.CreateNamedPipeW else: create_named_pipe = windll.kernel32.CreateNamedPipeA self.path = os.path.join("\\\\.\\pipe", self.name) self.pipe = create_named_pipe( self.path, self.PIPE_ACCESS_OUTBOUND, self.PIPE_TYPE_BYTE | self.PIPE_READMODE_BYTE | self.PIPE_WAIT, self.PIPE_UNLIMITED_INSTANCES, self.bufsize, self.bufsize, 0, None ) if self.pipe == self.INVALID_HANDLE_VALUE: self._get_last_error() def open(self): windll.kernel32.ConnectNamedPipe(self.pipe, None) def write(self, data): written = c_ulong(0) windll.kernel32.WriteFile( self.pipe, cast(data, c_void_p), len(data), byref(written), None ) return written.value def close(self): if self.pipe is not None: windll.kernel32.DisconnectNamedPipe(self.pipe) windll.kernel32.CloseHandle(self.pipe) self.pipe = None NamedPipe = NamedPipePosix if not is_win32 else NamedPipeWindows
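# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a POSIX FIFO
# only finishes open() once a reader is attached, so this example drains the
# pipe from a helper thread before writing.  The function below is an
# assumption made for this example and is not called anywhere.
def _named_pipe_demo():
    pipe = NamedPipe()                  # NamedPipePosix on non-Windows platforms
    received = []

    def _reader():
        with open(pipe.path, "rb") as fifo:
            received.append(fifo.read())

    reader = threading.Thread(target=_reader)
    reader.start()
    pipe.open()                         # unblocks once the reader has the FIFO open
    pipe.write(b"demo payload")
    pipe.close()                        # closes the write end and unlinks the path
    reader.join()
    assert received == [b"demo payload"]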
<gh_stars>0 from datetime import datetime from epaper.appconfig import AppConfig from epaper.epaper import EPaper from epaper.scraper import Scraper from epaper.ui import UI import click import epaper import json import logging import os logger = logging.getLogger('cli') def doit(interactive=True, publication_code=None, edition_code=None, date=None, from_config=False): # noqa: we know this function is complex '''Main Execution Module''' # Load app configuration: app-specific configuration management app_config = AppConfig() # setup logging logging.basicConfig( filename=app_config.config['App']['log_file'], filemode='w', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG ) # choose a default publisher -- as of now this is the only one. publisher = 'TOI' # Scraper instance: scraper functions scraper = Scraper(publisher=publisher, app_config=app_config) # UI instance: generic ui interaction functions ui = UI(publisher=publisher, app_config=app_config, text=True) # Data instance: Data management epaper = EPaper(publisher=publisher, app_config=app_config) # Pick a publication doc = scraper.fetch(scraper.site_archive_url) # Highlight if not available message = 'This website is currently not available in your region.' if doc and (message in doc.body.text): logger.error(message) print(message) return False if doc: epaper.publications = scraper.parse_publication_codes(doc) if publication_code and \ (publication_code in epaper.publications.values()): # non-interactive with cli options epaper.selected_publication = [ (k, v) for k, v in epaper.publications.items() if v == publication_code][0] elif publication_code is None and from_config: # non-interactive with config file publication_code = app_config.config[publisher].get( 'selected_pub_code', None) if publication_code in epaper.publications.values(): epaper.selected_publication = [ (k, v) for k, v in epaper.publications.items() if v == publication_code][0] else: # simple interactive mode epaper.selected_publication = ui.select_publication( epaper.publications, default=app_config.config[publisher].get( 'selected_pub_code', None) ) else: logger.error('Could not obtain publication codes.') return False # Pick an edition # logger.info(f'XXX - {epaper.selected_publication[1]}') doc = scraper.fetch( scraper.site_archive_edition_url.format( pub_code=epaper.selected_publication[1])) if doc: epaper.editions = scraper.parse_edition_codes(doc) if edition_code and \ (edition_code in epaper.editions.values()): # non-interactive with cli options epaper.selected_edition = [ (k, v) for k, v in epaper.editions.items() if v == edition_code][0] elif edition_code is None and from_config: # non-interactive with config file edition_code = app_config.config[publisher].get( 'selected_edition_code', None) if edition_code in epaper.editions.values(): epaper.selected_edition = [ (k, v) for k, v in epaper.editions.items() if v == edition_code][0] else: # simple interactive mode epaper.selected_edition = ui.select_edition( epaper.editions, default=app_config.config[publisher].get( 'selected_edition_code', None) ) else: logger.error('Could not obtain edition codes.') return False if epaper.selected_publication[1] == '' or \ epaper.selected_edition[1] == '': return False epaper.save_codes_to_config() # Pick a date, if we are in interactive mode, else it defaults to todays date. # XXX: date may not be required for some editions... 
if interactive: epaper.selected_date = ui.select_pub_date() elif isinstance(date, type('')): epaper.selected_date = datetime.strptime(date, '%Y-%m-%d') # $HOME/cache_dir/pub/edition/date epaper.create_download_dir() # inform ui ui.download_path = epaper.download_path logger.info('Downloading epaper...') logger.info('pub_code={0}, edition={1}, date={2}'.format( epaper.selected_publication[1], epaper.selected_edition[1], str(epaper.selected_date.date()) )) date_str = '{year:04d}{month:02d}{day:02d}'.format( year=epaper.selected_date.year, month=epaper.selected_date.month, day=epaper.selected_date.day ) toc_url = scraper.build_toc_url( pub_code=epaper.selected_publication[1], edition_code=epaper.selected_edition[1], date_str=date_str ) epaper.toc_dict = scraper.fetch(toc_url) # check for valid dict format. if epaper.toc_dict is None: logger.error('Table of contents could not be retrieved! exiting...') return False if 'toc' not in epaper.toc_dict: logger.error('TOC JSON format error! exiting...') return False # save the toc to default download location toc_file = os.path.join(epaper.download_path, 'toc.json') with open(toc_file, 'w') as toc: toc.write(json.dumps(epaper.toc_dict)) epaper.num_pages = len(epaper.toc_dict['toc']) # build the epaper.pages list of epaper.Page structures for i, page in enumerate(epaper.toc_dict['toc']): ui.update_status( message='Retrieving page {0} metadata'.format(i), end='', flush=True ) urls = scraper.build_page_urls( pub_code=epaper.selected_publication[1], edition_code=epaper.selected_edition[1], date_str=date_str, page_folder=page['page_folder'] ) urls['thumbnail'][1] = os.path.join( epaper.download_path, 'page-{0:03d}-thumbnail.jpg'.format(int(page['page']))) urls['lowres'][1] = os.path.join( epaper.download_path, 'page-{0:03d}-lowres.jpg'.format(int(page['page']))) urls['highres'][1] = os.path.join( epaper.download_path, 'page-{0:03d}-highres.jpg'.format(int(page['page']))) urls['pdf'][1] = os.path.join( epaper.download_path, 'page-{0:03d}-highres.pdf'.format(int(page['page']))) epaper.pages.append( epaper.Page( number=int(page['page']), title=page['page_title'], urls=urls ) ) # download required pages ui.update_status( message='Downloading pages...', end='', flush=True ) for i, page in enumerate(epaper.pages): page_downloads = 0 for j, url_key in enumerate(page.urls): url = page.urls[url_key][0] filename = page.urls[url_key][1] file_exists = page.urls[url_key][2] if file_exists: continue if url_key == 'pdf': continue status, count = scraper.save_image(url, filename, delay=False) if status: page_downloads += 1 # update file_exists flag in epaper.pages if os.path.exists(filename): epaper.pages[i].urls[url_key][2] = True # track if page_downloads >= 2: # successful download and save of thumbnail and at least one of # low or highres images. ui.num_downloads += 1 ui.update_status( message='Downloaded page {}'.format(page.number), end='', flush=True ) else: # note failed attempts ui.failed.append(page.number) ui.update_status( message='Failed to downloaded page {}'.format(page.number), end='', flush=True ) # final counts ui.update_status(message='Downloaded {0} pages.'.format(ui.num_downloads)) if len(ui.failed) > 0: ui.update_status(message='Failed to download {0} pages: {1}'.format( len(ui.failed), repr(ui.failed))) # save page metadata as json, so UI tools can read it. 
epaper.save_page_metadata() # notify ui.notify( publication=epaper.selected_publication[0], edition=epaper.selected_edition[0] ) CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.command(context_settings=CONTEXT_SETTINGS) @click.option('--publication_code', default='', help='Publication code as on SITE_ARCHIVE') @click.option('--edition_code', default='', help='Edition code as on SITE_ARCHIVE') @click.option('--date', default=str(datetime.now().date()), help='Edition date, default is todays date.') @click.option('--from-config', is_flag=True, help='Use publication and edition codes from default config file.') @click.option('--verbose', is_flag=True, help='Be more verbose on STDOUT.') @click.option('--version', is_flag=True, help='Print version.') def main(publication_code, edition_code, date, from_config, verbose, version): '''EPaper Command Line Interface.''' if version: click.echo('EPaper version {0}'.format(epaper.__version__)) elif publication_code and \ edition_code and \ date: if verbose: click.echo('Non-interactive mode.') return doit(interactive=False, publication_code=publication_code, edition_code=edition_code, date=date, from_config=False) elif from_config: if verbose: click.echo('Using configured settings.') return doit(interactive=False, from_config=True) else: if verbose: click.echo('Using interactive mode.') return doit(interactive=True, from_config=False) if __name__ == '__main__': main()
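# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the click
# command above can be driven non-interactively with explicit codes, or from
# the codes saved in the config file.  The entry point name "epaper.cli" and
# the publication/edition codes are placeholders assumed for this example.
#
#   python -m epaper.cli --publication_code TOIBG --edition_code BG --date 2021-01-15
#   python -m epaper.cli --from-config --verbose
#   python -m epaper.cli --version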
try: from . import generic as g except BaseException: import generic as g class VoxelTest(g.unittest.TestCase): def test_voxel(self): """ Test that voxels work at all """ for m in [g.get_mesh('featuretype.STL'), g.trimesh.primitives.Box(), g.trimesh.primitives.Sphere()]: for pitch in [.1, .1 - g.tol.merge]: v = m.voxelized(pitch) assert len(v.matrix.shape) == 3 assert v.shape == v.matrix.shape assert v.volume > 0.0 assert v.origin.shape == (3,) assert isinstance(v.pitch, float) assert g.np.isfinite(v.pitch) assert isinstance(v.filled_count, int) assert v.filled_count > 0 box = v.as_boxes(solid=False) boxF = v.as_boxes(solid=True) assert isinstance(box, g.trimesh.Trimesh) assert abs(boxF.volume - v.volume) < g.tol.merge assert g.trimesh.util.is_shape(v.points, (-1, 3)) assert len(v.sparse_solid) > len(v.sparse_surface) assert g.np.all(v.is_filled(v.points)) outside = m.bounds[1] + m.scale assert not v.is_filled(outside) try: cubes = v.marching_cubes assert cubes.area > 0.0 except ImportError: g.log.info('no skimage, skipping marching cubes test') g.log.info('Mesh volume was %f, voxelized volume was %f', m.volume, v.volume) def test_marching(self): """ Test marching cubes on a matrix """ try: from skimage import measure # NOQA except ImportError: g.log.warn('no skimage, skipping marching cubes test') return # make sure offset is correct matrix = g.np.ones((3, 3, 3), dtype=g.np.bool) mesh = g.trimesh.voxel.matrix_to_marching_cubes( matrix=matrix, pitch=1.0, origin=g.np.zeros(3)) assert mesh.is_watertight mesh = g.trimesh.voxel.matrix_to_marching_cubes( matrix=matrix, pitch=3.0, origin=g.np.zeros(3)) assert mesh.is_watertight def test_marching_points(self): """ Try marching cubes on points """ try: from skimage import measure # NOQA except ImportError: g.log.warn('no skimage, skipping marching cubes test') return # get some points on the surface of an icosahedron points = g.trimesh.creation.icosahedron().sample(1000) # make the pitch proportional to scale pitch = points.ptp(axis=0).min() / 10 # run marching cubes mesh = g.trimesh.voxel.points_to_marching_cubes( points=points, pitch=pitch) # mesh should have faces assert len(mesh.faces) > 0 # mesh should be roughly centered assert (mesh.bounds[0] < -.5).all() assert (mesh.bounds[1] > .5).all() def test_local(self): """ Try calling local voxel functions """ mesh = g.trimesh.creation.box() # it should have some stuff voxel = g.trimesh.voxel.local_voxelize( mesh=mesh, point=[.5, .5, .5], pitch=.1, radius=5, fill=True) assert len(voxel[0].shape) == 3 # try it when it definitely doesn't hit anything empty = g.trimesh.voxel.local_voxelize( mesh=mesh, point=[10, 10, 10], pitch=.1, radius=5, fill=True) # shouldn't have hit anything assert len(empty[0]) == 0 # try it when it is in the center of a volume g.trimesh.voxel.local_voxelize( mesh=mesh, point=[0, 0, 0], pitch=.1, radius=2, fill=True) def test_points_to_from_indices(self): # indices = (points - origin) / pitch points = [[0, 0, 0], [0.04, 0.55, 0.39]] origin = [0, 0, 0] pitch = 0.1 indices = [[0, 0, 0], [0, 6, 4]] # points -> indices indices2 = g.trimesh.voxel.points_to_indices( points=points, origin=origin, pitch=pitch) g.np.testing.assert_allclose(indices, indices2, atol=0, rtol=0) # indices -> points points2 = g.trimesh.voxel.indices_to_points(indices=indices, origin=origin, pitch=pitch) g.np.testing.assert_allclose(g.np.array(indices) * pitch + origin, points2, atol=0, rtol=0) g.np.testing.assert_allclose(points, points2, atol=pitch / 2 * 1.01, rtol=0) # indices -> points -> indices (this 
must be consistent) points2 = g.trimesh.voxel.indices_to_points(indices=indices, origin=origin, pitch=pitch) indices2 = g.trimesh.voxel.points_to_indices(points=points2, origin=origin, pitch=pitch) g.np.testing.assert_allclose(indices, indices2, atol=0, rtol=0) def test_as_boxes(self): voxel = g.trimesh.voxel pitch = 0.1 origin = (0, 0, 0) matrix = g.np.eye(9, dtype=g.np.bool).reshape((-1, 3, 3)) centers = voxel.matrix_to_points(matrix=matrix, pitch=pitch, origin=origin) v = voxel.Voxel(matrix=matrix, pitch=pitch, origin=origin) boxes1 = v.as_boxes() boxes2 = voxel.multibox(centers, pitch) colors = [g.trimesh.visual.DEFAULT_COLOR] * matrix.sum() * 12 for boxes in [boxes1, boxes2]: g.np.testing.assert_allclose( boxes.visual.face_colors, colors, atol=0, rtol=0) # check assigning a single color color = [255, 0, 0, 255] boxes1 = v.as_boxes(colors=color) boxes2 = voxel.multibox(centers=centers, pitch=pitch, colors=color) colors = g.np.array([color] * len(centers) * 12) for boxes in [boxes1, boxes2]: g.np.testing.assert_allclose( boxes.visual.face_colors, colors, atol=0, rtol=0) # check matrix colors colors = color * g.np.ones(g.np.append(v.shape, 4), dtype=g.np.uint8) boxes = v.as_boxes(colors=colors) assert g.np.allclose( boxes.visual.face_colors, color, atol=0, rtol=0) def test_is_filled(self): """More rigorous test of Voxel.is_filled.""" n = 10 matrix = g.np.random.uniform(size=(n + 1,) * 3) > 0.5 not_matrix = g.np.logical_not(matrix) pitch = 1. / n origin = g.np.random.uniform(size=(3,)) vox = g.trimesh.voxel.Voxel(matrix, pitch, origin) not_vox = g.trimesh.voxel.Voxel(not_matrix, pitch, origin) for a, b in ((vox, not_vox), (not_vox, vox)): points = a.points # slight jitter - shouldn't change indices points += ( g.np.random.uniform(size=points.shape) - 1) * 0.4 * pitch g.np.random.shuffle(points) # all points are filled, and no empty points are filled assert g.np.all(a.is_filled(points)) assert not g.np.any(b.is_filled(points)) # test different number of dimensions points = g.np.stack([points, points[-1::-1]], axis=1) assert g.np.all(a.is_filled(points)) assert not g.np.any(b.is_filled(points)) def test_vox_sphere(self): # should be filled from 0-9 matrix = g.np.ones((10, 10, 10)) vox = g.trimesh.voxel.Voxel( matrix, pitch=0.1, origin=[0, 0, 0]) # epsilon from zero eps = 1e-4 # should all be contained grid = g.trimesh.util.grid_linspace( [[eps] * 3, [9 - eps] * 3], 11) * vox.pitch assert vox.is_filled(grid).all() # push it outside the filled area grid += 1.0 assert not vox.is_filled(grid).any() if __name__ == '__main__': g.trimesh.util.attach_to_log() g.unittest.main()
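# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): per the comment in
# test_points_to_from_indices, indices and points are related by
# indices ~= round((points - origin) / pitch) and points = indices * pitch + origin.
# The numpy-only check below reproduces that arithmetic for the same sample
# values without importing trimesh; the rounding step is an assumption
# consistent with the expected indices above.
import numpy as _np

_points = _np.array([[0.0, 0.0, 0.0], [0.04, 0.55, 0.39]])
_origin = _np.zeros(3)
_pitch = 0.1
_indices = _np.round((_points - _origin) / _pitch).astype(int)
assert (_indices == _np.array([[0, 0, 0], [0, 6, 4]])).all()
assert _np.allclose(_indices * _pitch + _origin, _points, atol=_pitch / 2 * 1.01)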
<reponame>cjshearer/project-athena<gh_stars>0 from utils.model import load_pool, load_lenet from utils.file import load_from_json from utils.metrics import error_rate, get_corrections from models.athena import Ensemble, ENSEMBLE_STRATEGY import os import numpy as np def collect_raw_prediction(trans_configs, model_configs, data_configs, use_logits=False, active_list=False): """ :param trans_configs: :param model_configs: :param data_configs: :param use_logits: Boolean. If True, the model will return logits value (before ``softmax``), return probabilities, otherwise. :param active_list: Boolean. If True, only the supplied list of active WDs will be used :return: 3D array of predictions """ # load the pool and create the ensemble pool, wd_models = load_pool(trans_configs=trans_configs, model_configs=model_configs, active_list=active_list, use_logits=use_logits, wrap=True ) athena = Ensemble(classifiers=list(pool.values()), strategy=ENSEMBLE_STRATEGY.MV.value) # load training/testing data print('>>> Loading benign and adversarial samples as training/testing data') bs_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file')) ae_file_list = data_configs.get('ae_files') x = np.empty([len(ae_file_list)+1, 10000, 28, 28, 1]) # load the benign samples x[0] = np.load(bs_file) # load the adversarial samples for i in range(len(ae_file_list)): ae_file = os.path.join(data_configs.get('dir'), ae_file_list[i]) x[i+1] = np.load(ae_file) print('>>> Loaded samples as ndarray with shape', x.shape, ' => (sets of images, number of images in set, image width, image height, pixel value)') print(' (0, 10000, 28, 28, 1) is the set of benign samples') print(' (1:45, 10000, 28, 28, 1) are the sets of adversarial samples','\n') print('>>> Collecting raw predictions from', len(wd_models), 'models for', x.shape[0], 'sets of', x.shape[1], 'images:') raw_preds = np.empty([x.shape[0], len(wd_models), x.shape[1], 10]) for i in range(x.shape[0]): print(' collecting raw predictions for set ', i) raw_preds[i] = (athena.predict(x=x[i], raw=True)) return raw_preds if __name__ == '__main__': # load experiment configurations trans_configs = load_from_json("../../configs/task2/cody_configs/athena-mnist.json") model_configs = load_from_json("../../configs/task2/cody_configs/model-mnist.json") data_configs = load_from_json("../../configs/task2/cody_configs/data-mnist.json") output_dir = "../../../Task2/data" # collect the predictions raw_preds = collect_raw_prediction(trans_configs=trans_configs, model_configs=model_configs, data_configs=data_configs, use_logits=True, active_list=True) file = os.path.join(output_dir, "predictions.npz") print('>>> Saving compressed predictions to ', file) np.savez_compressed(file, raw_preds) print('>>> Predictions saved. You may now close the terminal.')
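# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): reading the file
# written above.  A positional array passed to np.savez_compressed is stored
# under the key 'arr_0'; the shape comment assumes the benign-plus-adversarial
# layout printed above, with the model count depending on the active list.
#
#   loaded = np.load(os.path.join(output_dir, "predictions.npz"))
#   raw_preds = loaded["arr_0"]     # shape: (num_sets, num_models, 10000, 10)
#   benign_preds = raw_preds[0]     # set 0 holds the benign samples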
<filename>klever/core/vtg/emg/common/process/test_process.py # # Copyright (c) 2021 ISP RAS (http://www.ispras.ru) # Ivannikov Institute for System Programming of the Russian Academy of Sciences # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import pytest from klever.core.vtg.emg.common.process import Process from klever.core.vtg.emg.common.process.parser import parse_process from klever.core.vtg.emg.common.process.actions import Receive, Dispatch, Block, Concatenation, Choice, Parentheses @pytest.fixture def process(): process = Process('test') test = "(((a).<b> | [c]) . [d]) | [e]" # Parse assert parse_process(process, test) process.actions['a'] = Receive('a') process.actions['b'] = Block('b') for name in 'cde': process.actions[name] = Dispatch(name) return process @pytest.fixture def new(): return Block('r') def test_add_condition(process): new = process.add_condition('x', ['0 == 0'], ['x = 1;'], 'This is a test') assert new and isinstance(new, Block) assert str(new) in process.actions assert not process.actions.behaviour(new.name) def test_add_replace_action(new, process): new = Block('r') old = process.actions['d'] operator = process.actions.behaviour('d').pop().my_operator assert isinstance(operator, Concatenation) process.replace_action(old, new, purge=True) assert operator[-1].kind is Block assert operator[-1].name == 'r' assert operator[-1].description is new assert str(old) not in process.actions assert not process.actions.behaviour(str(old)) assert len(process.actions.behaviour(str(new))) == 1 def test_add_insert_action(new, process): target = process.actions['d'] operator = process.actions.behaviour('d').pop().my_operator process.insert_action(new, target, before=True) assert operator[-2].kind is Block, repr(operator) assert operator[-2].name == 'r', repr(operator) assert operator[-2].description is new, f"{repr(operator)} {operator[-1].description}" assert str(new) in process.actions assert operator[-1].kind is Dispatch, repr(operator) assert operator[-1].name == 'd', repr(operator) assert operator[-1].description is target, f"{repr(operator)} {operator[-1].description}" assert str(target) in process.actions process.insert_action(new, target, before=False) assert operator[-1].kind is Block, repr(operator) assert operator[-1].name == 'r', repr(operator) assert operator[-1].description is new, repr(operator) assert str(new) in process.actions assert operator[-2].kind is Dispatch, repr(operator) assert operator[-2].name == 'd', repr(operator) assert operator[-2].description is target, repr(operator) assert str(target) in process.actions def test_insert_before(new, process): target = process.actions['c'] operator = process.actions.behaviour('c').pop().my_operator assert isinstance(operator, Concatenation) assert isinstance(operator.my_operator, Choice) # Simple case process.insert_action(new, target, before=True) assert isinstance(operator, Concatenation) assert isinstance(operator.my_operator, Choice) assert str(operator[0].description) == 'r' assert str(operator[1].description) == 'c' def 
test_insert_choice(process): target = process.actions['c'] operator = process.actions.behaviour('c').pop().my_operator assert isinstance(operator, Concatenation) operator = operator.my_operator assert isinstance(operator, Choice) assert len(operator) == 2 # Add more options new_x1 = process.add_condition('x1', [], [], 'This is a test') new_x2 = process.add_condition('x2', [], [], 'This is a test') process.insert_alternative_action(new_x1, target) process.insert_alternative_action(new_x2, target) assert len(operator) == 4 # Then add precondition to x1 new_x3 = process.add_condition('x3', [], [], 'This is a test') process.insert_action(new_x3, new_x1, before=True) operator = process.actions.behaviour('x1').pop().my_operator assert isinstance(operator, Concatenation) assert str(operator[0].description) == 'x3' assert str(operator[1].description) == 'x1'
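# Illustrative sketch (not part of the original tests): the fixture above
# builds a process by parsing an expression first and then registering one
# description object per action name.  The helper below repeats that workflow
# for a minimal two-action process; the expression syntax and the assumption
# that this is a valid way to compose fixtures come from the fixture itself,
# not from additional project documentation.
def _make_minimal_process():
    proc = Process('sketch')
    assert parse_process(proc, "(a).[b]")
    proc.actions['a'] = Receive('a')
    proc.actions['b'] = Dispatch('b')
    return proc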
<reponame>stjordanis/catalyst-1<filename>catalyst/engines/xla.py from typing import Any, Callable, Dict, Optional import numpy as np import torch from torch.utils.data import DataLoader from catalyst.engines.torch import DeviceEngine from catalyst.settings import SETTINGS if SETTINGS.xla_required: import torch_xla.core.xla_model as xm from torch_xla.distributed.parallel_loader import ParallelLoader import torch_xla.distributed.xla_multiprocessing as xmp class XLAEngine(DeviceEngine): """XLA SingleTPU training device engine. Examples: .. code-block:: python import os from datetime import datetime import torch from torch import nn, optim from torch.utils.data import DataLoader from catalyst import dl from catalyst.contrib.datasets import CIFAR10 from catalyst.contrib.nn import ResidualBlock from catalyst.data import transforms def conv_block(in_channels, out_channels, pool=False): layers = [ nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), ] if pool: layers.append(nn.MaxPool2d(2)) return nn.Sequential(*layers) def resnet9(in_channels: int, num_classes: int, size: int = 16): sz, sz2, sz4, sz8 = size, size * 2, size * 4, size * 8 return nn.Sequential( conv_block(in_channels, sz), conv_block(sz, sz2, pool=True), ResidualBlock(nn.Sequential(conv_block(sz2, sz2), conv_block(sz2, sz2))), conv_block(sz2, sz4, pool=True), conv_block(sz4, sz8, pool=True), ResidualBlock(nn.Sequential(conv_block(sz8, sz8), conv_block(sz8, sz8))), nn.Sequential( nn.MaxPool2d(4), nn.Flatten(), nn.Dropout(0.2), nn.Linear(sz8, num_classes) ), ) class CustomRunner(dl.IRunner): def __init__(self, logdir): super().__init__() self._logdir = logdir def get_engine(self): return dl.XLAEngine() def get_loggers(self): return { "console": dl.ConsoleLogger(), "csv": dl.CSVLogger(logdir=self._logdir), "tensorboard": dl.TensorboardLogger(logdir=self._logdir), } @property def stages(self): return ["train"] def get_stage_len(self, stage: str) -> int: return 3 def get_loaders(self, stage: str): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) train_data = CIFAR10(os.getcwd(), train=False, download=True, transform=transform) valid_data = CIFAR10(os.getcwd(), train=False, download=True, transform=transform) if self.engine.is_ddp: train_sampler = torch.utils.data.distributed.DistributedSampler( train_data, num_replicas=self.engine.world_size, rank=self.engine.rank, shuffle=True ) valid_sampler = torch.utils.data.distributed.DistributedSampler( valid_data, num_replicas=self.engine.world_size, rank=self.engine.rank, shuffle=False ) else: train_sampler = valid_sampler = None return { "train": DataLoader(train_data, batch_size=32, sampler=train_sampler), "valid": DataLoader(valid_data, batch_size=32, sampler=valid_sampler), } def get_model(self, stage: str): model = self.model \ if self.model is not None \ else resnet9(in_channels=3, num_classes=10) return model def get_criterion(self, stage: str): return nn.CrossEntropyLoss() def get_optimizer(self, stage: str, model): return optim.Adam(model.parameters(), lr=1e-3) def get_scheduler(self, stage: str, optimizer): return optim.lr_scheduler.MultiStepLR(optimizer, [5, 8], gamma=0.3) def get_callbacks(self, stage: str): return { "criterion": dl.CriterionCallback( metric_key="loss", input_key="logits", target_key="targets" ), "optimizer": dl.OptimizerCallback(metric_key="loss"), "scheduler": dl.SchedulerCallback(loader_key="valid", metric_key="loss"), "accuracy": 
dl.AccuracyCallback( input_key="logits", target_key="targets", topk_args=(1, 3, 5) ), "checkpoint": dl.CheckpointCallback( self._logdir, loader_key="valid", metric_key="accuracy", minimize=False, save_n_best=1, ), "tqdm": dl.TqdmCallback(), } def handle_batch(self, batch): x, y = batch logits = self.model(x) self.batch = { "features": x, "targets": y, "logits": logits, } logdir = f"logs/{datetime.now().strftime('%Y%m%d-%H%M%S')}" runner = CustomRunner(logdir) runner.run() """ def __init__(self): """Init.""" super().__init__() self._device = xm.xla_device() def optimizer_step(self, loss, model, optimizer) -> None: """Abstraction over ``optimizer.step()`` step.""" xm.optimizer_step(optimizer, barrier=True) class DistributedXLAEngine(DeviceEngine): """Distributed XLA MultiTPU training device engine. Examples: .. code-block:: python import os from datetime import datetime import torch from torch import nn, optim from torch.utils.data import DataLoader from catalyst import dl from catalyst.contrib.datasets import CIFAR10 from catalyst.contrib.nn import ResidualBlock from catalyst.data import transforms def conv_block(in_channels, out_channels, pool=False): layers = [ nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), ] if pool: layers.append(nn.MaxPool2d(2)) return nn.Sequential(*layers) def resnet9(in_channels: int, num_classes: int, size: int = 16): sz, sz2, sz4, sz8 = size, size * 2, size * 4, size * 8 return nn.Sequential( conv_block(in_channels, sz), conv_block(sz, sz2, pool=True), ResidualBlock(nn.Sequential(conv_block(sz2, sz2), conv_block(sz2, sz2))), conv_block(sz2, sz4, pool=True), conv_block(sz4, sz8, pool=True), ResidualBlock(nn.Sequential(conv_block(sz8, sz8), conv_block(sz8, sz8))), nn.Sequential( nn.MaxPool2d(4), nn.Flatten(), nn.Dropout(0.2), nn.Linear(sz8, num_classes) ), ) class CustomRunner(dl.IRunner): def __init__(self, logdir): super().__init__() self._logdir = logdir def get_engine(self): return dl.DistributedXLAEngine() def get_loggers(self): return { "console": dl.ConsoleLogger(), "csv": dl.CSVLogger(logdir=self._logdir), "tensorboard": dl.TensorboardLogger(logdir=self._logdir), } @property def stages(self): return ["train"] def get_stage_len(self, stage: str) -> int: return 3 def get_loaders(self, stage: str): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) train_data = CIFAR10(os.getcwd(), train=False, download=True, transform=transform) valid_data = CIFAR10(os.getcwd(), train=False, download=True, transform=transform) if self.engine.is_ddp: train_sampler = torch.utils.data.distributed.DistributedSampler( train_data, num_replicas=self.engine.world_size, rank=self.engine.rank, shuffle=True ) valid_sampler = torch.utils.data.distributed.DistributedSampler( valid_data, num_replicas=self.engine.world_size, rank=self.engine.rank, shuffle=False ) else: train_sampler = valid_sampler = None return { "train": DataLoader(train_data, batch_size=32, sampler=train_sampler), "valid": DataLoader(valid_data, batch_size=32, sampler=valid_sampler), } def get_model(self, stage: str): model = self.model \ if self.model is not None \ else resnet9(in_channels=3, num_classes=10) return model def get_criterion(self, stage: str): return nn.CrossEntropyLoss() def get_optimizer(self, stage: str, model): return optim.Adam(model.parameters(), lr=1e-3) def get_scheduler(self, stage: str, optimizer): return optim.lr_scheduler.MultiStepLR(optimizer, [5, 8], gamma=0.3) 
def get_callbacks(self, stage: str): return { "criterion": dl.CriterionCallback( metric_key="loss", input_key="logits", target_key="targets" ), "optimizer": dl.OptimizerCallback(metric_key="loss"), "scheduler": dl.SchedulerCallback(loader_key="valid", metric_key="loss"), "accuracy": dl.AccuracyCallback( input_key="logits", target_key="targets", topk_args=(1, 3, 5) ), "checkpoint": dl.CheckpointCallback( self._logdir, loader_key="valid", metric_key="accuracy", minimize=False, save_n_best=1, ), "tqdm": dl.TqdmCallback(), } def handle_batch(self, batch): x, y = batch logits = self.model(x) self.batch = { "features": x, "targets": y, "logits": logits, } logdir = f"logs/{datetime.now().strftime('%Y%m%d-%H%M%S')}" runner = CustomRunner(logdir) runner.run() """ def __init__(self): """Init.""" super().__init__() self._device = None self._rank = 0 self._world_size = 8 self._backend = "xla" @property def rank(self) -> int: """Process rank for distributed training.""" return self._rank @property def world_size(self) -> int: """Process world size for distributed training.""" return self._world_size @property def backend(self) -> Optional[str]: """String identifier for distributed backend.""" return self._backend def barrier(self) -> None: """ Synchronizes all processes. This collective blocks processes until the all runs enter the function. """ xm.rendezvous("barrier") def spawn(self, fn: Callable, *args: Any, **kwargs: Any) -> None: """Spawns abstraction for``nprocs`` creation with specified ``fn`` and ``args``/``kwargs``. Args: fn (function): Function is called as the entrypoint of the spawned process. This function must be defined at the top level of a module so it can be pickled and spawned. This is a requirement imposed by multiprocessing. The function is called as ``fn(i, *args)``, where ``i`` is the process index and ``args`` is the passed through tuple of arguments. *args: Arguments passed to spawn method. **kwargs: Keyword-arguments passed to spawn method. Returns: wrapped function. """ return xmp.spawn( fn, args=(self._world_size,), nprocs=self._world_size, start_method="fork" ) def setup_process(self, rank: int = -1, world_size: int = 1): """Initialize DDP variables and processes. Args: rank: process rank. Default is `-1`. world_size: number of devices in netwok to expect for train. Default is `1`. """ self._rank = rank self._world_size = world_size self._device = xm.xla_device() def sync_tensor(self, tensor: torch.Tensor, mode: str) -> torch.Tensor: """Syncs ``tensor`` over ``world_size`` in distributed mode. Args: tensor: tensor to sync across the processes. mode: tensor synchronization type, should be one of 'sum' or 'mean'. Default is 'mean'. Returns: torch.Tensor with synchronized values. Raises: ValueError: if mode is out of ``sum``, ``mean``. 
""" # return tensor if mode not in {"sum", "mean"}: raise ValueError(f"Unknown sync_type '{mode}'") if mode == "sum": return xm.all_reduce("sum", tensor) elif mode == "mean": return xm.all_reduce("sum", tensor, scale=1.0 / self.world_size) def sync_metrics(self, metrics: Dict) -> Dict: """Syncs ``metrics`` over ``world_size`` in the distributed mode.""" metrics = { k: xm.mesh_reduce(k, v.item() if isinstance(v, torch.Tensor) else v, np.mean) for k, v in metrics.items() } return metrics def optimizer_step(self, loss, model, optimizer) -> None: """Abstraction over ``optimizer.step()`` step.""" xm.optimizer_step(optimizer) def autocast_loader(self, loader: DataLoader): """Loader wrapper for the distributed mode.""" return ParallelLoader(loader, [self.device]).per_device_loader(self.device) __all__ = ["XLAEngine", "DistributedXLAEngine"]
<filename>benchmarks/midi_msg.py """ msg.py - MIDI messages http://www.midi.org/techspecs/midimessages.php New messages are created with mido.new() or mido.Message(), which both return a message object. """ from __future__ import print_function from collections import namedtuple # Pitchwheel is a 14 bit signed integer PITCHWHEEL_MIN = -8192 PITCHWHEEL_MAX = 8191 Spec = namedtuple('Spec', ('status_byte', 'type', 'args', 'size')) _MSG_SPECS = [ # # MIDI message specifications # # This is the authorative definition of message types. # # # Channel messages # # pitchwheel value is a signed integer in the range -8192 - 8191 # Spec(0x80, 'note_off', ('channel', 'note', 'velocity'), 3), Spec(0x90, 'note_on', ('channel', 'note', 'velocity'), 3), Spec(0xa0, 'polytouch', ('channel', 'note', 'value'), 3), Spec(0xb0, 'control_change', ('channel', 'control', 'value'), 3), Spec(0xc0, 'program_change', ('channel', 'program',), 3), Spec(0xd0, 'aftertouch', ('channel', 'value',), 3), Spec(0xe0, 'pitchwheel', ('channel', 'value',), 3), # # System common messages # # songpos.pos is 14 bit unsigned int, # seralized as lsb msb # # Todo: rename song to song_select? # # Sysex messages have a potentially infinite size. # Spec(0xf0, 'sysex', ('data',), float('inf')), Spec(0xf1, 'undefined_f1', (), 1), Spec(0xf2, 'songpos', ('pos',), 3), Spec(0xf3, 'song', ('song',), 2), Spec(0xf4, 'undefined_f4', (), 1), Spec(0xf5, 'undefined_f5', (), 1), Spec(0xf6, 'tune_request', (), 1), Spec(0xf7, 'sysex_end', (), 1), # # System realtime messages These can interleave other messages but # they have no data bytes, so that's OK # Spec(0xf8, 'clock', (), 1), Spec(0xf9, 'undefined_f9', (), 1), Spec(0xfa, 'start', (), 1), # Note: 'continue' is a keyword in python, so is # is bound to protomidi.msg.continue_ Spec(0xfb, 'continue', (), 1), Spec(0xfc, 'stop', (), 1), Spec(0xfd, 'undefined_fd', (), 1), Spec(0xfe, 'active_sensing', (), 1), Spec(0xff, 'reset', (), 1), ] # Dictionary for looking up Channel messages have status byte keys for # all channels. This means there are keys for all bytes in range # range(128, 256). _SPEC_LOOKUP = {} # Filled in by _init() def assert_databyte(value): if not (isinstance(value, int) and (0 <= value < 128)): raise ValueError('data byte must be and int in range(0, 128)') class Message(object): """ MIDI message class. New messages are created with mido.new() or mido.Message(). 
Valid arguments are: mido.new('note_off', channel=0, note=0, velocity=0, time=0) mido.new('note_on', channel=0, note=0, velocity=0, time=0) mido.new('polytouch', channel=0, note=0, value=0, time=0) mido.new('control_change', channel=0, control=0, value=0, time=0) mido.new('program_change', channel=0, program=0, time=0) mido.new('aftertouch', channel=0, value=0, time=0) mido.new('pitchwheel', channel=0, value=0, time=0) mido.new('sysex', data=(), time=0) mido.new('undefined_f1', time=0) mido.new('songpos', pos=0, time=0) mido.new('song', song=0, time=0) mido.new('undefined_f4', time=0) mido.new('undefined_f5', time=0) mido.new('tune_request', time=0) mido.new('sysex_end', time=0) mido.new('clock', time=0) mido.new('undefined_f9', time=0) mido.new('start', time=0) mido.new('continue', time=0) mido.new('stop', time=0) mido.new('undefined_fd', time=0) mido.new('active_sensing', time=0) mido.new('reset', time=0) """ def __init__(self, type_or_status_byte, **kw): try: spec = _SPEC_LOOKUP[type_or_status_byte] except KeyError: fmt = '{!r} is an invalid type name or status byte' raise ValueError(fmt.format(type_or_status_byte)) self.__dict__['spec'] = spec self.__dict__['type'] = self.spec.type # # Set default values for attributes # self.__dict__['time'] = 0 for name in self.spec.args: if name == 'data': self.__dict__['data'] = () elif name == 'channel': # This is a channel message, so if the first # arguent to this function was a status_byte, # the lower 4 bits will contain the channel. if isinstance(type_or_status_byte, int): self.__dict__['channel'] = type_or_status_byte & 0x0f else: self.__dict__['channel'] = 0 else: self.__dict__[name] = 0 # # Override attibutes with keyword arguments # for name, value in kw.items(): try: setattr(self, name, value) except AttributeError: fmt = '{!r} is an invalid keyword argument for this message' raise ValueError(fmt.format(name)) def copy(self, **override): """ Return a copy of the message. Attributes can be overriden by passing keyword arguments. msg = Message('note_on', note=20, velocity=64) # Create a note_on msg2 = msg.copy(velocity=32) # New note_on with softer velocity """ # Get values from this object kw = {'time': self.time} for name in self.spec.args: kw[name] = getattr(self, name) # Override kw.update(override) return Message(self.type, **kw) def __setattr__(self, name, value): # Todo: validation if name in self.spec.args or name == 'time': if name == 'time': if not (isinstance(value, int) or isinstance(value, float)): raise ValueError('time must be a number') elif name == 'channel': if not (isinstance(value, int) and (0 <= value < 16)): raise ValueError('channel must be an int in range(0, 16)') elif name == 'pos': if not (isinstance(value, int) and (0 <= value < 32768)): raise ValueError('pos must be an int in range(0, 32768)') elif name == 'value' and self.type == 'pitchwheel': if not (isinstance(value, int) and (PITCHWHEEL_MIN <= value <= PITCHWHEEL_MAX)): fmt = 'pitchwheel value must be an int in range({}, {})' raise ValueError(fmt.format(PITCHWHEEL_MIN, PITCHWHEEL_MAX)) elif name == 'data': value = tuple(value) # Make the data bytes immutable for byte in value: assert_databyte(byte) else: assert_databyte(value) self.__dict__[name] = value else: fmt = '{} message has no {!r} attribute' raise AttributeError(fmt.format(self.type, name)) def __delattr__(self, name): raise AttributeError('Message attributes can\'t be deleted') def _get_status_byte(self): """ Compute and return status byte. 
For channel messages, the channel will be added to the status_byte. """ # Add channel to status byte. sb = self.spec.status_byte if sb <= 0xf0: sb |= self.channel return sb status_byte = property(fget=_get_status_byte) del _get_status_byte def bytes(self): """ Encode message and return as a list of bytes. """ b = [self.status_byte] for name in self.spec.args: if name == 'channel': continue # We already have this elif name == 'data': b.extend(self.data) elif self.type == 'pitchwheel' and name == 'value': value = self.value + (2 ** 13) lsb = value & 0x7f msb = value >> 7 b.append(lsb) b.append(msb) elif self.type == 'songpos' and name == 'pos': # Convert 14 bit value to two 7-bit values # Todo: check if this is correct lsb = self.pos & 0x7f b.append(lsb) msb = self.pos >> 7 b.append(msb) else: # Ordinary data byte b.append(getattr(self, name)) if self.type == 'sysex': # Append a sysex_end b.append(0xf7) return b def bin(self): """ Encode message and return as a bytearray(). """ # Todo: bytearray() or bytes() return bytearray(self.bytes()) def hex(self, sep=' '): """ Encode message and return as a string of hex numbers, separated by the string sep. The default separator is a single space. """ return sep.join(['{:02X}'.format(byte) for byte in self.bytes()]) def __repr__(self): args = [repr(self.type)] args.extend('{}={!r}'.format(name, getattr(self, name)) for name in list(self.spec.args)) args.append('time') args = ', '.join(args) return 'mido.Message({})'.format(args) def __eq__(self, other): """ Compares message type and message specific attributes. (For example (msg.type, msg.channel, msg.note, msg.velocity). The time, spec and status_byte attributes are not compared. """ if not isinstance(other, Message): raise TypeError('comparison between Message and another type') def key(msg): """ Return a key for comparison. The key for 'note_on' is (msg.type, msg.channel, msg.note, msg.velocity). """ k = tuple([msg.type] + [getattr(msg, a) for a in msg.spec.args]) return k return key(self) == key(other) def build_signature(spec, include_type=True): """ Builds a contructor signature for a message. This is used to create documentation. """ if include_type: parts = [repr(spec.type)] else: parts = [] for name in spec.args + ('time',): if name == 'data': parts.append('data=()') else: parts.append(name + '=0') sig = '(' + ', '.join(parts) + ')' return sig def _print_signatures(): """ Print arguments for mido.new() for all supported message types. This will be used to generate documentation. """ for spec in _MSG_SPECS: sig = build_signature(spec) print('mido.new {}'.format(sig)) def _init(): """ Initialize the module. This build a lookup table for message specs with keys for every valid message type and status byte. """ for spec in _MSG_SPECS: if spec.status_byte < 0xf0: # Channel message. # The upper 4 bits are message type, and # the lower 4 are MIDI channel. # We need lookup for all 16 MIDI channels. for channel in range(16): _SPEC_LOOKUP[spec.status_byte | channel] = spec else: _SPEC_LOOKUP[spec.status_byte] = spec _SPEC_LOOKUP[spec.type] = spec _init() def serialized_messages(): # Import like above, or just paste this at the end of msg.py a = Message('note_off', channel=0, note=60, velocity=64) b = Message('note_on', channel=0, note=60, velocity=126) c = a.copy(note=62) d = Message(0x92) # Create Message by status_byte abytes = a.bytes() bbytes = b.bytes() cbytes = c.bytes() return abytes + bbytes + cbytes + d.bytes() if __name__ == '__main__': for _ in xrange(2000): serialized_messages()
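# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): bytes() above encodes
# a pitchwheel value by adding 2**13 and splitting the result into 7-bit
# LSB/MSB data bytes, with the channel folded into the status byte.  The
# helper below walks through one value by hand and compares it with the
# class; the value 2000 and channel 3 are arbitrary choices for this example.
def _pitchwheel_encoding_sketch():
    msg = Message('pitchwheel', channel=3, value=2000)
    offset = 2000 + 2 ** 13              # 10192
    lsb, msb = offset & 0x7f, offset >> 7
    assert msg.bytes() == [0xe0 | 3, lsb, msb]
    return msg.hex()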
import torch import torch.nn as nn import torch.nn.functional as F from DiffNet.networks.dgcnn import DGCNN2D class ConvNet(nn.Module): def __init__(self, inchannels, outchannels, hchannels, kernel=2, nonlin=nn.ReLU(), final_nonlin=nn.Identity()): super(ConvNet, self).__init__() self.in_channels, self.out_channels = inchannels, outchannels self.nhidden = len(hchannels) channels = [inchannels] + hchannels + [outchannels] self.nonlin = [nonlin for k in range(self.nhidden)] + [final_nonlin] self.conv = nn.ModuleList( [ nn.ConvTranspose1d(channels[k], channels[k+1], kernel, stride=2) for k in range(self.nhidden + 1) ] ) def forward(self, x): for conv, nlin in zip(self.conv, self.nonlin): x = nlin(conv(x)) return x class LinearNet(nn.Module): def __init__(self, insize, outsize, hsizes, nonlin=nn.LeakyReLU(), final_nonlin=nn.Identity()): super(LinearNet, self).__init__() #### pulled from neuromancer MLP class self.in_features, self.out_features = insize, outsize self.nhidden = len(hsizes) sizes = [insize] + hsizes + [outsize] self.nonlin = [nonlin for k in range(self.nhidden)] + [final_nonlin] self.linear = nn.ModuleList( [ nn.Linear(sizes[k], sizes[k+1]) for k in range(self.nhidden + 1) ] ) def forward(self, x): for lin, nlin in zip(self.linear, self.nonlin): x = nlin(lin(x)) return x # class ImmDiff(nn.Module): # def __init__(self, out_channels): # super(ImmDiff, self).__init__() # self.nurbs_to_img = ConvNet(1000, 32, [500 for i in range(3)], nonlin=torch.sin) # # self.linear_net = LinearNet(2000, 1024, [1500 for i in range(3)]) # self.up_conv_1 = nn.ConvTranspose2d(1, 2, kernel_size=2, stride=2) # self.up_conv_2 = nn.ConvTranspose2d(2, out_channels, kernel_size=2, stride=2) # def forward(self, x): # x = torch.tanh(self.nurbs_to_img(x)).unsqueeze(1) # x = torch.tanh(self.up_conv_1(x)) # return self.up_conv_2(x) # class ImmDiff(nn.Module): # def __init__(self, out_channels): # super(ImmDiff, self).__init__() # # self.nurbs_to_img = ConvNet(1000, 32, [500 for i in range(3)], nonlin=torch.sin) # self.linear_net = LinearNet(2000, 1024, [1500 for i in range(3)]) # self.up_conv_1 = nn.ConvTranspose2d(1, 2, kernel_size=2, stride=2) # self.up_conv_2 = nn.ConvTranspose2d(2, out_channels, kernel_size=2, stride=2) # def forward(self, x): # x = torch.tanh(self.linear_net(x.flatten())) # x = torch.tanh(self.up_conv_1(x)) # return self.up_conv_2(x) class ImmDiff(nn.Module): def __init__(self, out_channels): super(ImmDiff, self).__init__() self.out_channels = out_channels self.linear_net = LinearNet(2000, 1024, [1500 for i in range(6)], final_nonlin=nn.LeakyReLU()) self.conv1 = nn.Conv2d(1, 16, kernel_size=4, padding=1) self.conv1_up = nn.ConvTranspose2d(16,32, kernel_size=4, stride=2) self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=1) self.conv2_up = nn.ConvTranspose2d(64,128, kernel_size=4) self.conv3 = nn.Conv2d(128, 64, kernel_size=5, padding=1) self.conv3_up = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2) self.conv4 = nn.Conv2d(32, 16, kernel_size=6, padding=1) self.conv4_up = nn.ConvTranspose2d(16,self.out_channels, kernel_size=4) def forward(self, x): x = self.linear_net(x.flatten(1)) x = torch.reshape(x, (x.shape[0], 1, 32, 32)) x = F.leaky_relu(self.conv1(x)) x = F.leaky_relu(self.conv1_up(x)) x = F.leaky_relu(self.conv2(x)) x = F.leaky_relu(self.conv2_up(x)) x = F.leaky_relu(self.conv3(x)) x = F.leaky_relu(self.conv3_up(x)) x = F.leaky_relu(self.conv4(x)) x = self.conv4_up(x) # print('* '*10) # print(x.shape) # print('* '*10) # exit() return x class IBN_DGCNN2d(nn.Module): def 
__init__(self): super(IBN_DGCNN2d, self).__init__() self.conv2d = nn.Conv2d(1,1, kernel_size=(5,3), stride=(5,1), padding=(0,1)) self.dgcnn = DGCNN2D(domain_size=128, num_points=40, lowest_size=16) def forward(self, x): x = self.conv2d(x.unsqueeze(1))#.squeeze(1) # print(x.shape) # exit() x = F.leaky_relu(x) x = self.dgcnn(x) return x class ImmDiff_VAE(nn.Module): def __init__(self, out_channels): super(ImmDiff_VAE, self).__init__() self.out_channels = out_channels self.linear_net_mu = LinearNet(2000, 256, [1024 for i in range(6)], final_nonlin=nn.LeakyReLU()) self.linear_net_logvar = LinearNet(2000, 256, [1024 for i in range(6)], final_nonlin=nn.LeakyReLU()) self.conv_up = nn.ConvTranspose2d(1, 16, kernel_size=2, stride=2) self.conv1 = nn.Conv2d(16, 32, kernel_size=4, padding=1) self.conv1_up = nn.ConvTranspose2d(32,64, kernel_size=4, stride=2) self.conv2 = nn.Conv2d(64, 128, kernel_size=5, padding=1) self.conv2_up = nn.ConvTranspose2d(128,128, kernel_size=4) self.conv3 = nn.Conv2d(128, 64, kernel_size=5, padding=1) self.conv3_up = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2) self.conv4 = nn.Conv2d(32, 16, kernel_size=6, padding=1) self.conv4_up = nn.ConvTranspose2d(16,self.out_channels, kernel_size=4) def reparametrize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps * std + mu def forward(self, x): mu = self.linear_net_mu(x.flatten(1)) logvar = self.linear_net_logvar(x.flatten(1)) z = self.reparametrize(mu, logvar) x = torch.reshape(z, (x.shape[0], 1, 16, 16)) x = F.leaky_relu(self.conv_up(x)) x = F.leaky_relu(self.conv1(x)) x = F.leaky_relu(self.conv1_up(x)) x = F.leaky_relu(self.conv2(x)) x = F.leaky_relu(self.conv2_up(x)) x = F.leaky_relu(self.conv3(x)) x = F.leaky_relu(self.conv3_up(x)) x = F.leaky_relu(self.conv4(x)) x = self.conv4_up(x) # print('* '*10) # print(x.shape) # print('* '*10) # exit() return x, mu, logvar class ImmDiff_Large(nn.Module): def __init__(self, out_channels): super(ImmDiff_Large, self).__init__() self.out_channels = out_channels self.linear_net = LinearNet(2000, 256, [1024 for i in range(7)], final_nonlin=nn.LeakyReLU()) self.resnet = LinearNet(2000, 256, [1024 for i in range(7)], nonlin=torch.tanh, final_nonlin=nn.LeakyReLU()) self.linear_net_sin = LinearNet(2000, 256, [1024 for i in range(7)], nonlin=torch.sin, final_nonlin=nn.LeakyReLU()) self.pc_sparse = nn.Sequential(nn.Conv2d(1,1, kernel_size=(5,2), stride=5), nn.LeakyReLU()) self.pc_sparse_up = nn.Sequential(nn.Linear(200, 256), nn.LeakyReLU()) self.conv_up_1 = nn.ConvTranspose2d(4, 16, kernel_size=2, stride=2) self.conv_up_2 = nn.ConvTranspose2d(16, 32, kernel_size=2, stride=2) self.conv_up_3 = nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2) self.conv1 = nn.Conv2d(16, 32, kernel_size=4, padding=1) self.conv1_up = nn.ConvTranspose2d(32,64, kernel_size=4, stride=2) self.conv2 = nn.Conv2d(64+32, 128, kernel_size=5, padding=1) self.conv2_up = nn.ConvTranspose2d(128,128, kernel_size=4) self.conv3 = nn.Conv2d(128, 128, kernel_size=5, padding=1) self.conv3_up = nn.ConvTranspose2d(128, 32, kernel_size=4, stride=2) self.conv4 = nn.Conv2d(32+32, 16, kernel_size=6, padding=1) self.conv4_up = nn.ConvTranspose2d(16,self.out_channels, kernel_size=4) def forward(self, x): x_lin = self.linear_net(x.flatten(1)) x_res = self.resnet(x.flatten(1)) x_sin = self.linear_net_sin(x.flatten(1)) x_sparse = self.pc_sparse(x.unsqueeze(1)) x_sparse = x_sparse.squeeze(1).squeeze(-1) x_sparse = self.pc_sparse_up(x_sparse) x_lin = torch.reshape(x_lin, (x.shape[0], 1, 16, 16)) x_res = 
torch.reshape(x_res, (x.shape[0], 1, 16, 16)) x_sin = torch.reshape(x_sin, (x.shape[0], 1, 16, 16)) x_sparse = torch.reshape(x_sparse, (x.shape[0], 1, 16, 16)) x = torch.cat((x_lin, x_res, x_sin, x_sparse),1) x_1 = F.leaky_relu(self.conv_up_1(x)) x_2 = F.leaky_relu(self.conv_up_2(x_1)) x_4 = F.leaky_relu(self.conv_up_3(x_2)) x = F.leaky_relu(self.conv1(x_1)) x = F.leaky_relu(self.conv1_up(x)) x = F.leaky_relu(self.conv2(torch.cat((x, x_2),1))) x = F.leaky_relu(self.conv2_up(x)) x = F.leaky_relu(self.conv3(x)) x = F.leaky_relu(self.conv3_up(x)) x = F.leaky_relu(self.conv4(torch.cat((x, x_4),1))) x = self.conv4_up(x) return x class ImmDiff_Large_normals(nn.Module): def __init__(self, out_channels): super(ImmDiff_Large_normals, self).__init__() self.out_channels = out_channels self.lin_pc = LinearNet(2000, 256, [1024 for i in range(7)], final_nonlin=nn.LeakyReLU()) self.lin_pc_skip = LinearNet(2000, 256, [1024], final_nonlin=nn.LeakyReLU()) self.lin_norm = LinearNet(2000, 256, [1024 for i in range(7)], final_nonlin=nn.LeakyReLU()) self.lin_norm_skip = LinearNet(2000, 256, [1024], final_nonlin=nn.LeakyReLU()) self.conv_up_1 = nn.ConvTranspose2d(4, 16, kernel_size=2, stride=2) self.conv_up_2 = nn.ConvTranspose2d(16, 32, kernel_size=2, stride=2) self.conv_up_3 = nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2) self.conv1 = nn.Conv2d(16, 32, kernel_size=4, padding=1) self.conv1_up = nn.ConvTranspose2d(32,64, kernel_size=4, stride=2) self.conv2 = nn.Conv2d(64+32, 128, kernel_size=5, padding=1) self.conv2_up = nn.ConvTranspose2d(128,128, kernel_size=4) self.conv3 = nn.Conv2d(128, 128, kernel_size=5, padding=1) self.conv3_up = nn.ConvTranspose2d(128, 32, kernel_size=4, stride=2) self.conv4 = nn.Conv2d(32+32, 16, kernel_size=6, padding=1) self.conv4_up = nn.ConvTranspose2d(16,self.out_channels, kernel_size=4) def forward(self, x, y): lin_pc = self.lin_pc(x.flatten(1)) lin_pc_skip = self.lin_pc_skip(x.flatten(1)) x_nrm = self.lin_norm(y.flatten(1)) x_nrm_skip = self.lin_norm_skip(y.flatten(1)) lin_pc = torch.reshape(lin_pc, (x.shape[0], 1, 16, 16)) lin_pc_skip = torch.reshape(lin_pc_skip, (x.shape[0], 1, 16, 16)) x_nrm = torch.reshape(x_nrm, (x.shape[0], 1, 16, 16)) x_nrm_skip = torch.reshape(x_nrm_skip, (x.shape[0], 1, 16, 16)) x = torch.cat((lin_pc, lin_pc_skip, x_nrm, x_nrm_skip),1) x_1 = F.leaky_relu(self.conv_up_1(x)) x_2 = F.leaky_relu(self.conv_up_2(x_1)) x_4 = F.leaky_relu(self.conv_up_3(x_2)) x = F.leaky_relu(self.conv1(x_1)) x = F.leaky_relu(self.conv1_up(x)) x = F.leaky_relu(self.conv2(torch.cat((x, x_2),1))) x = F.leaky_relu(self.conv2_up(x)) x = F.leaky_relu(self.conv3(x)) x = F.leaky_relu(self.conv3_up(x)) x = F.leaky_relu(self.conv4(torch.cat((x, x_4),1))) x = self.conv4_up(x) return x class eikonal_linear(nn.Module): def __init__(self): super(eikonal_linear, self).__init__() self.linear = LinearNet(2000, 1024, [1500 for i in range(2)], nonlin=torch.sin) def forward(self, x): x = self.linear(x.flatten(1)) x = torch.reshape(x, (x.shape[0], 1, 32, 32)) y = torch.ones_like(x) z = torch.cat((x,y),1) return z
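# --- Illustrative addition (hedged smoke test, not part of the module above):
# ImmDiff is assumed to be in scope; the (batch, 1000, 2) point-cloud input
# shape is inferred from the 2000-feature flatten feeding LinearNet(2000, ...),
# it is not stated explicitly in the code.
import torch

model = ImmDiff(out_channels=1)
pc = torch.randn(4, 1000, 2)          # 4 samples, 1000 (x, y) boundary points each
out = model(pc)                       # 32x32 latent image upsampled by the conv stack
assert out.shape == (4, 1, 128, 128)  # follows from the kernel/stride arithmetic above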
<gh_stars>1-10 from django.http import Http404 from rest_framework.views import APIView from pandas_drf_tools import mixins class GenericDataFrameAPIView(APIView): """Base class for all other generic DataFrame views. It is based on GenericAPIView.""" # You'll need to either set these attributes, # or override `get_dataframe()`/`get_serializer_class()`. # If you are overriding a view method, it is important that you call # `get_dataframe()` instead of accessing the `dataframe` property directly, # as `dataframe` will get evaluated only once, and those results are cached # for all subsequent requests. dataframe = None serializer_class = None # If you want to use object lookups other than index, set 'lookup_url_kwarg'. # For more complex lookup requirements override `get_object()`. lookup_url_kwarg = 'index' # The style to use for dataframe pagination. pagination_class = None def get_dataframe(self): """ Get the DataFrame for this view. Defaults to using `self.dataframe`. This method should always be used rather than accessing `self.dataframe` directly, as `self.dataframe` gets evaluated only once, and those results are cached for all subsequent requests. You may want to override this if you need to provide different dataframes depending on the incoming request. """ assert self.dataframe is not None, ( "'%s' should either include a `dataframe` attribute, " "or override the `get_dataframe()` method." % self.__class__.__name__ ) dataframe = self.dataframe return dataframe def update_dataframe(self, dataframe): """ Indicates that the dataframe needs to be updated. The default implementation just returns the argument. This method has to be ovewritten to make changing operations stick. """ return dataframe def index_row(self, dataframe): """ Indexes the row based on the request parameters. """ return dataframe.loc[self.kwargs[self.lookup_url_kwarg]].to_frame().T def get_object(self): """ Returns the row the view is displaying. You may want to override this if you need to provide non-standard queryset lookups. Eg if objects are referenced using multiple keyword arguments in the url conf. """ dataframe = self.filter_dataframe(self.get_dataframe()) assert self.lookup_url_kwarg in self.kwargs, ( 'Expected view %s to be called with a URL keyword argument ' 'named "%s". Fix your URL conf, or set the `.lookup_field` ' 'attribute on the view correctly.' % (self.__class__.__name__, self.lookup_url_kwarg) ) try: obj = self.index_row(dataframe) except (IndexError, KeyError, ValueError): raise Http404 # May raise a permission denied self.check_object_permissions(self.request, obj) return obj def get_serializer(self, *args, **kwargs): """ Return the serializer instance that should be used for validating and deserializing input, and for serializing output. """ serializer_class = self.get_serializer_class() kwargs['context'] = self.get_serializer_context() return serializer_class(*args, **kwargs) def get_serializer_class(self): """ Return the class to use for the serializer. Defaults to using `self.serializer_class`. You may want to override this if you need to provide different serializations depending on the incoming request. (Eg. admins get full serialization, others get basic serialization) """ assert self.serializer_class is not None, ( "'%s' should either include a `serializer_class` attribute, " "or override the `get_serializer_class()` method." % self.__class__.__name__ ) return self.serializer_class def get_serializer_context(self): """ Extra context provided to the serializer class. 
""" return { 'request': self.request, 'format': self.format_kwarg, 'view': self } def filter_dataframe(self, dataframe): """ Given a dataframe, filter it. """ return dataframe @property def paginator(self): """ The paginator instance associated with the view, or `None`. """ if not hasattr(self, '_paginator'): if self.pagination_class is None: self._paginator = None else: self._paginator = self.pagination_class() return self._paginator def paginate_dataframe(self, dataframe): """ Return a single page of results, or `None` if pagination is disabled. """ if self.paginator is None: return None return self.paginator.paginate_dataframe(dataframe, self.request, view=self) def get_paginated_response(self, data): """ Return a paginated style `Response` object for the given output data. """ assert self.paginator is not None return self.paginator.get_paginated_response(data) # Concrete view classes that provide method handlers # by composing the mixin classes with the base view. class CreateAPIView(mixins.CreateDataFrameMixin, GenericDataFrameAPIView): """ Concrete view for creating a model instance. """ def post(self, request, *args, **kwargs): return self.create(request, *args, **kwargs) class ListAPIView(mixins.ListDataFrameMixin, GenericDataFrameAPIView): """ Concrete view for listing a queryset. """ def get(self, request, *args, **kwargs): return self.list(request, *args, **kwargs) class RetrieveAPIView(mixins.RetrieveDataFrameMixin, GenericDataFrameAPIView): """ Concrete view for retrieving a model instance. """ def get(self, request, *args, **kwargs): return self.retrieve(request, *args, **kwargs) class DestroyAPIView(mixins.DestroyDataFrameMixin, GenericDataFrameAPIView): """ Concrete view for deleting a model instance. """ def delete(self, request, *args, **kwargs): return self.destroy(request, *args, **kwargs) class UpdateAPIView(mixins.UpdateDataFrameMixin, GenericDataFrameAPIView): """ Concrete view for updating a model instance. """ def put(self, request, *args, **kwargs): return self.update(request, *args, **kwargs) def patch(self, request, *args, **kwargs): return self.partial_update(request, *args, **kwargs) class ListCreateAPIView(mixins.ListDataFrameMixin, mixins.CreateDataFrameMixin, GenericDataFrameAPIView): """ Concrete view for listing a queryset or creating a model instance. """ def get(self, request, *args, **kwargs): return self.list(request, *args, **kwargs) def post(self, request, *args, **kwargs): return self.create(request, *args, **kwargs) class RetrieveUpdateAPIView(mixins.RetrieveDataFrameMixin, mixins.UpdateDataFrameMixin, GenericDataFrameAPIView): """ Concrete view for retrieving, updating a model instance. """ def get(self, request, *args, **kwargs): return self.retrieve(request, *args, **kwargs) def put(self, request, *args, **kwargs): return self.update(request, *args, **kwargs) def patch(self, request, *args, **kwargs): return self.partial_update(request, *args, **kwargs) class RetrieveDestroyAPIView(mixins.RetrieveDataFrameMixin, mixins.DestroyDataFrameMixin, GenericDataFrameAPIView): """ Concrete view for retrieving or deleting a model instance. """ def get(self, request, *args, **kwargs): return self.retrieve(request, *args, **kwargs) def delete(self, request, *args, **kwargs): return self.destroy(request, *args, **kwargs) class RetrieveUpdateDestroyAPIView(mixins.RetrieveDataFrameMixin, mixins.UpdateDataFrameMixin, mixins.DestroyDataFrameMixin, GenericDataFrameAPIView): """ Concrete view for retrieving, updating or deleting a model instance. 
""" def get(self, request, *args, **kwargs): return self.retrieve(request, *args, **kwargs) def put(self, request, *args, **kwargs): return self.update(request, *args, **kwargs) def patch(self, request, *args, **kwargs): return self.partial_update(request, *args, **kwargs) def delete(self, request, *args, **kwargs): return self.destroy(request, *args, **kwargs)
import time import numpy as np import rospy import tf from nav_msgs.msg import Odometry from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3 from tracker.filters.robot_kalman_filter import RobotFilter from tracker.vision.vision_receiver import VisionReceiver from tracker.constants import TrackerConst class ROSTracker: MAX_ROBOT_PER_TEAM = TrackerConst.MAX_ROBOT_PER_TEAM STATE_PREDICTION_TIME = TrackerConst.STATE_PREDICTION_TIME MAX_UNDETECTED_DELAY = TrackerConst.MAX_UNDETECTED_DELAY def __init__(self, name, vision_address): rospy.init_node(name) rospy.loginfo('Created ros node {}'.format(name)) # init dict to store all publishers self.pub_dict = {} self.last_sending_time = time.time() self.vision_receiver = VisionReceiver(vision_address) rospy.loginfo('VisionReceiver created. ({}:{})'.format(*vision_address)) self.robots = [RobotFilter() for _ in range(ROSTracker.MAX_ROBOT_PER_TEAM)] self._current_timestamp = None @property def current_timestamp(self): return self._current_timestamp def start(self): self.vision_receiver.start() self.tracker_main_loop() def tracker_main_loop(self): while not rospy.is_shutdown(): detection_frame = self.vision_receiver.get() self._current_timestamp = detection_frame.t_capture for robot_obs in detection_frame.robots_blue: obs_state = np.array([robot_obs.x, robot_obs.y, robot_obs.orientation]) self.robots[robot_obs.robot_id].update(obs_state, detection_frame.t_capture) self.robots[robot_obs.robot_id].predict(ROSTracker.STATE_PREDICTION_TIME) for robot_obs in detection_frame.robots_yellow: obs_state = np.array([robot_obs.x, robot_obs.y, robot_obs.orientation]) self.robots[robot_obs.robot_id].update(obs_state, detection_frame.t_capture) self.robots[robot_obs.robot_id].predict(ROSTracker.STATE_PREDICTION_TIME) self.remove_undetected_robot() if self.robots: self.pub_odom_msgs(self.robots) else: rospy.logwarn("No robots found...") def remove_undetected_robot(self): for robot in self.robots: if robot.last_t_capture + ROSTracker.MAX_UNDETECTED_DELAY < self.current_timestamp: robot.is_active = False def pub_odom_msgs(self, robots): for robot_id, robot in enumerate(robots): if robot.is_active: if robot_id not in self.pub_dict: topic_name = "tracked_robot_" + str(robot_id) self.pub_dict[robot_id] = rospy.Publisher(topic_name, Odometry, queue_size=10) rospy.loginfo('Created new topic {}'.format(topic_name)) # get position and kinematic info from robot robot_pose = tuple(robot.pose) # position (x, y, yaw) robot_vel = tuple(robot.velocity) # velocity (x, y, yaw) # create odom message odom = Odometry() odom.header.stamp = rospy.Time.now() # off by ~2/100 sec compared to self.current_timestamp # convert yaw to euler to quaternion quat = tf.transformations.quaternion_from_euler(0, 0, robot.get_orientation) # set the pose odom.pose.pose = Pose(Point(robot_pose[0] / 1000, robot_pose[1] / 1000, 0), Quaternion(*quat)) # set the twist (first part is linear velocity, second is angular) odom.twist.twist = Twist(Vector3(robot_vel[0] / 1000, robot_vel[1] / 1000, 0), Vector3(0, 0, robot_vel[2])) # publish message self.pub_dict[robot_id].publish(odom)
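# --- Illustrative addition (hedged usage sketch): the tracker needs only a node
# name and the address SSL-Vision packets arrive on.  The multicast address and
# port below are assumptions -- use whatever VisionReceiver expects in your setup.
if __name__ == '__main__':
    tracker = ROSTracker('tracker', ('224.5.23.2', 10006))
    tracker.start()   # blocks, spinning tracker_main_loop until rospy shuts down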
from __future__ import print_function import subprocess import tempfile import os import argparse import sys import stat import logging def popen(cmd): logger.info('Running the following command: %s', ' '.join(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() logger.debug('Command stdout: %s', stdout) logger.debug('Command stderr: %s', stderr) return {'cmd': cmd, 'returncode': proc.returncode, 'stdout': stdout, 'stderr': stderr} class CertificateKeyPair(object): def __init__(self, keyLocation, certLocation, dn=""): super(CertificateKeyPair, self).__init__() self.keyLocation = keyLocation self.certLocation = certLocation self.signingPolicyLocation = "" self.dn = dn self.subject_hash = "" self.subject_hash_old = "" class CertificateGenerator(object): supportedMessageDigests = ["md2", "md4", "md5", "mdc2", "sha1", "sha224", "sha256", "sha384", "sha512"] def __init__(self, work_dir=''): super(CertificateGenerator, self).__init__() self._ca = None self.work_dir = work_dir @staticmethod def checkMessageDigest(messagedigest): if messagedigest not in CertificateGenerator.supportedMessageDigests: logger.error("The message digest \"%s\" is not supported", messagedigest) sys.exit(1) def getCAfiles(self, name="Test CA", work_dir=None): cafiles = {'links': [], 'files': []} if work_dir is None: work_dir = self.work_dir namelen = len(name) dashname = name.replace(" ", "-") # get the symlinks pointing to CA files for fname in os.listdir(work_dir): fpath = os.path.join(work_dir, fname) if os.path.islink(fpath): linkto = os.readlink(fpath).replace(work_dir.rstrip('/') + '/', '') if linkto[0:namelen] == dashname: cafiles['links'].append((fpath, linkto)) # ca files itself for fname in os.listdir(work_dir): fpath = os.path.join(work_dir, fname) if os.path.isfile(fpath): if fname[0:namelen] == dashname: cafiles['files'].append(fpath) return cafiles def cleanupCAfiles(self, name="Test CA"): cafiles = self.getCAfiles(name) for (fpath, linkto) in cafiles['links']: logger.debug('Removing the CA link: %s -> %s', fpath, linkto) os.unlink(fpath) for fpath in cafiles['files']: logger.debug('Removing the CA file: %s', fpath) os.unlink(fpath) def generateCA(self, name="Test CA", validityperiod=30, messagedigest="sha1", use_for_signing=True, force=False): if not isinstance(validityperiod, (int, long)): logger.error("The 'validityperiod' argument must be an integer") sys.exit(1) CertificateGenerator.checkMessageDigest(messagedigest) keyLocation = os.path.join(self.work_dir, name.replace(" ", "-") + "-key.pem") certLocation = os.path.join(self.work_dir, name.replace(" ", "-") + ".pem") if os.path.isfile(keyLocation): if force: logger.info("Key file '%s' already exist. Cleaning up previous Test-CA files.", keyLocation) self.cleanupCAfiles(name) else: logger.error("Error generating CA certificate and key: file '%s' is already exist", keyLocation) sys.exit(1) if os.path.isfile(certLocation): if force: logger.info("Certificate file '%s' already exist. 
Cleaning up previous Test-CA files.", certLocation) self.cleanupCAfiles(name) else: logger.error("Error generating CA certificate and key: file '%s' is already exist", certLocation) sys.exit(1) subject = "/DC=org/DC=nordugrid/DC=ARC/O=TestCA/CN=" + name logger.info('Generating Test CA %s', subject) if popen(["openssl", "genrsa", "-out", keyLocation, "2048"])["returncode"] != 0: logger.error("Failed to generate CA key") sys.exit(1) if popen(["openssl", "req", "-x509", "-new", "-" + messagedigest, "-subj", subject, "-key", keyLocation, "-out", certLocation, "-days", str(validityperiod)])["returncode"] != 0: logger.error('Failed to self-sign certificate') sys.exit(1) ca = CertificateKeyPair(keyLocation, certLocation, subject) if use_for_signing: self._ca = ca os.chmod(keyLocation, stat.S_IRUSR) os.chmod(certLocation, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) # Order of the -subject_hash and -subject_hash_old flags matters. p_handle = popen(["openssl", "x509", "-subject_hash", "-subject_hash_old", "-noout", "-in", certLocation]) if p_handle["returncode"] == 0: ca.subject_hash, ca.subject_hash_old = p_handle["stdout"].splitlines() # Use relative location. Assume hash link does not already exist (.0). certFilename = name.replace(" ", "-") + ".pem" os.chdir(self.work_dir) logger.info('Linking %s to %s.0', certFilename, ca.subject_hash) os.symlink(certFilename, ca.subject_hash + ".0") logger.info('Linking %s to %s.0', certFilename, ca.subject_hash_old) os.symlink(certFilename, ca.subject_hash_old + ".0") # Signing policy is critical for Globus logger.info('Writing signing_policy file for CA') ca.signingPolicyLocation = os.path.join(self.work_dir, name.replace(" ", "-") + ".signing_policy") signing_policy = '''# EACL ARC Test CA access_id_CA X509 '{subject}' pos_rights globus CA:sign cond_subjects globus '"{cond_subject}/*"' '''.format(subject=subject, cond_subject=subject[:subject.rfind('/')]) with open(ca.signingPolicyLocation, "w") as f_signing: f_signing.write(signing_policy) logger.info('Linking %s to %s.signing_policy', ca.signingPolicyLocation, ca.subject_hash) os.symlink(ca.signingPolicyLocation, ca.subject_hash + ".signing_policy") logger.info('Linking %s to %s.signing_policy', ca.signingPolicyLocation, ca.subject_hash_old) os.symlink(ca.signingPolicyLocation, ca.subject_hash_old + ".signing_policy") else: logger.error('Failed to calculate certificate hash values. Cleaning up generated files.') os.unlink(keyLocation) os.unlink(certLocation) sys.exit(1) return ca def generateHostCertificate(self, hostname, prefix="host", ca=None, validityperiod=30, messagedigest="sha1", force=False): if ca is None and self._ca is None: logger.error("No CA provided") sys.exit(1) if not isinstance(validityperiod, (int, long)): logger.error("The 'validityperiod' argument must be an integer") sys.exit(1) if ca is None: ca = self._ca try: with open(ca.keyLocation, 'r') as ca_key: pass except IOError as e: logger.error("Failed to access Test CA key. Error(%s): %s", e.errno, e.strerror) sys.exit(1) CertificateGenerator.checkMessageDigest(messagedigest) prefix += "-" + hostname.replace(" ", "-") keyLocation = os.path.join(self.work_dir, prefix + "-key.pem") certReqFile, certReqLocation = tempfile.mkstemp('-cert-req.pem', prefix) os.close(certReqFile) certLocation = os.path.join(self.work_dir, prefix + "-cert.pem") if os.path.isfile(keyLocation): if force: logger.info("Key file '%s' already exist. Removing. 
", keyLocation) os.unlink(keyLocation) else: logger.error("Error generating host certificate and key: file '%s' already exist", keyLocation) sys.exit(1) if os.path.isfile(certLocation): if force: logger.info("Certificate file '%s' already exist. Removing. ", certLocation) os.unlink(certLocation) else: logger.error("Error generating host certificate and key: file '%s' already exist", certLocation) sys.exit(1) logger.info('Generating host certificate signing request.') subject = "/DC=org/DC=nordugrid/DC=ARC/O=TestCA/CN=host\/" + hostname if popen(["openssl", "genrsa", "-out", keyLocation, "2048"])["returncode"] != 0: logger.error("Failed to generate host key") sys.exit(1) if popen(["openssl", "req", "-new", "-" + messagedigest, "-subj", subject, "-key", keyLocation, "-out", certReqLocation])["returncode"] != 0: logger.error("Failed to generate certificate signing request") sys.exit(1) config_descriptor, config_name = tempfile.mkstemp(prefix="x509v3_config-") config = os.fdopen(config_descriptor, "w") config.write("basicConstraints=CA:FALSE\n") config.write("keyUsage=digitalSignature, nonRepudiation, keyEncipherment\n") config.write("subjectAltName=DNS:" + hostname + "\n") config.close() logger.info('Signing host certificate with Test CA.') if popen(["openssl", "x509", "-req", "-" + messagedigest, "-in", certReqLocation, "-CA", ca.certLocation, "-CAkey", ca.keyLocation, "-CAcreateserial", "-extfile", config_name, "-out", certLocation, "-days", str(validityperiod)])["returncode"] != 0: logger.error("Failed to sign host certificate with Test CA.") sys.exit(1) os.remove(certReqLocation) os.remove(config_name) os.chmod(keyLocation, stat.S_IRUSR) os.chmod(certLocation, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) return CertificateKeyPair(keyLocation, certLocation, subject) def generateClientCertificate(self, name, prefix="client", ca=None, validityperiod=30, messagedigest="sha1"): if ca is None and self._ca is None: logger.error("No CA provided") sys.exit(1) if not isinstance(validityperiod, (int, long)): logger.error("The 'validityperiod' argument must be an integer") sys.exit(1) if ca is None: ca = self._ca try: with open(ca.keyLocation, 'r') as ca_key: pass except IOError as e: logger.error("Failed to access Test CA key. 
Error(%s): %s", e.errno, e.strerror) sys.exit(1) CertificateGenerator.checkMessageDigest(messagedigest) prefix += "-" + name.replace(" ", "-") keyLocation = os.path.join(self.work_dir, prefix + "-key.pem") certReqFile, certReqLocation = tempfile.mkstemp('-cert-req.pem', prefix) os.close(certReqFile) certLocation = os.path.join(self.work_dir, prefix + "-cert.pem") if os.path.isfile(keyLocation): logger.error("Error generating client certificate and key: file '%s' already exist", keyLocation) sys.exit(1) if os.path.isfile(certLocation): logger.error("Error generating client certificate and key: file '%s' already exist", certLocation) sys.exit(1) logger.info('Generating client certificate signing request.') subject = "/DC=org/DC=nordugrid/DC=ARC/O=TestCA/CN=" + name if popen(["openssl", "genrsa", "-out", keyLocation, "2048"])["returncode"] != 0: logger.error("Failed to generate host key") sys.exit(1) if popen(["openssl", "req", "-new", "-" + messagedigest, "-subj", subject, "-key", keyLocation, "-out", certReqLocation])["returncode"] != 0: logger.error("Failed to generate certificate signing request") sys.exit(1) config_descriptor, config_name = tempfile.mkstemp(prefix="x509v3_config-") config = os.fdopen(config_descriptor, "w") config.write("basicConstraints=CA:FALSE\n") config.write("keyUsage=digitalSignature, nonRepudiation, keyEncipherment\n") config.close() logger.info('Signing client certificate with Test CA.') if popen(["openssl", "x509", "-req", "-" + messagedigest, "-in", certReqLocation, "-CA", ca.certLocation, "-CAkey", ca.keyLocation, "-CAcreateserial", "-extfile", config_name, "-out", certLocation, "-days", str(validityperiod)])["returncode"] != 0: logger.error("Failed to sign user certificate with Test CA") sys.exit(1) os.remove(certReqLocation) os.remove(config_name) os.chmod(keyLocation, stat.S_IRUSR) os.chmod(certLocation, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) return CertificateKeyPair(keyLocation, certLocation, subject) def createParser(): parser = argparse.ArgumentParser(description='Script for generating certificates') parser.add_argument('--CA', help='Generate CA certificate with supplied name') parser.add_argument('--host', help='Generate host certificate with supplied name') parser.add_argument('--client', help='Generate client certificate with supplied name') parser.add_argument('--CA-key-path', help='Path of CA key') parser.add_argument('--CA-cert-path', help='Path of CA certificate') parser.add_argument('--validity', type=int, default=30, help='Number of days the certificates will be valid (default %(default)s)') parser.add_argument('--digest', default="sha1", help='The hash function to use for certificate signing') parser.add_argument('--list-digest', action='store_const', const=True, help='List supported hash functions') return parser if __name__ == "__main__": logger = logging.getLogger('CertificateGenerator') logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler()) parser = createParser() args = parser.parse_args() if args.list_digest: print("Supported hash functions are: %s" % ", ".join(CertificateGenerator.supportedMessageDigests)) sys.exit(0) if args.CA is None and args.host is None and args.client is None: parser.print_help() print("Error: At least one of the options '--CA', '--host', '--client' must be specified.") sys.exit(-1) if args.CA and (args.CA_key_path or args.CA_cert_path): parser.print_help() print("Error: '--CA' may not be specified with either '--CA-key-path' or '--CA-cert-path'.") sys.exit(-1) if 
args.CA_key_path and not args.CA_cert_path or not args.CA_key_path and args.CA_cert_path: parser.print_help() print("Error: Both '--CA-key-path' and '--CA-cert-path' must be specified.") sys.exit(-1) if (args.host or args.client) and not (args.CA or args.CA_key_path): parser.print_help() print("Error: When generating host or client certificates. " \ "Either '--CA' or path to existing CA certificates must be specified.") sys.exit(-1) try: CertificateGenerator.checkMessageDigest(args.digest) except Exception as e: print(e) print("Supported hash functions are: %s" % ", ".join(CertificateGenerator.supportedMessageDigests)) sys.exit(-1) cc = CertificateGenerator() ca = None if args.CA: print("Generating CA certificate and key.") ca = cc.generateCA(args.CA, validityperiod=args.validity, messagedigest=args.digest) else: print("Using specified CA certificate and key.") ca = CertificateKeyPair(args.CA_key_path, args.CA_cert_path) if args.host: print("Generating host certificate.") cc.generateHostCertificate(args.host, ca=ca, validityperiod=args.validity, messagedigest=args.digest) if args.client: print("Generating client certificate.") cc.generateClientCertificate(args.client, ca=ca, validityperiod=args.validity, messagedigest=args.digest) sys.exit(0) else: logger = logging.getLogger('ARCCTL.CertificateGenerator')
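# --- Illustrative addition (hedged usage sketch): the module's import name is
# not shown above, so the classes are assumed to be in scope, and the `long`
# checks mean this code targets Python 2.  Creates a throw-away CA plus a host
# and a client certificate signed by it under /tmp/testca.
import logging
import os

logging.basicConfig(level=logging.INFO)

work_dir = '/tmp/testca'
if not os.path.isdir(work_dir):
    os.makedirs(work_dir)

gen = CertificateGenerator(work_dir=work_dir)
ca = gen.generateCA('Test CA', validityperiod=7, messagedigest='sha256', force=True)
host = gen.generateHostCertificate('example.org', ca=ca, validityperiod=7,
                                   messagedigest='sha256', force=True)
client = gen.generateClientCertificate('Test User', ca=ca, validityperiod=7,
                                       messagedigest='sha256')
print(ca.dn)
print(host.certLocation)
print(client.certLocation)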
''' Python bindings for libmit. (c) Mit authors 2019-2020 The package is distributed under the MIT/X11 License. THIS PROGRAM IS PROVIDED AS IS, WITH NO WARRANTY. USE IS AT THE USER’S RISK. ''' from ctypes import ( CDLL, CFUNCTYPE, POINTER, c_char_p, c_int, c_size_t, c_ssize_t, c_void_p, pointer, sizeof ) from ctypes.util import find_library from .enums import MitErrorCode, Registers library_file = find_library("mit") if not library_file: # For Windows # TODO: Do this portably # TODO: Substitute version when library is versioned library_file = find_library("libmit-0") assert(library_file) libmit = CDLL(library_file) assert(libmit) # Errors class Error(Exception): ''' An error from the Python bindings for Mit. ''' pass class VMError(Error): ''' An error from Mit. Public fields: - error_code - int - message - str ''' def __init__(self, error_code, message): super().__init__(error_code, message) def errcheck(error_enum): ''' Returns a callback suitable for use as `ctypes._FuncPtr.errcheck`. - code_to_message - a mapping from int to message. If the message is `None` the code is considered to be a success, and `None` is returned. If the code is not found in `code_to_message`: - if an "ok" code exists, an "unknown error" is reported. - otherwise the result is returned unchanged. ''' code_to_message = {error.value: error.name.lower().translate(str.maketrans('_', ' ')) for error in error_enum} require_match = 'ok' in code_to_message.values() def callback(result, _func=None, _args=None): result = int(result) if result in code_to_message: message = code_to_message[result] if message == 'ok': return elif require_match: message = "unknown error!" else: return result raise VMError(result, message) return callback # Types c_word = c_ssize_t c_uword = c_size_t c_mit_fn = CFUNCTYPE( c_word, POINTER(c_word), c_word, POINTER(c_word), c_uword, POINTER(c_uword), ) # Constants word_bytes = sizeof(c_uword) assert word_bytes in (4, 8), f"word_bytes must be 4 or 8 and is {word_bytes}!" word_bit = word_bytes * 8 sign_bit = 1 << (word_bit - 1) hex0x_word_width = word_bytes * 2 + 2 # Width of a hex word with leading "0x" uword_max = c_uword(-1).value # Functions from mit.h # Errors mit_error = errcheck(MitErrorCode) # Bind `mit_run` as a function and as a function pointer, because # for some reason we can't call it when bound as a pointer. _run = c_mit_fn.in_dll(libmit, "mit_run") run_ptr = POINTER(c_mit_fn).in_dll(libmit, "mit_run") # `break_fn_ptr` must be bound as a `c_void_p` in order to be set to point # to a Python callback. break_fn_ptr = c_void_p.in_dll(libmit, "mit_break_fn") stack_words_ptr = pointer(c_uword.in_dll(libmit, "mit_stack_words")) stack_words = c_uword.in_dll(libmit, "mit_stack_words") run_simple = c_mit_fn.in_dll(libmit, "mit_run_simple") run_break = c_mit_fn.in_dll(libmit, "mit_run_break") # run_fast = c_mit_fn.in_dll(libmit, "mit_run_fast") # run_profile = c_mit_fn.in_dll(libmit, "mit_run_profile") # Cannot add errcheck to a CFUNCTYPE, so wrap it manually. def run(pc, ir, stack, stack_words, stack_depth_ptr): return mit_error(_run(pc, ir, stack, stack_words, stack_depth_ptr)) # libmit.mit_profile_reset.restype = None # libmit.mit_profile_reset.argtypes = None # libmit.mit_profile_dump.argtypes = [c_int] def is_aligned(addr): return (addr & (word_bytes - 1)) == 0 def sign_extend(x): if x & sign_bit: x |= -1 & ~uword_max return x argc = c_int.in_dll(libmit, "mit_argc") argv = POINTER(c_char_p).in_dll(libmit, "mit_argv") def register_args(*args): ''' Set `mit_argc` and `mit_argv`. 
    - args - an iterable of `str` and/or `bytes`.
    '''
    bargs = []
    for arg in args:
        if isinstance(arg, str):
            arg = bytes(arg, 'utf-8')
        assert isinstance(arg, bytes)
        bargs.append(arg)
    global argc, argv
    argc.value = len(bargs)
    argv.contents = (c_char_p * len(bargs))(*bargs)
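# --- Illustrative addition (hedged; assumes the bindings above imported
# successfully, i.e. libmit is available): two's-complement sign extension of a
# machine word and handing a Python argv to the VM.
assert sign_extend(1) == 1
assert sign_extend(uword_max) == -1                  # an all-ones word is -1 signed
assert sign_extend(sign_bit) == -(1 << (word_bit - 1))
assert is_aligned(0) and is_aligned(word_bytes) and not is_aligned(1)

register_args('vm-program', b'--flag')               # str and bytes both accepted
assert argc.value == 2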
<filename>models/np.py import torch from torch import nn from torch.distributions import Normal from torch.nn import functional as F from utils import img_mask_to_np_input class Encoder(nn.Module): """Maps an (x_i, y_i) pair to a representation r_i. Parameters ---------- x_dim : int Dimension of x values. y_dim : int Dimension of y values. h_dim : int Dimension of hidden layer. r_dim : int Dimension of output representation r. """ def __init__(self, x_dim, y_dim, h_dim, r_dim): super(Encoder, self).__init__() self.x_dim = x_dim self.y_dim = y_dim self.h_dim = h_dim self.r_dim = r_dim layers = [nn.Linear(x_dim + y_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, r_dim)] self.input_to_hidden = nn.Sequential(*layers) def forward(self, x, y): """ x : torch.Tensor Shape (batch_size, x_dim) y : torch.Tensor Shape (batch_size, y_dim) """ input_pairs = torch.cat((x, y), dim=1) return self.input_to_hidden(input_pairs) class MuSigmaEncoder(nn.Module): """ Maps a representation r to mu and sigma which will define the normal distribution from which we sample the latent variable z. Parameters ---------- r_dim : int Dimension of output representation r. z_dim : int Dimension of latent variable z. """ def __init__(self, r_dim, z_dim): super(MuSigmaEncoder, self).__init__() self.r_dim = r_dim self.z_dim = z_dim self.r_to_hidden = nn.Linear(r_dim, r_dim) self.hidden_to_mu = nn.Linear(r_dim, z_dim) self.hidden_to_sigma = nn.Linear(r_dim, z_dim) def forward(self, r): """ r : torch.Tensor Shape (batch_size, r_dim) """ hidden = torch.relu(self.r_to_hidden(r)) mu = self.hidden_to_mu(hidden) # Define sigma following convention in "Empirical Evaluation of Neural # Process Objectives" and "Attentive Neural Processes" sigma = 0.1 + 0.9 * torch.sigmoid(self.hidden_to_sigma(hidden)) return mu, sigma class Decoder(nn.Module): """ Maps target input x_target and samples z (encoding information about the context points) to predictions y_target. Parameters ---------- x_dim : int Dimension of x values. z_dim : int Dimension of latent variable z. h_dim : int Dimension of hidden layer. y_dim : int Dimension of y values. """ def __init__(self, x_dim, z_dim, h_dim, y_dim): super(Decoder, self).__init__() self.x_dim = x_dim self.z_dim = z_dim self.h_dim = h_dim self.y_dim = y_dim layers = [nn.Linear(x_dim + z_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True)] self.xz_to_hidden = nn.Sequential(*layers) self.hidden_to_mu = nn.Linear(h_dim, y_dim) self.hidden_to_sigma = nn.Linear(h_dim, y_dim) def forward(self, x, z): """ x : torch.Tensor Shape (batch_size, num_points, x_dim) z : torch.Tensor Shape (batch_size, z_dim) Returns ------- Returns mu and sigma for output distribution. Both have shape (batch_size, num_points, y_dim). """ batch_size, num_points, _ = x.size() # Repeat z, so it can be concatenated with every x. 
This changes shape # from (batch_size, z_dim) to (batch_size, num_points, z_dim) z = z.unsqueeze(1).repeat(1, num_points, 1) # Flatten x and z to fit with linear layer x_flat = x.view(batch_size * num_points, self.x_dim) z_flat = z.view(batch_size * num_points, self.z_dim) # Input is concatenation of z with every row of x input_pairs = torch.cat((x_flat, z_flat), dim=1) hidden = self.xz_to_hidden(input_pairs) mu = self.hidden_to_mu(hidden) pre_sigma = self.hidden_to_sigma(hidden) # Reshape output into expected shape mu = mu.view(batch_size, num_points, self.y_dim) pre_sigma = pre_sigma.view(batch_size, num_points, self.y_dim) # Define sigma following convention in "Empirical Evaluation of Neural # Process Objectives" and "Attentive Neural Processes" sigma = 0.1 + 0.9 * F.softplus(pre_sigma) return mu, sigma class NeuralProcess(nn.Module): """ Implements Neural Process for functions of arbitrary dimensions. Parameters ---------- x_dim : int Dimension of x values. y_dim : int Dimension of y values. r_dim : int Dimension of output representation r. z_dim : int Dimension of latent variable z. h_dim : int Dimension of hidden layer in encoder and decoder. """ def __init__(self, x_dim, y_dim, r_dim, z_dim, h_dim): super(NeuralProcess, self).__init__() self.x_dim = x_dim self.y_dim = y_dim self.r_dim = r_dim self.z_dim = z_dim self.h_dim = h_dim # Initialize networks self.xy_to_r = Encoder(x_dim, y_dim, h_dim, r_dim) self.r_to_mu_sigma = MuSigmaEncoder(r_dim, z_dim) self.xz_to_y = Decoder(x_dim, z_dim, h_dim, y_dim) def aggregate(self, r_i): """ Aggregates representations for every (x_i, y_i) pair into a single representation. Parameters ---------- r_i : torch.Tensor Shape (batch_size, num_points, r_dim) """ return torch.mean(r_i, dim=1) def xy_to_mu_sigma(self, x, y): """ Maps (x, y) pairs into the mu and sigma parameters defining the normal distribution of the latent variables z. Parameters ---------- x : torch.Tensor Shape (batch_size, num_points, x_dim) y : torch.Tensor Shape (batch_size, num_points, y_dim) """ batch_size, num_points, _ = x.size() # Flatten tensors, as encoder expects one dimensional inputs x_flat = x.view(batch_size * num_points, self.x_dim) y_flat = y.contiguous().view(batch_size * num_points, self.y_dim) # Encode each point into a representation r_i r_i_flat = self.xy_to_r(x_flat, y_flat) # Reshape tensors into batches r_i = r_i_flat.view(batch_size, num_points, self.r_dim) # Aggregate representations r_i into a single representation r r = self.aggregate(r_i) # Return parameters of distribution return self.r_to_mu_sigma(r) def forward(self, x_context, y_context, x_target, y_target=None): """ Given context pairs (x_context, y_context) and target points x_target, returns a distribution over target points y_target. Parameters ---------- x_context : torch.Tensor Shape (batch_size, num_context, x_dim). Note that x_context is a subset of x_target. y_context : torch.Tensor Shape (batch_size, num_context, y_dim) x_target : torch.Tensor Shape (batch_size, num_target, x_dim) y_target : torch.Tensor or None Shape (batch_size, num_target, y_dim). Only used during training. Note ---- We follow the convention given in "Empirical Evaluation of Neural Process Objectives" where context is a subset of target points. This was shown to work best empirically. 
""" # Infer quantities from tensor dimensions batch_size, num_context, x_dim = x_context.size() _, num_target, _ = x_target.size() _, _, y_dim = y_context.size() if self.training: # Encode target and context (context needs to be encoded to # calculate kl term) mu_target, sigma_target = self.xy_to_mu_sigma(x_target, y_target) mu_context, sigma_context = self.xy_to_mu_sigma(x_context, y_context) # Sample from encoded distribution using reparameterization trick q_target = Normal(mu_target, sigma_target) q_context = Normal(mu_context, sigma_context) z_sample = q_target.rsample() # Get parameters of output distribution y_pred_mu, y_pred_sigma = self.xz_to_y(x_target, z_sample) p_y_pred = Normal(y_pred_mu, y_pred_sigma) return p_y_pred, q_target, q_context else: # At testing time, encode only context mu_context, sigma_context = self.xy_to_mu_sigma(x_context, y_context) # Sample from distribution based on context q_context = Normal(mu_context, sigma_context) z_sample = q_context.rsample() # Predict target points based on context y_pred_mu, y_pred_sigma = self.xz_to_y(x_target, z_sample) p_y_pred = Normal(y_pred_mu, y_pred_sigma) return p_y_pred class NeuralProcessImg(nn.Module): """ Wraps regular Neural Process for image processing. Parameters ---------- img_size : tuple of ints E.g. (1, 28, 28) or (3, 32, 32) r_dim : int Dimension of output representation r. z_dim : int Dimension of latent variable z. h_dim : int Dimension of hidden layer in encoder and decoder. """ def __init__(self, img_size, r_dim, z_dim, h_dim): super(NeuralProcessImg, self).__init__() self.img_size = img_size self.num_channels, self.height, self.width = img_size self.r_dim = r_dim self.z_dim = z_dim self.h_dim = h_dim self.neural_process = NeuralProcess(x_dim=2, y_dim=self.num_channels, r_dim=r_dim, z_dim=z_dim, h_dim=h_dim) def forward(self, img, context_mask, target_mask): """ Given an image and masks of context and target points, returns a distribution over pixel intensities at the target points. Parameters ---------- img : torch.Tensor Shape (batch_size, channels, height, width) context_mask : torch.ByteTensor Shape (batch_size, height, width). Binary mask indicating the pixels to be used as context. target_mask : torch.ByteTensor Shape (batch_size, height, width). Binary mask indicating the pixels to be used as target. """ x_context, y_context = img_mask_to_np_input(img, context_mask) x_target, y_target = img_mask_to_np_input(img, target_mask) return self.neural_process(x_context, y_context, x_target, y_target)
import pandas as pd from pandas import Series, DataFrame # numpy, matplotlib, seaborn import numpy as np import matplotlib.pyplot as plt import seaborn as sns #To display header rows and description of the loaded dataset stud_df=pd.read_csv('StudentsPerformance.csv') print("======Data Headers=======") print(stud_df.head()) print("=====Data Description=====") stud_df.info() stud_df.describe() #To remove unnecessary features such as 'lunch' from the data frame stud_df = stud_df.drop(['lunch'], axis=1) print("=====Check if columns were really dropped=====") print(stud_df.head()) #To replace empty column values in ‘parental level of education’ with a default value stud_df['parental level of education'] = stud_df['parental level of education'].fillna("Y") print(stud_df['parental level of education']) #To convert the attribute ‘race/ethnicity’ to have ‘groupA’ to be ‘Asian Students’ etc findv=['group A','group B','group C','group D','group E'] repv=['Asian Students','African Students','Afro-Asian Students','American Students','European Students'] stud_df['race/ethnicity']=stud_df['race/ethnicity'].replace(findv,repv) print(stud_df['race/ethnicity']) #Visualizations #Tally of the Number of Male & Female students who took up the ‘test preparation course’ and those who did not. ax = sns.countplot(x = 'test preparation course', hue = 'gender', palette = 'Set3',data = stud_df) ax.set(title="Course completion based on gender",xlabel="course", ylabel="total") plt.show() #Total Number of Male & Female Students belonging to each student group ax = sns.countplot(x = 'race/ethnicity', hue = 'gender', palette = 'Set2',data = stud_df) ax.set(title="Based on Categories they belong to", xlabel="categories",ylabel="total") plt.show() """ No of students who ‘failed’(less than 40), ‘second class’(between 40 & 50). ‘first class’(between 60 & 75) and ‘distinction’(above 75) """ #in ‘Maths’, interval = (0,40,50,60,75) categories = ['Fail','II class','I class', 'Distinction'] stud_df['Marks_cats'] = pd.cut(stud_df.mathscore, interval, labels = categories) ax = sns.countplot(x = 'Marks_cats', data = stud_df, hue = 'gender', palette = 'Set1') ax.set(xlabel='Marks Categorical', ylabel='Total',title="Math Marks Categorical Distribution") plt.show() #in ‘Writing’. interval = (0,40,50,60,75) categories = ['Fail','II class','I class', 'Distinction'] stud_df['Marks_cats'] = pd.cut(stud_df.writingscore, interval, labels = categories) ax = sns.countplot(x = 'Marks_cats', data = stud_df, hue = 'gender', palette = 'Set2') ax.set(xlabel='Marks Categorical', ylabel='Total',title="Writing Marks Categorical Distribution") plt.show() #in ‘Reading’ interval = (0,40,50,60,75) categories = ['Fail','II class','I class', 'Distinction'] stud_df['Marks_cats'] = pd.cut(stud_df.readingscore, interval, labels = categories) ax = sns.countplot(x = 'Marks_cats', data = stud_df, hue = 'gender', palette = 'Set3') ax.set(xlabel='Marks Categorical', ylabel='Total',title="Reading Marks Categorical Distribution") plt.show() #To find the average Maths, Reading and Writing Score of each Group (Ethnicity) print(stud_df[['race/ethnicity','mathscore','writingscore','readingscore']].groupby(['race/ethnicity'],as_index=True).mean())
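# --- Illustrative addition (hedged note on the pd.cut calls above): with edges
# (0, 40, 50, 60, 75) the four labels map to the bins (0,40], (40,50], (50,60]
# and (60,75], so any score above 75 falls outside every bin and gets NaN rather
# than 'Distinction'.  A tiny self-contained demonstration:
import pandas as pd

scores = pd.DataFrame({'mathscore': [35, 45, 62, 80]})
interval = (0, 40, 50, 60, 75)
categories = ['Fail', 'II class', 'I class', 'Distinction']
scores['Marks_cats'] = pd.cut(scores.mathscore, interval, labels=categories)
print(scores)   # the row with 80 gets NaN; adding an upper edge (e.g. 100) and a
                # matching fifth label is one way to keep top scores in a bucket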
<reponame>LanetheGreat/mcmaps # Copyright (c) 1995, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # Ported by <NAME> 2020. # # This code is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License version 2 only, as # published by the Free Software Foundation. Oracle designates this # particular file as subject to the "Classpath" exception as provided # by Oracle in the LICENSE file that accompanied this code. # # This code is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # version 2 for more details (a copy is included in the LICENSE file that # accompanied this code). # # You should have received a copy of the GNU General Public License version # 2 along with this work; if not, write to the Free Software Foundation, # Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. # # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. ''' Python3 equivalent of Java's Random library class ''' import ctypes, time from math import log, sqrt __all__ = ['Random'] class _DoubleBits(ctypes.Union): _fields_ = ( ('double_value', ctypes.c_double), ('long_value', ctypes.c_long), ) class Random: ''' An instance of this class is used to generate a stream of pseudorandom numbers. The class uses a 48-bit seed, which is modified using a linear congruential formula. (See <NAME>, _The Art of Computer Programming, Volume 2_, Section 3.2.1.) If two instances of `Random` are created with the same seed, and the same sequence of method calls is made for each, they will generate and return identical sequences of numbers. In order to guarantee this property, particular algorithms are specified for the class `Random`. Java implementations must use all the algorithms shown here for the class `Random`, for the sake of absolute portability of Java code. However, subclasses of class `Random` are permitted to use other algorithms, so long as they adhere to the general contracts for all the methods. The algorithms implemented by class `Random` use a `protected` utility method that on each invocation can supply up to 32 pseudorandomly generated bits. Many applications will find the method Math#random simpler to use. Instances of `java.util.Random` are threadsafe. However, the concurrent use of the same `java.util.Random` instance across threads may encounter contention and consequent poor performance. Consider instead using `java.util.concurrent.ThreadLocalRandom` in multithreaded designs. Instances of `java.util.Random` are not cryptographically secure. Consider instead using `java.security.SecureRandom` to get a cryptographically secure pseudo-random number generator for use by security-sensitive applications. 
''' __slots__ = ('seed', 'nextNextGaussian', 'haveNextNextGaussian') multiplier = 0x5DEECE66D addend = 0xB mask = (1 << 48) - 1 unique_seed = ctypes.c_uint64(8682522807148012) DOUBLE_UNIT = 1.0 / (1 << 53) @classmethod def seedUniquifier(self): next_seed = Random.unique_seed.value * 181783497276652981 Random.unique_seed.value = next_seed return next_seed @classmethod def initialScramble(self, seed): return (seed ^ self.multiplier) & self.mask def __init__(self, seed=None): ''' Creates a new random number generator using a single `long` seed. The seed is the initial value of the internal state of the pseudorandom number generator which is maintained by method `next`. The invocation `Random(seed)` is equivalent to: ``` Random rnd = new Random(); rnd.setSeed(seed);} ``` :param seed the initial seed See setSeed(). ''' self.seed = 0 self.nextNextGaussian = 0.0 self.haveNextNextGaussian = False if seed is None: seed = self.seedUniquifier() ^ int(time.monotonic() * 1e9) self.setSeed(seed) def setSeed(self, seed): ''' Sets the seed of this random number generator using a single `long` seed. The general contract of `setSeed` is that it alters the state of this random number generator object so as to be in exactly the same state as if it had just been created with the argument `seed` as a seed. The method `setSeed` is implemented by class `Random` by atomically updating the seed to `(seed ^ 0x5DEECE66DL) & ((1L << 48) - 1)` and clearing the `haveNextNextGaussian` flag used by `nextGaussian`. The implementation of `setSeed` by class `Random` happens to use only 48 bits of the given seed. In general, however, an overriding method may use all 64 bits of the `long` argument as a seed value. :param seed the initial seed ''' self.seed = self.initialScramble(seed) self.haveNextNextGaussian = False def next(self, bits): ''' Generates the next pseudorandom number. Subclasses should override this, as this is used by all other methods. The general contract of `next` is that it returns an `int` value and if the argument `bits` is between `1` and `32` (inclusive), then that many low-order bits of the returned value will be (approximately) independently chosen bit values, each of which is (approximately) equally likely to be `0` or `1`. The method `next` is implemented by class `Random` by atomically updating the seed to `(seed * 0x5DEECE66DL + 0xBL) & ((1L << 48) - 1)` and returning `(int)(seed >>> (48 - bits))` This is a linear congruential pseudorandom number generator, as defined by <NAME> and described by <NAME> in _The Art of Computer Programming,_ Volume 3: _Seminumerical Algorithms_, section 3.2.1. :param bits random bits :return the next pseudorandom value from this random number generator's sequence ''' nextseed = (self.seed * self.multiplier + self.addend) & self.mask self.seed = nextseed return (nextseed >> (48 - bits)) def nextBytes(self, buffer): ''' Generates random bytes and places them into a user-supplied bytearray. The number of random bytes produced is equal to the length of the byte array. 
The method `nextBytes` is implemented by class `Random` as if by: ```java public void nextBytes(byte[] bytes) { for (int i = 0; i < bytes.length; ) for (int rnd = nextInt(), n = Math.min(bytes.length - i, 4); n-- > 0; rnd >>= 8) bytes[i++] = (byte)rnd; }} ``` :param bytes the bytearray to fill with random bytes ''' bLen = len(buffer) i = 0 while i < bLen: rnd = self.nextInt() for _ in range(min(bLen - i, 4)): buffer[i] = rnd & 0xFF rnd >>= 8 i += 1 def nextInt(self, bound=None): ''' Returns the next pseudorandom, uniformly distributed `int` value from this random number generator's sequence. The general contract of `nextInt` is that one `int` value is pseudorandomly generated and returned. All 2<sup>32</sup> possible `int` values are produced with (approximately) equal probability. The method `nextInt` is implemented by class `Random` as if by: ``` public int nextInt() { return next(32); }} ``` or if bound is specified: Returns a pseudorandom, uniformly distributed `int` value between 0 (inclusive) and the specified value (exclusive), drawn from this random number generator's sequence. The general contract of `nextInt` is that one `int` value in the specified range is pseudorandomly generated and returned. All `bound` possible `int` values are produced with (approximately) equal probability. The method `nextInt(int bound)` is implemented by class `Random` as if by: ``` public int nextInt(int bound) { if (bound <= 0) throw new IllegalArgumentException("bound must be positive"); if ((bound & -bound) == bound) // i.e., bound is a power of 2 return (int)((bound * (long)next(31)) >> 31); int bits, val; do { bits = next(31); val = bits % bound; } while (bits - val + (bound-1) < 0); return val; }} ``` The hedge "approximately" is used in the foregoing description only because the next method is only approximately an unbiased source of independently chosen bits. If it were a perfect source of randomly chosen bits, then the algorithm shown would choose `int` values from the stated range with perfect uniformity. The algorithm is slightly tricky. It rejects values that would result in an uneven distribution (due to the fact that 2^31 is not divisible by n). The probability of a value being rejected depends on n. The worst case is n=2^30+1, for which the probability of a reject is 1/2, and the expected number of iterations before the loop terminates is 2. The algorithm treats the case where n is a power of two specially: it returns the correct number of high-order bits from the underlying pseudo-random number generator. In the absence of special treatment, the correct number of _low-order_ bits would be returned. Linear congruential pseudo-random number generators such as the one implemented by this class are known to have short periods in the sequence of values of their low-order bits. Thus, this special case greatly increases the length of the sequence of values returned by successive calls to this method if n is a small power of two. :param bound the upper bound (exclusive). Must be positive or None. 
:return the next pseudorandom, uniformly distributed `int` value between zero (inclusive) and `bound` (exclusive) if specified, from this random number generator's sequence :throws ValueError if bound is not positive ''' if bound is None: return self.next(32) if bound <= 0: raise ValueError('bound must be greater than 0') r = self.next(31) m = bound - 1 if not bound & m: r = ((bound * r) >> 31) & 0xFFFFFFFF else: u = r r = u % bound while u - r + m < 0: u = self.next(31) r = u % bound return r def nextLong(self, bound=None): ''' Returns the next pseudorandom, uniformly distributed `long` value from this random number generator's sequence. The general contract of `nextLong` is that one `long` value is pseudorandomly generated and returned. The method `nextLong` is implemented by class `Random` as if by: ``` public long nextLong() { return ((long)next(32) << 32) + next(32); }} ``` Because class `Random` uses a seed with only 48 bits, this algorithm will not return all possible `long` values. :param bound the upper bound (exclusive). Must be positive or None. :return the next pseudorandom, uniformly distributed `long` value between zero (inclusive) and `bound` (exclusive) if specified, from this random number generator's sequence :throws ValueError if bound is not positive ''' if bound is None: return (self.next(32) << 32) + self.next(32) if bound <= 0: raise ValueError('bound must be greater than 0') r = (self.next(32) << 32) + self.next(32) m = bound - 1 if not bound & m: r = (r & m) else: u = r >> 1 r = u % bound while u + m - r < 0: u = ((self.next(32) << 32) + self.next(32)) >> 1 r = u % bound return r def nextBoolean(self): ''' Returns the next pseudorandom, uniformly distributed `boolean` value from this random number generator's sequence. The general contract of `nextBoolean` is that one `boolean` value is pseudorandomly generated and returned. The values `true` and `false` are produced with (approximately) equal probability. The method `nextBoolean` is implemented by class `Random` as if by: ``` public boolean nextBoolean() { return next(1) != 0; }} ``` :return the next pseudorandom, uniformly distributed `boolean` value from this random number generator's sequence ''' return bool(self.next(1)) def nextFloat(self): ''' Returns the next pseudorandom, uniformly distributed `float` value between `0.0` and `1.0` from this random number generator's sequence. The general contract of `nextFloat` is that one `float` value, chosen (approximately) uniformly from the range `0.0f` (inclusive) to `1.0f` (exclusive), is pseudorandomly generated and returned. All 2<sup>24</sup> possible `float` values of the form _m x 2^-24, where _m_ is a positive integer less than 2^24, are produced with (approximately) equal probability. The method `nextFloat` is implemented by class `Random` as if by: ``` public float nextFloat() { return next(24) / ((float)(1 << 24)); }} ``` The hedge "approximately" is used in the foregoing description only because the next method is only approximately an unbiased source of independently chosen bits. 
If it were a perfect source of randomly chosen bits, then the algorithm shown would choose `float` values from the stated range with perfect uniformity.<p> [In early versions of Java, the result was incorrectly calculated as: `return next(30) / ((float)(1 << 30));}` This might seem to be equivalent, if not better, but in fact it introduced a slight nonuniformity because of the bias in the rounding of floating-point numbers: it was slightly more likely that the low-order bit of the significand would be 0 than that it would be 1.] :return the next pseudorandom, uniformly distributed `float` value between `0.0` and `1.0` from this random number generator's sequence ''' return self.next(24) / float(1 << 24) def nextDouble(self): ''' Returns the next pseudorandom, uniformly distributed `double` value between `0.0` and `1.0` from this random number generator's sequence. The general contract of `nextDouble` is that one `double` value, chosen (approximately) uniformly from the range `0.0d` (inclusive) to `1.0d` (exclusive), is pseudorandomly generated and returned. The method `nextDouble` is implemented by class `Random` as if by: ``` public double nextDouble() { return (((long)next(26) << 27) + next(27)) / (double)(1L << 53); }} ``` The hedge "approximately" is used in the foregoing description only because the `next` method is only approximately an unbiased source of independently chosen bits. If it were a perfect source of randomly chosen bits, then the algorithm shown would choose `double` values from the stated range with perfect uniformity. [In early versions of Java, the result was incorrectly calculated as: `return (((long)next(27) << 27) + next(27)) / (double)(1L << 54);` This might seem to be equivalent, if not better, but in fact it introduced a large nonuniformity because of the bias in the rounding of floating-point numbers: it was three times as likely that the low-order bit of the significand would be 0 than that it would be 1! This nonuniformity probably doesn't matter much in practice, but we strive for perfection.] :return the next pseudorandom, uniformly distributed `double` value between `0.0` and `1.0` from this random number generator's sequence ''' return ((self.next(26) << 27) + self.next(27)) * self.DOUBLE_UNIT def nextGaussian(self): ''' Returns the next pseudorandom, Gaussian ("normally") distributed `double` value with mean `0.0` and standard deviation `1.0` from this random number generator's sequence. The general contract of `nextGaussian` is that one `double` value, chosen from (approximately) the usual normal distribution with mean `0.0` and standard deviation `1.0`, is pseudorandomly generated and returned. The method `nextGaussian` is implemented by class `Random` as if by a threadsafe version of the following: ``` private double nextNextGaussian; private boolean haveNextNextGaussian = false; public double nextGaussian() { if (haveNextNextGaussian) { haveNextNextGaussian = false; return nextNextGaussian; } else { double v1, v2, s; do { v1 = 2 * nextDouble() - 1; // between -1.0 and 1.0 v2 = 2 * nextDouble() - 1; // between -1.0 and 1.0 s = v1 * v1 + v2 * v2; } while (s >= 1 || s == 0); double multiplier = StrictMath.sqrt(-2 * StrictMath.log(s)/s); nextNextGaussian = v2 * multiplier; haveNextNextGaussian = true; return v1 * multiplier; } }} ``` This uses the _polar method_ of <NAME>, <NAME>, and <NAME>, as described by <NAME> in _The Art of Computer Programming_, Volume 3: _Seminumerical Algorithms_, section 3.4.1, subsection C, algorithm P. 
Note that it generates two independent values at the cost of only one call to `StrictMath.log` and one call to `StrictMath.sqrt`. :return the next pseudorandom, Gaussian ("normally") distributed `double` value with mean `0.0` and standard deviation `1.0` from this random number generator's sequence ''' if self.haveNextNextGaussian: self.haveNextNextGaussian = False return self.nextNextGaussian v1 = 2.0 * self.nextDouble() - 1.0 v2 = 2.0 * self.nextDouble() - 1.0 s = v1 * v1 + v2 * v2 while s >= 1.0 or s == 0.0: v1 = 2.0 * self.nextDouble() - 1.0 v2 = 2.0 * self.nextDouble() - 1.0 s = v1 * v1 + v2 * v2 multiplier = sqrt(-2.0 * log(s) / s) self.nextNextGaussian = v2 * multiplier self.haveNextNextGaussian = True return v1 * multiplier def ints(self, streamSize=None, randomNumberOrigin=None, randomNumberBound=None): ''' Returns a stream producing the given `streamSize` number of pseudorandom `int` values or an unlimited stream if `streamSize` is `None`, each conforming to the given origin (inclusive) and bound (exclusive) if `randomNumberOrigin` and `randomNumberBound` are specified. A pseudorandom `int` value is generated as if it's the result of calling the method `nextInt()`. :param streamSize the number of values to generate or `None` :param randomNumberOrigin the origin (inclusive) of each random value :param randomNumberBound the bound (exclusive) of each random value :return a stream of pseudorandom `int` values, each with the given origin (inclusive) and bound (exclusive) :throws ValueError if `streamSize` is less than zero, or `randomNumberOrigin` is greater than or equal to `randomNumberBound` ''' if streamSize is not None and streamSize < 0: raise ValueError('streamSize must be non-negative') if randomNumberOrigin is not None or randomNumberBound is not None: if randomNumberOrigin is None or randomNumberBound is None: raise ValueError('randomNumberOrigin and randomNumberBound must both be specified, if either is not None') if randomNumberOrigin >= randomNumberBound: raise ValueError('randomNumberOrigin must be less than randomNumberBound') n = randomNumberBound - randomNumberOrigin if streamSize is None: while True: yield self.nextInt(n) + randomNumberOrigin else: for _ in range(streamSize): yield self.nextInt(n) + randomNumberOrigin else: if streamSize is None: while True: yield self.nextInt() else: for _ in range(streamSize): yield self.nextInt() def longs(self, streamSize=None, randomNumberOrigin=None, randomNumberBound=None): ''' Returns a stream producing the given `streamSize` number of pseudorandom `long` values or an unlimited stream if `streamSize` is `None`, each conforming to the given origin (inclusive) and bound (exclusive) if `randomNumberOrigin` and `randomNumberBound` are specified. A pseudorandom `long` value is generated as if it's the result of calling the method `nextLong()`. 
:param streamSize the number of values to generate or `None` :param randomNumberOrigin the origin (inclusive) of each random value :param randomNumberBound the bound (exclusive) of each random value :return a stream of pseudorandom `long` values, each with the given origin (inclusive) and bound (exclusive) :throws ValueError if `streamSize` is less than zero, or `randomNumberOrigin` is greater than or equal to `randomNumberBound` ''' if streamSize is not None and streamSize < 0: raise ValueError('streamSize must be non-negative') if randomNumberOrigin is not None or randomNumberBound is not None: if randomNumberOrigin is None or randomNumberBound is None: raise ValueError('randomNumberOrigin and randomNumberBound must both be specified, if either is not None') if randomNumberOrigin >= randomNumberBound: raise ValueError('randomNumberOrigin must be less than randomNumberBound') n = randomNumberBound - randomNumberOrigin if streamSize is None: while True: yield self.nextLong(n) + randomNumberOrigin else: for _ in range(streamSize): yield self.nextLong(n) + randomNumberOrigin else: if streamSize is None: while True: yield self.nextLong() else: for _ in range(streamSize): yield self.nextLong() def _internalNextDouble(self, origin, bound, n): r = self.nextDouble() r = r * n + origin if r >= bound: # correct for rounding bits = _DoubleBits(double_value=r) bits.long_value -= 1 return bits.double_value return r def doubles(self, streamSize=None, randomNumberOrigin=None, randomNumberBound=None): ''' Returns a stream producing the given `streamSize` number of pseudorandom `double` values or an unlimited stream if `streamSize` is `None`, each conforming to the given origin (inclusive) and bound (exclusive) if `randomNumberOrigin` and `randomNumberBound` are specified. A pseudorandom `double` value is generated as if it's the result of calling the method `nextDouble()`. :param streamSize the number of values to generate or `None` :param randomNumberOrigin the origin (inclusive) of each random value :param randomNumberBound the bound (exclusive) of each random value :return a stream of pseudorandom `double` values, each with the given origin (inclusive) and bound (exclusive) :throws ValueError if `streamSize` is less than zero, or `randomNumberOrigin` is greater than or equal to `randomNumberBound` ''' if streamSize is not None and streamSize < 0: raise ValueError('streamSize must be non-negative') if randomNumberOrigin is not None or randomNumberBound is not None: if randomNumberOrigin is None or randomNumberBound is None: raise ValueError('randomNumberOrigin and randomNumberBound must both be specified, if either is not None') if randomNumberOrigin >= randomNumberBound: raise ValueError('randomNumberOrigin must be less than randomNumberBound') n = randomNumberBound - randomNumberOrigin if streamSize is None: while True: yield self._internalNextDouble(randomNumberOrigin, randomNumberBound, n) else: for _ in range(streamSize): yield self._internalNextDouble(randomNumberOrigin, randomNumberBound, n) else: if streamSize is None: while True: yield self.nextDouble() else: for _ in range(streamSize): yield self.nextDouble()
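
# ----------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the original port): a minimal,
# hedged demonstration of the Random class above. It assumes the module-level
# imports this file already relies on (time, ctypes, sqrt/log and the
# _DoubleBits helper) are present further up, and that the class is named
# `Random` as defined here. The API mirrors java.util.Random, but the values
# are not guaranteed to be bit-identical to Java for every method.
if __name__ == '__main__':
    rnd = Random(12345)                  # fixed seed -> reproducible sequence
    print(rnd.nextInt())                 # unbounded 32-bit value
    print(rnd.nextInt(10))               # bounded int in [0, 10)
    print(rnd.nextBoolean())             # pseudorandom boolean
    print(rnd.nextDouble())              # double in [0.0, 1.0)
    print(rnd.nextGaussian())            # normally distributed double

    buf = bytearray(8)
    rnd.nextBytes(buf)                   # fill a user-supplied bytearray in place
    print(buf.hex())

    # Bounded stream of three ints in [0, 100)
    print(list(rnd.ints(streamSize=3,
                        randomNumberOrigin=0,
                        randomNumberBound=100)))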

# Repository: minhooo/Sweetheart
import cv2
import numpy as np
from os import makedirs
from os.path import isdir
from flask import Flask, jsonify
from flask_restful import Resource, Api, reqparse
from flask_cors import CORS
import os
from os import listdir
from os.path import isfile, join
import sys

# Face storage: output directory and Haar cascade face classifier
face_dirs = 'faces/'
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Face detection function
def face_extractor(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)

    # No face detected: skip
    if len(faces) == 0:
        return None

    # If a face is found, crop the image to the face region only
    for (x, y, w, h) in faces:
        cropped_face = img[y:y+h, x:x+w]

    return cropped_face

# Function that would save faces per person name (kept commented out)
# def take_pictures(name):
#     if not isdir(face_dirs+name):
#         makedirs(face_dirs+name)

# Turn the camera on and collect face samples
def face_detection():
    print('face_detection')
    cap = cv2.VideoCapture(0)
    count = 0

    while True:
        # Read a single frame from the camera
        ret, frame = cap.read()
        # Detect the face and keep only the face region
        if face_extractor(frame) is not None:
            count += 1
            # Resize the face image to 200 x 200
            face = cv2.resize(face_extractor(frame), (200, 200))
            # Convert the resized image to grayscale
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)

            # Save as a jpg file in the faces folder
            file_name_path = 'faces/user' + str(count) + '.jpg'
            cv2.imwrite(file_name_path, face)

            # Show the face and the current sample count on screen
            cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
            cv2.imshow('Face Cropper', face)
        else:
            print("Face not Found")
            pass

        if cv2.waitKey(1) == 13 or count == 100:
            break

    cap.release()
    cv2.destroyAllWindows()
    print('Collecting Samples Complete!!!')
    face_learning()

def face_learning():
    print('face_learning')
    data_path = 'faces/'
    # Get the list of files in the faces folder
    onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path, f))]

    # Training data and the labels matched to it
    Training_Data, Labels = [], []

    # Loop over all files
    for i, files in enumerate(onlyfiles):
        image_path = data_path + onlyfiles[i]
        # Load the image
        images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        # Skip entries that are not image files or could not be read
        if images is None:
            continue
        Training_Data.append(np.asarray(images, dtype=np.uint8))
        # Append the counter index to the Labels list
        Labels.append(i)

    # Convert Labels to 32-bit integers
    Labels = np.asarray(Labels, dtype=np.int32)

    # Create the model
    model = cv2.face.LBPHFaceRecognizer_create()

    # Start training
    model.train(np.asarray(Training_Data), np.asarray(Labels))
    print("Model Training Complete!!!!!")

def face_detector(img, size=0.5):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)

    if len(faces) == 0:
        return img, []

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
        roi = img[y:y + h, x:x + w]
        roi = cv2.resize(roi, (200, 200))

    return img, roi

def on_face_login():
    print('on_face_login()')
    data_path = 'faces/'
    onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path, f))]
    Training_Data, Labels = [], []

    for i, files in enumerate(onlyfiles):
        image_path = data_path + onlyfiles[i]
        images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        Training_Data.append(np.asarray(images, dtype=np.uint8))
        Labels.append(i)

    Labels = np.asarray(Labels, dtype=np.int32)

    model = cv2.face.LBPHFaceRecognizer_create()
    model.train(np.asarray(Training_Data), np.asarray(Labels))
    print("Model Training Complete!!!!!")

    face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    # Open the camera
    cap = cv2.VideoCapture(0)

    while True:
        # Read a single frame from the camera
        ret, frame = cap.read()
        # Try to detect a face
        image, face = face_detector(frame)

        try:
            # Convert the detected face image to grayscale
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
            # Predict with the model trained above
            result = model.predict(face)

            # result[1] is the confidence distance; the closer to 0, the more
            # likely it is the same person
            if result[1] < 500:
                confidence = int(100 * (1 - (result[1]) / 300))
                # Display the similarity on screen
                display_string = str(confidence) + '% Confidence it is user'
                cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (250, 120, 255), 2)

                # Treat as the same person when confidence exceeds the threshold (80 here, adjustable)
                if confidence > 80:
                    cv2.putText(image, "Unlocked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                    cv2.imshow('Face Cropper', image)
                    return "Success"
                else:
                    # Below the threshold: stay locked
                    cv2.putText(image, "Locked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
                    cv2.imshow('Face Cropper', image)
        except:
            # No face detected
            cv2.putText(image, "Face Not Found", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
            cv2.imshow('Face Cropper', image)
            pass

        if cv2.waitKey(1) == 13:
            break

    cap.release()
    cv2.destroyAllWindows()

app = Flask(__name__)
api = Api(app)
CORS(app, resources={r'/*': {'origins': '*'}})

@app.route('/faceDetection', methods=['POST'])
def face_register():
    print('face register()')
    face_detection()
    return "face detection success"

@app.route('/faceLogin', methods=['GET'])
def face_login():
    returnResult = on_face_login()
    print(returnResult)
    return returnResult

if __name__ == '__main__':
    app.run(host='localhost', port=os.getenv('FLASK_RUN_PORT'), debug=os.getenv('FLASK_DEBUG'))
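
# ----------------------------------------------------------------------------
# Client sketch (editorial addition, not part of the original app): a hedged
# example of how another process could call the two endpoints defined above.
# It assumes the server was started with FLASK_RUN_PORT=5000 (illustrative
# only) and that the third-party `requests` package is installed; neither is
# implied by the original code. Note that both endpoints open the local
# webcam and block until sampling or recognition finishes.
def _example_client(base_url='http://localhost:5000'):
    import requests

    # POST /faceDetection: the server captures ~100 cropped 200x200 grayscale
    # samples into faces/ and then trains the LBPH model.
    print(requests.post(base_url + '/faceDetection').text)

    # GET /faceLogin: returns "Success" once a live frame matches the trained
    # model with confidence above the threshold used in on_face_login().
    print(requests.get(base_url + '/faceLogin').text)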
#!/usr/bin/env python
# Repository: JurgenVanGorp/MCP23017-multi-IO-control-on-a-Raspberry-Pi-with-I2C
"""
MCP23017 Control Service.
A service that acts as an interface between clients (e.g. Home Assistant) and the
I2C bus on a Raspberry Pi.
Author: find me on codeproject.com --> JurgenVanGorp
"""
import traceback
import os
import sys
import time
import logging
import redis
from logging.handlers import RotatingFileHandler
import xml.etree.ElementTree as ET
from datetime import datetime
from smbus2 import SMBus
from threading import Thread, Lock

###
### USER EDITABLE CONSTANTS #####################################################################
###

# CONFIGURATION_FILE is the name of the configuration file that will be written in the home
# directory of the current user when running the program. The configuration file is read at
# the first start of the program, e.g. after a power failure, to make sure that the
# MCP23017 devices are reconfigured to their latest state.
# The CONFIGURATION_FILE contains the latest configured MCP23017 DIR values, which will
# be written once to the I2C bus after e.g. a cold boot.
# Remark that the dot in front of the filename makes it invisible for a regular ls.
# CONFIGURATION_FILE = ".mcp23017control.xml"  --> Default value
# CONFIGURATION_FILE = ''  --> Set to empty string to disable this feature.
CONFIGURATION_FILE = ".mcp23017server.xml"

# LOG_LEVEL determines the level of logging output into the system logs.
# Log Level = 0 --> No logging at all
# Log Level = 1 --> (DEFAULT) give details on application status and errors only
# Log Level = 2 --> Babble, babble, babble ...
# Remark that the dot in front of the filename makes it invisible. The file is saved
# in your home folder.
LOG_LEVEL = 1
LOG_FILE = '.mcp23017server.log'

# DEMO_MODE_ONLY = True  --> Print on screen what would happen on the I2C bus. Use this
#   when e.g. running the program manually (not as a service) to verify operation for
#   your own software.
# DEMO_MODE_ONLY = False --> Actually write the values on the I2C bus
DEMO_MODE_ONLY = False

# Acceptable commands for controlling the I2C bus.
# These are the commands you need to use to control the DIR register of the MCP23017, or
# for setting and clearing pins.
FINDBOARD = "IDENTIFY"        # Identify board number, return 1 if found on the I2C bus
GETDIRBIT = "GETDBIT"         # Read the specific IO pin dir value (1 = input)
GETDIRREGISTER = "GETDIRREG"  # Read the full DIR register (low:1 or high:2)
SETDIRBIT = "SETDBIT"         # Set DIR pin to INPUT (1)
CLEARDIRBIT = "CLRDBIT"       # Clear DIR pin command to OUTPUT (0)
GETIOPIN = "GETPIN"           # Read the specific IO pin value
GETIOREGISTER = "GETIOREG"    # Read the full IO register (low:1 or high:2)
SETDATAPIN = "SETPIN"         # Set pin to high
CLEARDATAPIN = "CLRPIN"       # Set pin to low
TOGGLEPIN = "TOGGLE"          # Toggle a pin to the "other" value for TOGGLEDELAY time
                              # If a pin is high, it will be set to low, and vice versa
TOGGLEDELAY = 0.1             # Seconds that the pin will be toggled. Default = 100 msec

# The COMMAND_TIMEOUT value is the maximum time (in seconds) that is allowed between pushing a
# button and the action that must follow. This is done to protect you from delayed actions
# whenever the I2C bus is heavily used, or the CPU is overloaded. If you e.g. push a button,
# and the I2C is too busy with other commands, the push-button command is ignored when
# COMMAND_TIMEOUT seconds have passed. Typically you would push the button again if nothing
# happens after one or two seconds.
If both commands are stored, the light is switched on and # immediately switched off again. # Recommended minimum value one or two seconds # COMMAND_TIMEOUT = 2 # Recommended maximum value is 10 seconds. Feel free to set higher values, but be prepared that # you can can experience strange behaviour if there is a lot of latency on the bus. COMMAND_TIMEOUT = 1.5 # Communications between Clients and the server happen through a Redis in-memory database # so to limit the number of writes on the (SSD or microSD) storage. For larger implementations # dozens to hundreds of requests can happen per second. Writing to disk would slow down the # process, and may damage the storage. # Make sure to have Redis installed in the proper locations, e.g. also in the virtual python # environments. The default is that Redis is installed on localhost (127.0.0.1). REDIS_HOST = 'localhost' REDIS_PORT = 6379 ### ### PROGRAM INTERNAL CONSTANTS #################################################################### ### # Software version VERSION = '0.9.0' # MCP23017 default parameters are that you can address the devices in the 0x20 to 0x2F # address space with the three selector pins. You can change these if you want to use # the software for other I2C devices. MINBOARDID = 0x20 # Minimum I2C address MAXBOARDID = 0x2f # Maximum I2C address MINPIN = 0x00 # Minimum pin on the MCP23017 MAXPIN = 0x10 # Maximum pin on the MCP23017, +1 (i.e. must be lower than this value) # TimeOut in seonds before the threads are considered dead. If the time-out is reached, # the thread will crash and die, and is expected to be restarted as a service WATCHDOG_TIMEOUT = 5 ### Define MCP23017 specific registers IODIRA = 0x00 # IO direction A - 1= input 0 = output IODIRB = 0x01 # IO direction B - 1= input 0 = output IPOLA = 0x02 # Input polarity A IPOLB = 0x03 # Input polarity B GPINTENA = 0x04 # Interrupt-onchange A GPINTENB = 0x05 # Interrupt-onchange B DEFVALA = 0x06 # Default value for port A DEFVALB = 0x07 # Default value for port B INTCONA = 0x08 # Interrupt control register for port A INTCONB = 0x09 # Interrupt control register for port B IOCON = 0x0A # Configuration register GPPUA = 0x0C # Pull-up resistors for port A GPPUB = 0x0D # Pull-up resistors for port B INTFA = 0x0E # Interrupt condition for port A INTFB = 0x0F # Interrupt condition for port B INTCAPA = 0x10 # Interrupt capture for port A INTCAPB = 0x11 # Interrupt capture for port B GPIOA = 0x12 # Data port A GPIOB = 0x13 # Data port B OLATA = 0x14 # Output latches A OLATB = 0x15 # Output latches B ALLOUTPUTS = "0xff" # Initial value of DIR register if not yet used # The dummy command is sent during initialization of the database and verification if # the database can be written to. Dummy commands are not processed. DUMMY_COMMAND = 'dummycommand' ### END OF CONSTANTS SECTION ######################################################### class databaseHandler(): """ A class for communicating between the server and clients through a shared memory Redis database. Two databases are initiated (or used) for communicating from client to server (0) or from server to client (1). """ def __init__(self, the_log): # Commands have id datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f")}, i.e. the primary key is a timestamp. # Commands given at exactly the same time, will overwrite each other, but this is not expected to happen. # The commands table is then formatted as (all fields are TEXT, even if formatted as "0xff" !!) 
# id, command TEXT, boardnr TEXT DEFAULT '0x00', pinnr TEXT DEFAULT '0x00', datavalue TEXT DEFAULT '0x00' self._commands = None # Responses have id datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f")}, i.e. the primary key is a timestamp. # The Responses table is then formatted as (all fields are TEXT, even if formatted as "0xff" !!) # id, command_id TEXT, datavalue TEXT, response TEXT self._responses = None # Copy logfile to local self._log = the_log # Initialize database self.OpenAndVerifyDatabase() def OpenAndVerifyDatabase(self): """ Opens an existing database, or creates a new one if not yet existing. Then verifies if the Redis database is accessible. """ # First try to open the database itself. try: # Open the shared memory databases. # Redis database [0] is for commands that are sent from the clients to the server. nowTrying = "Commands" self._log.info(1, "Opening Commands database.") self._commands = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0) # Redis database [1] is for responses from the server so the clients. nowTrying = "Responses" self._log.info(1, "Opening Responses database.") self._responses = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=1) except OSError as err: # Capturing OS error. self._log.error(1, "FATAL OS ERROR. Could not open [{}] database. This program is now exiting with error [{}].".format(nowTrying, err)) # If a database cannot be opened, this program makes no sense, so exiting. sys.exit(1) except: # Capturing all other errors. self._log.error(1, "FATAL UNEXPECTED ERROR. Could not open [{}] database. This program is now exiting with error [{}].".format(nowTrying, sys.exc_info()[0])) # If a database cannot be opened, this program makes no sense, so exiting. sys.exit(1) # Do a dummy write to the Commands database, as verification that the database is fully up and running. try: # Remember: fields are: id, command TEXT, boardnr TEXT DEFAULT '0x00', pinnr TEXT DEFAULT '0x00', datavalue TEXT DEFAULT '0x00' self._log.info(2, "Verifying Commands database with dummy write.") id = (datetime.now() - datetime.utcfromtimestamp(0)).total_seconds() datamap = {'command':DUMMY_COMMAND, 'boardnr':0x00, 'pinnr':0xff, 'datavalue':0x00} # Write the info to the Redis database self._commands.hset(id, None, None, datamap) # Set expiration to a short 1 second, after which Redis will automatically delete the record self._commands.expire(id, 1) except: # Capturing all errors. self._log.error(1, "FATAL UNEXPECTED ERROR. Could not read and/or write the [Commands] database. This program is now exiting with error [{}].".format(sys.exc_info()[0])) # If a database cannot be processed, this program makes no sense, so exiting. sys.exit(1) # Next, do a dummy write to the Responses database, as verification that the database is fully up and running. try: # Remember: fields are: id, command_id TEXT, datavalue TEXT, response TEXT self._log.info(2, "Verifying Responses database with dummy write.") id = (datetime.now() - datetime.utcfromtimestamp(0)).total_seconds() datamap = {'datavalue':0x00, 'response':'OK'} # Write the info to the Redis database self._responses.hset(id, None, None, datamap) # Set expiration to a short 1 second, after which Redis will automatically delete the record self._responses.expire(id, 1) except: # Capturing all errors. self._log.error(1, "FATAL UNEXPECTED ERROR. Could not read and/or write the [Responses] database. 
This program is now exiting with error [{}].".format(sys.exc_info()[0])) # If a database cannot be processed, this program makes no sense, so exiting. sys.exit(1) def GetNextCommand(self): """ Fetches the oldest command - that has not expired - from the commands buffer. """ # Get all keys from the Commands table rkeys = self._commands.keys("*") # Key IDs are based on the timestamp, so sorting will pick the oldest first rkeys.sort() # Check if there are keys available if len(rkeys) > 0: # Get the first key from the list id = rkeys[0] # Read the Redis data datarecord = self._commands.hgetall(id) # We have the data, now delete the record (don't wait for the time-out) self._commands.delete(id) # pull the data from the record, and do proper conversions. # Correct potential dirty entries, to avoid that the software crashes on poor data. try: return_id = float(id.decode('ascii')) except: return_id = 0 try: command = datarecord[b'command'].decode('ascii') except: command = '' try: boardnr = datarecord[b'boardnr'].decode('ascii') except: boardnr = 0x00 try: pinnr = datarecord[b'pinnr'].decode('ascii') except: pinnr = 0x00 try: datavalue = datarecord[b'datavalue'].decode('ascii') except: datavalue = 0x00 # return the data read return(return_id, command, boardnr, pinnr, datavalue) else: # return a zero record if nothing was received return (0, '', 0x00, 0x00, 0x00) def ReturnResponse(self, id, value, response): """ Returns the data value to the client through the Responses buffer. Also does the house-keeping, deleting all old entries that would still exist. """ # Remember: fields are : id, command_id TEXT, datavalue TEXT, response TEXT # The Response ID is the same as the Command ID, making it easy for the client to capture the data. mapping = {'command_id':id, 'datavalue':value, 'response':response} self._responses.hset(id, None, None, mapping) # set auto-delete time-out in the Redis database. Add several seconds grace period, and round to integer values self._responses.expire(id, round(COMMAND_TIMEOUT + 2)) class mcp23017broker(): """ A class that is a man in the middle between external clients and I2C attached devices. This class is based on a shared memory database. """ def __init__(self, the_log, i2chandler, xmldata = None): # Copy logfile to local self._log = the_log # Create a handler for the I2C communications self._i2chandler = i2chandler # Inherit the xmldata communication self._xmldata = xmldata # Create a data pipe to the in-memory database self._datapipe = databaseHandler(self._log) def service_commands(self): """ Process incoming data coming from the connected clients (one at the time). Properly formatted commands are processed immediately, or as separate threads (for long-lasting commands). """ # Fetch a command from the pipe command_list = self._datapipe.GetNextCommand() # a command id larger than 0 is a successful read. Command ID zero is returned if the pipe is empty. if command_list[0] > 0: self._log.info(2, "Received command with id [{}]: [{}] for board [{}] and pin [{}].".format(str(command_list[0]), command_list[1], str(command_list[2]), str(command_list[3]))) # Start the reply error with an empty error self._return_error = "" # retrieve commands from the pipe command_id = command_list[0] the_command = command_list[1] the_board = command_list[2] the_pin = command_list[3] # During initialization a dummy command is sent. This is also done by the clients, so make sure that these commands are thrown away. 
if the_command != DUMMY_COMMAND: # Inputs can have different formats, also numerical as hexadecimal (e.g. '0x0f'). Convert where necessary. if(isinstance(the_board,str)): if 'x' in the_board: the_board = int(the_board, 16) else: the_board = int(the_board, 10) the_value = command_list[3] if(isinstance(the_value,str)): if 'x' in the_value: the_value = int(the_value, 16) else: the_value = int(the_value, 10) # Describe what we are expecting on the bus. set_expectation = "Error: first command must be one of the following {}, {}, {}, {}, {}, {}, {}, {}, {}, {}. ".format(FINDBOARD, GETDIRBIT, GETDIRREGISTER, SETDIRBIT, CLEARDIRBIT, GETIOPIN, GETIOREGISTER, SETDATAPIN, CLEARDATAPIN, TOGGLEPIN) # Using a try here, because the command could also be very, very dirty. try: if the_command not in {FINDBOARD, GETIOPIN, SETDIRBIT, CLEARDIRBIT, GETDIRBIT, SETDATAPIN, CLEARDATAPIN, GETIOREGISTER, GETDIRREGISTER, TOGGLEPIN}: self._return_error += set_expectation self._log.info(2, set_expectation) except: # Exception can happen if the_command is something _very_ weird, so need to capture that too without crashing self._return_error += set_expectation self._log.info(2, set_expectation) # Test if Board ID is a hex number within allowed Board IDs try: if not(the_board in range(MINBOARDID, MAXBOARDID)): self._return_error += "Error: Board ID not in range [0x{:0{}X}, 0x{:0{}X}]. ".format(MINBOARDID, 2, MAXBOARDID-1, 2) self._log.info(2, "Error: Board ID not in range [0x{:0{}X}, 0x{:0{}X}]. ".format(MINBOARDID, 2, MAXBOARDID-1, 2)) except: # print error message to the systemctl log file if LOG_LEVEL == 2: print(traceback.format_exc()) self._return_error += "Error: wrongly formatted register. " self._log.info(2, "Error: wrongly formatted register. ") # Test if the pin number is a hex number from 0x00 to 0x0f (included) try: if not(the_value in range(MINPIN, MAXPIN)): self._return_error += "Error: registervalue not in range [0x{:0{}X}, 0x{:0{}X}]. ".format(MINPIN, 2, MAXPIN, 2) self._log.info(2, "Error: registervalue not in range [0x{:0{}X}, 0x{:0{}X}]. ".format(MINPIN, 2, MAXPIN, 2)) except: # print error message to the systemctl log file if LOG_LEVEL == 2: print(traceback.format_exc()) self._return_error += "Error: wrongly formatted data byte. " self._log.info(2, "Error: wrongly formatted data byte. ") # All checks done, continue processing if no errors were found. if self._return_error == '': # print status message to the systemctl log file if LOG_LEVEL == 2: print("Processing: {}, {}, {}.".format(the_command, the_board, the_value)) # Command format looks good, now process it and get the result back return_data = self.ProcessCommand(the_command, the_board, the_value) # Send an "OK" back, since we didn't find an error. self._datapipe.ReturnResponse(command_id, return_data, 'OK') self._log.debug(2, "Action result: {} OK\n".format(return_data)) else: # print error message to the systemctl log file if LOG_LEVEL > 0: print(self._return_error) # Send back an error if the command was not properly formatted. Do nothing else self._datapipe.ReturnResponse(command_id, '0x00', self._return_error) def ProcessCommand(self, task, board_id, pin): """ Identifies command and processes the command on the I2C bus. 
""" # Process I2C bus commands based on board ID and Pin nr return_byte = "" try: if task == GETDIRBIT: self._i2chandler.WaitForPinToBeReleased(board_id, pin, False) return_byte = '0x{:0{}X}'.format(self._i2chandler.GetI2CDirPin(board_id, pin),2) self._log.info(2, "Received byte [{}] from pin [{}] on board [{}] through GetI2CDirPin".format(return_byte, pin, board_id)) elif task == FINDBOARD: self._i2chandler.WaitForPinToBeReleased(board_id, pin, False) return_byte = '0x{:0{}X}'.format(self._i2chandler.IdentifyBoard(board_id),2) self._log.info(2, "Received byte [{}] from board [{}] through IdentifyBoard".format(return_byte, board_id)) elif task == GETDIRREGISTER: self._i2chandler.WaitForPinToBeReleased(board_id, pin, False) return_byte = '0x{:0{}X}'.format(self._i2chandler.GetI2CDirRegister(board_id, pin),2) self._log.info(2, "Received byte [{}] from pin [{}] on board [{}] through GetI2CDirRegister".format(return_byte, pin, board_id)) elif task == SETDIRBIT: return_byte = "" self._i2chandler.SetI2CDirPin(board_id, pin) self._log.info(2, "Setting DIR bit [{}] on board [{}] through SetI2CDirPin".format(pin, board_id)) if self._xmldata is not None: self._i2chandler.WaitForPinToBeReleased(board_id, pin, False) self._xmldata.set_board_pin(board_id, pin) elif task == CLEARDIRBIT: return_byte = "" self._i2chandler.ClearI2CDirPin(board_id, pin) self._log.info(2, "Clearing DIR bit [{}] on board [{}] through ClearI2CDirPin".format(pin, board_id)) if self._xmldata is not None: self._i2chandler.WaitForPinToBeReleased(board_id, pin, False) self._xmldata.clear_board_pin(board_id, pin) elif task == GETIOPIN: self._i2chandler.WaitForPinToBeReleased(board_id, pin, False) return_byte = '0x{:0{}X}'.format(self._i2chandler.GetI2CPin(board_id, pin),2) self._log.info(2, "Received byte [{}] from pin [{}] on board [{}] through GetI2CPin".format(return_byte, pin, board_id)) elif task == GETIOREGISTER: self._i2chandler.WaitForPinToBeReleased(board_id, pin, False) return_byte = '0x{:0{}X}'.format(self._i2chandler.GetI2CIORegister(board_id, pin),2) self._log.info(2, "Received Register [{}] from pin [{}] on board [{}] through GetI2CIORegister".format(return_byte, pin, board_id)) elif task == SETDATAPIN: return_byte = "" self._i2chandler.WaitForPinToBeReleased(board_id, pin, False) self._i2chandler.SetI2CPin(board_id, pin) self._log.info(2, "Setting bit [{}] on board [{}] through SetI2CPin".format(pin, board_id)) elif task == CLEARDATAPIN: return_byte = "" self._i2chandler.WaitForPinToBeReleased(board_id, pin, False) self._i2chandler.ClearI2CPin(board_id, pin) self._log.info(2, "Clearing bit [{}] on board [{}] through ClearI2CPin".format(pin, board_id)) elif task == TOGGLEPIN: return_byte = "" self._i2chandler.ToggleI2CPin(board_id, pin) self._log.info(2, "Toggling bit [{}] on board [{}] through ToggleI2CPin".format(pin, board_id)) else: # print error message to the systemctl log file if LOG_LEVEL > 1: print("Error: Did not understand command [{}].".format(task)) self._log.error(2, "Error: Did not understand command [{}].".format(task)) except Exception as err: error_string = traceback.format_exc() # print error message to the systemctl log file if LOG_LEVEL == 1: print(error_string) if self._xmldata is not None: self._xmldata.DeleteKey(board_id) self._log.error(1, "Error when processing I2C command: {}.".format(error_string)) return return_byte class i2cCommunication(): """ A class for doing communications to MCP23017 devices on the Raspberry Pi I2C bus. 
""" def __init__(self, the_log): # Copy logfile to local self._log = the_log self._log.info(2, "Initializing I2C Communication class.") # Create an empty set to be used for avoiding that multiple toggle commands can operate on the same pin # A mutex is needed to manage the self._toggle_set in a unique way self._toggle_set = set() self._toggle_mutex = Lock() # Create a new I2C bus (port 1 of the Raspberry Pi) if DEMO_MODE_ONLY: self.i2cbus = 0 else: self.i2cbus = SMBus(1) self._log.info(2, "Initializing SMBus 1 (I2C).") # Set up a Mutual Exclusive lock, such that parallel threads are not interfering with another thread writing on the I2C bus self._i2cMutex = Lock() self._log.info(2, "Initialized I2C Mutex.") # Initialize the boards that are being handled. self.managedboards = [] @property def allmanagedboards(self): return self.managedboards def CheckInitializeBoard(self, board_id): """ Verifies if a board is already in the managed list. If not, the Control Register for the board is initialized. """ # if board_id is given as a hex string, convert to int if(isinstance(board_id,str)): board_id = int(board_id, 16) return_value = True try: # check if a board is already managed. This lookup will result in an error if not dummy = (self.managedboards.index(board_id) >= 0) except: # Wait for the I2C bus to become free self._log.info(2, "Writing data [0x02] to IOCON register for board [0x{:0{}X}]".format(board_id, 2)) self._i2cMutex.acquire() try: # Initialize configuration register of the new board if DEMO_MODE_ONLY: print("SIMULATION : writing data [0x02] to IOCON register for board [0x{:0{}X}]".format(board_id, 2)) else: self.i2cbus.write_byte_data(board_id, IOCON, 0x02) # Since existing yet, add board to managed list if initialization was successful self.managedboards.append(board_id) except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = False finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() if not(return_value): self._log.error(2, "Writing [0x02] to IOCON register for board [0x{:0{}X}] Failed !".format(board_id, 2)) return return_value def ReadI2CDir(self, board_id, port_id): """ Function for reading the full DIR Register value for a specific IO board. """ # Verify in inputs are given as hex. Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(port_id,str)): port_id = int(port_id, 16) # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = -1 # Only start writing if the I2C bus is available self._log.info(2, "Reading DIR port [0x{:0{}X}] on board [0x{:0{}X}]".format(port_id, 2, board_id, 2)) self._i2cMutex.acquire() try: # Read the current value of the DIR register if DEMO_MODE_ONLY: print("SIMULATION : reading DIR port [0x{:0{}X}] on board [0x{:0{}X}]".format(port_id, 2, board_id, 2)) return_value = 0xff else: return_value = self.i2cbus.read_byte_data(board_id, port_id) except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = -1 finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() else: return_value = -1 return return_value def WriteI2CDir(self, board_id, port_id, newvalue): """ Function for writing the full DIR Register value for a specific IO board """ # Verify in inputs are given as hex. 
Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(port_id,str)): port_id = int(port_id, 16) if(isinstance(newvalue,str)): newvalue = int(newvalue, 16) # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = True # Only start writing if the I2C bus is available self._log.info(2, "Writing DIR port [0x{:0{}X}] on board [0x{:0{}X}] to new value [0x{:0{}X}]".format(port_id, 2, board_id, 2, newvalue, 2)) self._i2cMutex.acquire() try: if DEMO_MODE_ONLY: print("SIMULATION : writing DIR port [0x{:0{}X}] on board [0x{:0{}X}] to new value [0x{:0{}X}]".format(port_id, 2, board_id, 2, newvalue, 2)) return_value = True else: # Write the new value of the DIR register self.i2cbus.write_byte_data(board_id, port_id, newvalue) # Verify if the value is indeed accepted verification = self.i2cbus.read_byte_data(board_id, port_id) if verification != newvalue: return_value = False except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = False finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() else: return_value = False return return_value def IdentifyBoard(self, board_id): """ Identifies if board exists on the I2C bus. """ # Verify in inputs are given as hex. Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = 1 # Pin values up to 0x0f go to GPIOA, higher values go to GPIOB pin_nr = 1 # pick random pin number to be read from the board. We are not going to use it anyway. port_id = IODIRA # Only start reading if the I2C bus is available self._log.info(2, "Reading DIR pin from port [0x{:0{}X}] of board [0x{:0{}X}]".format(port_id, 2, board_id, 2)) #self.i2cMutex.acquire() try: if DEMO_MODE_ONLY: return_value = (1 << pin_nr) print("SIMULATION : reading DIR pin [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(return_value, 2, port_id, 2, board_id, 2)) else: # Read the current state of the IO register, then set ('OR') the one pin _ = self.i2cbus.read_byte_data(board_id, port_id) & (1 << pin_nr) return_value = 1 except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = 0 #finally: # # Free Mutex to avoid a deadlock situation # self.i2cMutex.release() else: return_value = 0 return return_value def GetI2CDirPin(self, board_id, pin_nr): """ Gets the current value of the DIR value of an pin on a board Pin number must be between 0 and 15 """ # Verify in inputs are given as hex. 
Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(pin_nr,str)): pin_nr = int(pin_nr, 16) # Verify if MCP23017 pin number between 0 and 15 if (pin_nr < 0) or (pin_nr > 15): return_value = -1 else: # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = 1 # Pin values up to 0x0f go to GPIOA, higher values go to GPIOB if (pin_nr > 7): port_id = IODIRB pin_nr = pin_nr % 8 else: port_id = IODIRA # Only start reading if the I2C bus is available self._log.info(2, "Reading DIR pin from port [0x{:0{}X}] of board [0x{:0{}X}]".format(port_id, 2, board_id, 2)) self._i2cMutex.acquire() try: if DEMO_MODE_ONLY: return_value = (1 << pin_nr) print("SIMULATION : reading DIR pin [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(return_value, 2, port_id, 2, board_id, 2)) else: # Read the current state of the IO register, then set ('OR') the one pin if (self.i2cbus.read_byte_data(board_id, port_id) & (1 << pin_nr)) == 0x00: return_value = 0 else: return_value = 1 except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = -1 finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() else: return_value = -1 return return_value def GetI2CDirRegister(self, board_id, reg_nr): """ Gets the current value of the DIR value of a pin on a board Pin number must be between 0 and 15 """ # Verify in inputs are given as hex. Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(reg_nr,str)): reg_nr = int(reg_nr, 16) # Verify if MCP23017 pin number between 0 and 15 if (reg_nr < 0) or (reg_nr > 15): return_value = -1 #aise Exception("Pin number must be between 0 and 15, but got [", pin_nr, "] for board ", board_id) else: # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = 1 # Pin values up to 0x0f go to GPIOA, higher values go to GPIOB if (reg_nr > 0): port_id = IODIRB else: port_id = IODIRA # Only start reading if the I2C bus is available self._log.info(2, "Reading DIR register from port [0x{:0{}X}] of board [0x{:0{}X}]".format(port_id, 2, board_id, 2)) self._i2cMutex.acquire() try: if DEMO_MODE_ONLY: return_value = 0xff print("SIMULATION : reading DIR register [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(return_value, 2, port_id, 2, board_id, 2)) else: # Read the current state of the IO register, then set ('OR') the one pin return_value = self.i2cbus.read_byte_data(board_id, port_id) except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = -1 finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() else: return_value = -1 return return_value def SetI2CDirPin(self, board_id, pin_nr): """ Sets a pin to INPUT on a board Pin number must be between 0 and 15 """ # Verify in inputs are given as hex. 
Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(pin_nr,str)): pin_nr = int(pin_nr, 16) # Verify if MCP23017 pin number between 0 and 15 if (pin_nr < 0) or (pin_nr > 15): return_value = False else: # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = True # Pin values up to 0x0f go to IODIRA, higher values go to IODIRB if (pin_nr > 7): port_id = IODIRB pin_nr = pin_nr % 8 else: port_id = IODIRA # Only start writing if the I2C bus is available self._log.info(2, "Setting pin [0x{:0{}X}] to INPUT port [0x{:0{}X}] for board [0x{:0{}X}]".format(pin_nr, 2, port_id, 2, board_id,2)) self._i2cMutex.acquire() try: # Read the current state of the IODIR, then set ('OR') the one pin if DEMO_MODE_ONLY: data_byte = (1 << pin_nr) print("SIMULATION : setting pin [0x{:0{}X}] to INPUT port [0x{:0{}X}] for board [0x{:0{}X}]".format(data_byte, 2, port_id, 2, board_id,2)) else: data_byte = self.i2cbus.read_byte_data(board_id, port_id) | (1 << pin_nr) self.i2cbus.write_byte_data(board_id, port_id, data_byte) except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = False finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() else: return_value = False return return_value def ClearI2CDirPin(self, board_id, pin_nr): """ Sets a pin to OUTPUT on a board Pin number must be between 0 and 15 """ # Verify in inputs are given as hex. Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(pin_nr,str)): pin_nr = int(pin_nr, 16) # Verify if MCP23017 pin number between 0 and 15 if (pin_nr < 0) or (pin_nr > 15): return_value = False else: # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = True # Pin values up to 0x0f go to IODIRA, higher values go to IODIRB if (pin_nr > 7): port_id = IODIRB pin_nr = (pin_nr % 8) else: port_id = IODIRA # Only start writing if the I2C bus is available self._log.info(2, "Setting pin [0x{:0{}X}] to OUTPUT on port [0x{:0{}X}] for board [0x{:0{}X}]".format(pin_nr, 2, port_id, 2, board_id,2)) self._i2cMutex.acquire() try: if DEMO_MODE_ONLY: data_byte = (1 << pin_nr) print("SIMULATION : Setting pin [0x{:0{}X}] to OUTPUT on port [0x{:0{}X}] for board [0x{:0{}X}]".format(data_byte, 2, port_id, 2, board_id, 2)) else: # Read the current state of the IODIR, then clear ('AND') the one pin data_byte = self.i2cbus.read_byte_data(board_id, port_id) & ~(1 << pin_nr) self.i2cbus.write_byte_data(board_id, port_id, data_byte) except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = False finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() else: return_value = False return return_value def GetI2CPin(self, board_id, pin_nr): """ Gets the current value of a pin on a board Pin number must be between 0 and 15 """ # Verify in inputs are given as hex. 
Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(pin_nr,str)): pin_nr = int(pin_nr, 16) # Verify if MCP23017 pin number between 0 and 15 if (pin_nr < 0) or (pin_nr > 15): return_value = -1 #aise Exception("Pin number must be between 0 and 15, but got [", pin_nr, "] for board ", board_id) else: # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = 1 # Pin values up to 0x0f go to GPIOA, higher values go to GPIOB if (pin_nr > 7): port_id = GPIOB pin_nr = pin_nr % 8 else: port_id = GPIOA # Only start reading if the I2C bus is available self._log.info(2, "Reading pin [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(pin_nr, 2, port_id, 2, board_id, 2)) self._i2cMutex.acquire() try: if DEMO_MODE_ONLY: return_value = (1 << pin_nr) print("SIMULATION : reading pin [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(return_value, 2, port_id, 2, board_id, 2)) else: # Read the current state of the IO register, then set ('OR') the one pin if (self.i2cbus.read_byte_data(board_id, port_id) & (1 << pin_nr)) == 0x00: return_value = 0 else: return_value = 1 except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = -1 finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() else: return_value = -1 return return_value def GetI2CIORegister(self, board_id, reg_nr): """ Gets the current value of a pin on a board Pin number must be between 0 and 15 """ # Verify in inputs are given as hex. Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(reg_nr,str)): reg_nr = int(reg_nr, 16) # Verify if MCP23017 pin number between 0 and 15 if (reg_nr < 0) or (reg_nr > 15): return_value = -1 #aise Exception("Pin number must be between 0 and 15, but got [", pin_nr, "] for board ", board_id) else: # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = 1 # Pin values up to 0x0f go to GPIOA, higher values go to GPIOB if (reg_nr > 0): port_id = GPIOB else: port_id = GPIOA # Only start reading if the I2C bus is available self._log.info(2, "Reading register [0x{:0{}X}], i.e. port [0x{:0{}X}] of board [0x{:0{}X}]".format(reg_nr, 2, port_id, 2, board_id, 2)) self._i2cMutex.acquire() try: if DEMO_MODE_ONLY: return_value = 0xff print("SIMULATION : reading register [0x{:0{}X}] from port [0x{:0{}X}] of board [0x{:0{}X}]".format(return_value, 2, port_id, 2, board_id, 2)) else: # Read the current state of the IO register, then set ('OR') the one pin return_value = self.i2cbus.read_byte_data(board_id, port_id) except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = -1 finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() else: return_value = -1 return return_value def SetI2CPin(self, board_id, pin_nr): """ Sets a pin to HIGH on a board Pin number must be between 0 and 15 """ # Verify in inputs are given as hex. 
Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(pin_nr,str)): pin_nr = int(pin_nr, 16) # Verify if MCP23017 pin number between 0 and 15 if (pin_nr < 0) or (pin_nr > 15): return_value = False #aise Exception("Pin number must be between 0 and 15, but got [", pin_nr, "] for board ", board_id) else: # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = True # Pin values up to 0x0f go to GPIOA, higher values go to GPIOB if (pin_nr > 7): port_id = GPIOB pin_nr = pin_nr % 8 else: port_id = GPIOA # Only start writing if the I2C bus is available self._log.info(2, "Setting pin [0x{:0{}X}] to HIGH on port [0x{:0{}X}] for board [0x{:0{}X}]".format(pin_nr, 2, port_id, 2, board_id, 2)) self._i2cMutex.acquire() try: if DEMO_MODE_ONLY: data_byte = (1 << pin_nr) print("SIMULATION : setting pin [0x{:0{}X}] to HIGH on port [0x{:0{}X}] for board [0x{:0{}X}]".format(data_byte, 2, port_id, 2, board_id, 2)) else: # Read the current state of the IO register, then set ('OR') the one pin data_byte = self.i2cbus.read_byte_data(board_id, port_id) | (1 << pin_nr) self.i2cbus.write_byte_data(board_id, port_id, data_byte) except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = False finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() else: return_value = False return return_value def ClearI2CPin(self, board_id, pin_nr): """ Sets a pin to LOW on a board Pin number must be between 0 and 15 """ # Verify in inputs are given as hex. Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(pin_nr,str)): pin_nr = int(pin_nr, 16) # Verify if MCP23017 pin number between 0 and 15 if (pin_nr < 0) or (pin_nr > 15): return_value = False else: # Verify if board used already, initialize if not if self.CheckInitializeBoard(board_id): return_value = True # Pin values up to 0x0f go to GPIOA, higher values go to GPIOB if (pin_nr > 7): port_id = GPIOB pin_nr = (pin_nr % 8) else: port_id = GPIOA # Only start writing if the I2C bus is available self._log.info(2, "Setting pin [0x{:0{}X}] to LOW on port [0x{:0{}X}] for board [0x{:0{}X}]".format(pin_nr, 2, port_id, 2, board_id, 2)) self._i2cMutex.acquire() try: if DEMO_MODE_ONLY: data_byte = (1 << pin_nr) print("SIMULATION : setting pin [0x{:0{}X}] to LOW on port [0x{:0{}X}] for board [0x{:0{}X}]".format(data_byte, 2, port_id, 2, board_id, 2)) else: # Read the current state of the IO register, then set ('OR') the one pin data_byte = self.i2cbus.read_byte_data(board_id, port_id) & ~(1 << pin_nr) self.i2cbus.write_byte_data(board_id, port_id, data_byte) except: # An error happened when accessing the new board, maybe non-existing on the bus return_value = False finally: # Free Mutex to avoid a deadlock situation self._i2cMutex.release() else: return_value = False return return_value def ToggleI2CPin(self, board_id, pin_nr, acquire_state = False): """ Toggles a bit on the board. If the pin is high, it will be momentarily set to low. If it is low, it will toggle to high. Pin number must be between 0 and 15. Per default it is expected that the pin is low in the "off" state and has to be toggled high, e.g. to trigger a momentary switch. In some cases, the trigger is to the "other" side. acquire_state can be set to first assess the pin and briefly toggle the pin to the other high/low state. """ # Verify in inputs are given as hex. 
Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(pin_nr,str)): pin_nr = int(pin_nr, 16) # Verify if MCP23017 pin number between 0 and 15 if (pin_nr < 0) or (pin_nr > 15): return_value = False else: return_value = True # Toggling can take a long time, during which the server would not be able to process additional commands. # To avoid that the server is frozen, toggles are processed in separate threads. a_thread = Thread(target = self.PinToggler, args = [board_id, pin_nr], daemon = False) a_thread.start() return return_value def WaitForPinToBeReleased(self, board_id, pin_nr, lock_if_free = False): """ Toggling can take a long time, during which the server would not be able to process additional commands. To avoid that the server is frozen, toggles are processed in separate threads. The boards being processed are maintained in the _toggle_set. As long as a thread has a toggle action going on, no other actions are allowed on the specific board/pin combination. Therefore, all writes have to wait for the pin to be freed up again. """ # The verification can not last longer than a TOGGLEDELAY. Keep track of the time, and time-out if necessary checking_time = datetime.now() keep_checking = True while keep_checking: # The _toggle_set is protected with a mutex to avoid that two threads are manipulating at the same # moment, thus resulting in data errors. acquired = self._toggle_mutex.acquire(blocking = True, timeout = COMMAND_TIMEOUT) if acquired: if (board_id, pin_nr) not in self._toggle_set: if lock_if_free: self._toggle_set.add((board_id, pin_nr)) keep_checking = False self._toggle_mutex.release() if (datetime.now() - checking_time).total_seconds() > max (COMMAND_TIMEOUT, TOGGLEDELAY): keep_checking = False raise "Time-out error trying to acquire pin {} on board {}".format(board_id, pin_nr) def PinToggler(self, board_id, pin_nr, acquire_state = False): """ The PinToggler is a separate process, run in a thread. This allows the main loop to continue processing other read/write requests. """ # First make sure to do the bookkeeping. if self.CheckInitializeBoard(board_id): Process_Toggle = False try: self.WaitForPinToBeReleased(board_id, pin_nr, True) Process_Toggle = True except Exception as err: self._log.error(2, "Unable to toggle pin [0x{:0{}X}] on board [0x{:0{}X}]: Could not get pin free within [{}] seconds. Error Message: {}".format(pin_nr, 2, board_id, 2, COMMAND_TIMEOUT, err)) Process_Toggle = False if Process_Toggle: self._log.info(2, "Toggling pin [0x{:0{}X}] on board [0x{:0{}X}]".format(pin_nr, 2, board_id, 2)) # Default is that pin is toggled from low to high briefly. # If 'acquire_state' is set, the current state is assessed, and switched briefly to the "other" high/low state. if acquire_state: current_state = self.GetI2CPin(board_id, pin_nr) else: # Default is Low for current state and toggle to high to switch on e.g. a momentary switch. 
current_state = 0x0 if current_state == 0x0: # Current state is low (0x0), and toggling needs to go to high briefly self._log.info(2, "Toggling pin [0x{:0{}X}] on board [0x{:0{}X}] from LOW to HIGH".format(pin_nr, 2, board_id, 2)) self.SetI2CPin(board_id, pin_nr) time.sleep(TOGGLEDELAY) self.ClearI2CPin(board_id, pin_nr) self._log.info(2, "Toggled pin [0x{:0{}X}] on board [0x{:0{}X}] back from HIGH to LOW".format(pin_nr, 2, board_id, 2)) if current_state == 0x1: # Current state is high (0x1 or more), and toggling needs to go to low briefly self._log.info(2, "Toggling pin [0x{:0{}X}] on board [0x{:0{}X}] from HIGH to LOW".format(pin_nr, 2, board_id, 2)) self.ClearI2CPin(board_id, pin_nr) time.sleep(TOGGLEDELAY) self.SetI2CPin(board_id, pin_nr) self._log.info(2, "Toggled pin [0x{:0{}X}] on board [0x{:0{}X}] back from LOW to HIGH".format(pin_nr, 2, board_id, 2)) self._log.info(2, "Releasing (0x{:0{}X}, 0x{:0{}X}) from the Toggle set".format(board_id, 2, pin_nr, 2)) # Make sure to remove the board/pin pair from the _toggle_set at the end, or the pin will be blocked for all other processing self._toggle_set.remove((board_id, pin_nr)) else: self._log.error(2, "Toggling pin failed for [0x{:0{}X}] on board [0x{:0{}X}]: could not initialize board.".format(pin_nr, 2, board_id, 2)) def BusIDBlinker(self, board_id = 0x20, num_flashes = 10): """ Test routine only, briefly switches pin 15 on the board on and off. It is used to find back a board in the rack. Please mind that this is a specific routine which expects pin 15 of the MCP23017 to be set as output to an identification LED. """ if(isinstance(board_id,str)): board_id = int(board_id, 16) for i in range(0, num_flashes): self.ClearI2CPin(board_id,15) time.sleep(0.5) self.SetI2CPin(board_id,15) time.sleep(0.5) class xmlParameterHandler(): """ A class to handle an XML config file that keeps track of boards that were processed. This XML Parameter Handler is used at boot time, so that the DIR pins of the different boards are set to their last remembered state. I.e. inputs are set back to inputs and outputs are re-configured as outputs after the cold boot. During the processing, the XML file is constantly updated when the DIR (input vs. output) of a pin changes. 
""" def __init__(self, the_log, xml_file_name = ''): # Copy logfile to local self._log = the_log # Only read config file if a name was provided if (CONFIGURATION_FILE == '') and (xml_file_name == ''): self._confdata = ET.fromstring(b'<DATA>\n <i2cboards>\n </i2cboards>\n</DATA>') self._use_config_file = False else: self._use_config_file = True from os.path import expanduser # Set location of file, go default if no file given if xml_file_name == "": self._filename = "{}/{}".format(expanduser("~"), CONFIGURATION_FILE) else: self._filename = xml_file_name # Create initial empty datastring self.read_parameter_file() @property def get_all_boards(self): return self._confdata[0] def get_board_dir(self, board_id, port_id): """ Get the Direction value of a specific board """ return_value = "0xff" if self._use_config_file: if(isinstance(board_id, int)): board_id = '0x{:0{}X}'.format(board_id,2) if(isinstance(port_id, int)): port_id = '0x{:0{}X}'.format(port_id,2) have_found_lev1 = False for child in self._confdata[0]: have_found_lev2 = False if child.attrib["name"] == board_id: have_found_lev1 = True for subchild in child: if subchild.attrib["name"] == port_id: return_value = subchild.text have_found_lev2 = True if (not(have_found_lev2)) or (len(child) != 2): self._confdata[0].remove(child) have_found_lev1 = False if not(have_found_lev1): self.CreateNewKey(board_id) return return_value def set_board_dir(self, board_id, port_id, newvalue): """ Set the Direction value for a specific board """ return_value = True if self._use_config_file: # if byte or integer given, update to hex byte if(isinstance(board_id, int)): board_id = '0x{:0{}X}'.format(board_id,2) if(isinstance(port_id, int)): port_id = '0x{:0{}X}'.format(port_id,2) if(isinstance(newvalue, int)): newvalue = '0x{:0{}X}'.format(newvalue,2) # Verify if value already exists (and create key if not in the file yet) comparevalue = self.get_board_dir(board_id, port_id) # update board and port pair, and write back to paramete file if comparevalue != newvalue: for child in self._confdata[0]: if child.attrib["name"] == board_id: for subchild in child: if subchild.attrib["name"] == port_id: subchild.text = newvalue return_value = self.write_parameter_file() return return_value def set_board_pin(self, board_id, pin_nr): """ Set the pin value of the Direction register for a specific board """ return_value = True if self._use_config_file: # Verify in inputs are given as hex. Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(pin_nr,str)): pin_nr = int(pin_nr, 16) # Pin values up to 0x0f go to IODIRA, higher values go to IODIRB if (pin_nr > 7): port_id = IODIRB pin_nr = pin_nr % 8 else: port_id = IODIRA currentvalue = self.get_board_dir(board_id, port_id) if(isinstance(currentvalue,str)): currentvalue = int(currentvalue, 16) newvalue = currentvalue | (1 << pin_nr) return_value = self.set_board_dir(board_id, port_id, newvalue) return True def clear_board_pin(self, board_id, pin_nr): """ Clear the pin value of the Direction register for a specific board """ return_value = True if self._use_config_file: # Verify in inputs are given as hex. 
Convert to int if so if(isinstance(board_id,str)): board_id = int(board_id, 16) if(isinstance(pin_nr,str)): pin_nr = int(pin_nr, 16) # Pin values up to 0x0f go to IODIRA, higher values go to IODIRB if (pin_nr > 7): port_id = IODIRB pin_nr = pin_nr % 8 else: port_id = IODIRA currentvalue = self.get_board_dir(board_id, port_id) if(isinstance(currentvalue,str)): currentvalue = int(currentvalue, 16) newvalue = currentvalue & ~(1 << pin_nr) return_value = self.set_board_dir(board_id, port_id, newvalue) return return_value def DeleteKey(self, board_id): """ Clear the Key in the XML file for a board that is apparently no longer used. """ return_value = True if self._use_config_file: if(isinstance(board_id, int)): board_id = '0x{:0{}X}'.format(board_id,2) have_found = False for child in self._confdata[0]: if child.attrib["name"] == board_id: have_found = True self._confdata[0].remove(child) if have_found: return_value = self.write_parameter_file() return return_value def CreateNewKey(self, board_id): """ Create a new Key in the XML file and set the initial values to OUTPUT (Oxff). """ return_value = True if self._use_config_file: if(isinstance(board_id, int)): board_id = '0x{:0{}X}'.format(board_id,2) # make sure you are not creating a key that already exists self.DeleteKey(board_id) attrib = {'name': board_id} element = self._confdata[0].makeelement('board', attrib) self._confdata[0].append(element) index = len(self._confdata[0]) - 1 attrib = {'name': '0x{:0{}X}'.format(IODIRA,2)} element = self._confdata[0][index].makeelement('port', attrib) element.text = ALLOUTPUTS self._confdata[0][index].append(element) attrib = {'name': '0x{:0{}X}'.format(IODIRB,2)} element = self._confdata[0][index].makeelement('port', attrib) element.text = ALLOUTPUTS self._confdata[0][index].append(element) return_value = self.write_parameter_file() return return_value def read_parameter_file(self): """ Read the XML parameter file from the current home directory. Create an empty new one if nothing exists. """ return_value = True if self._use_config_file: if os.path.exists(self._filename): self._log.info(2, "Reading Config XML file") try: # Read file, this will fail if the file does not exist (yet) ConfTree = ET.parse(self._filename) self._confdata = ConfTree.getroot() except: self._log.info(2, "Reading Config file FAILED. Creating a new one. ") self._confdata = ET.fromstring(b'<DATA>\n <i2cboards>\n </i2cboards>\n</DATA>') return_value = self.write_parameter_file() else: self._confdata = ET.fromstring(b'<DATA>\n <i2cboards>\n </i2cboards>\n</DATA>') return_value = self.write_parameter_file() return return_value def write_parameter_file(self): """ Write the XML parameter file from the current home directory. Just try ... """ return_value = True if self._use_config_file: self._log.info(2, "Writing Config file. ") try: self.xml_pretty_print(self._confdata[0]) outString = ET.tostring(self._confdata) outFile = open(self._filename,"w") outFile.write(outString.decode('ascii')) outFile.close() return_value = True except Exception as err: return_value = False # Disable further write attempts if the file cannot be written. self._use_config_file = False if LOG_LEVEL > 0: print("Could not write parameter file [{}]. Error: {}".format(self._filename, err)) self._log.info("Could not write parameter file [{}]. Error: {}".format(self._filename, err)) return return_value def xml_pretty_print(self, element, level=0): """ Format the XML data as properly indented items for better reading. 
""" # Inspired by https://norwied.wordpress.com/2013/08/27/307/ # Kudos go to Norbert and <NAME> padding = ' ' indent = "\n{}".format(padding * level) if len(element): if not element.text or not element.text.strip(): element.text = "{} ".format(indent) if not element.tail or not element.tail.strip(): element.tail = indent for elem in element: self.xml_pretty_print(elem, level+1) if not element.tail or not element.tail.strip(): element.tail = indent else: if level and (not element.tail or not element.tail.strip()): element.tail = indent class LogThis(): """ A class for keeping track of the logging. In case that logging is requested, errors are tracked in the log file if the level is > 0. At high verbosity (level >= 3), all actions are logged for debugging purposes. """ def __init__(self): # Set Logging details if LOG_LEVEL > 0: self._log_enabled = True try: from os.path import expanduser # Set location of file, go default if no file given self._filename = "{}/{}".format(expanduser("~"), LOG_FILE) self.log_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') self.my_handler = RotatingFileHandler(self._filename, mode='a', maxBytes=10*1024*1024, backupCount=2, encoding=None, delay=0) self.my_handler.setFormatter(self.log_formatter) self.my_handler.setLevel(logging.INFO) self.app_log = logging.getLogger('root') self.app_log.setLevel(logging.INFO) self.app_log.addHandler(self.my_handler) except Exception as err: self._log_enabled = False if LOG_LEVEL > 0: print("Error while creating log file: {}. ".format(str(err))) else: self._log_enabled = False def info(self, info_level, info_text): if self._log_enabled: if (LOG_LEVEL > 1) or (info_level == LOG_LEVEL): self.app_log.info(info_text) def debug(self, info_level, info_text): if self._log_enabled: if (LOG_LEVEL > 1) or (info_level == LOG_LEVEL): self.app_log.debug(info_text) def error(self, info_level, info_text): if self._log_enabled: if (LOG_LEVEL > 1) or (info_level == LOG_LEVEL): self.app_log.error(info_text) def InitBusAtBoot(the_log, xmldata, i2chandler): """ If the program starts first time, pull the remembered boards from the XML config file. Set the proper input/output pin states to the last ones remembered. """ # Read the configured boards from the config file the_log.info(2, "Reading board information from XML parameter file.") boarddata = xmldata.get_all_boards # Process boards one by one for board in boarddata: # Get the board ID (hex board number) board_id = board.attrib["name"] # Process both ports in the MCP23017 board (if configured both) for port in board: # Get Port A or B ID port_id = port.attrib["name"] # print error message to the systemctl log file if LOG_LEVEL == 2: print("Port [{}] of board [{}] should be set to [{}]".format(port_id, board_id, port.text)) the_log.info(2, "Port [{}] of board [{}] should be set to [{}]".format(port_id, board_id, port.text)) # Write the I/O state to the port if not(i2chandler.WriteI2CDir(board_id, port_id, port.text)): if LOG_LEVEL == 2: print("That didn't work for board [{}]".format(board_id)) the_log.info(2, "That didn't work for board [{}]".format(board_id)) # If that didn't work, the board may have been removed before booting. Remove it from the config file. xmldata.DeleteKey(board_id) def main(): """ Main program function. 
""" # Start a logger and provide info my_log = LogThis() my_log.info(1, "mcp23017server starting, running version [{}].".format(VERSION)) # Parameter file for board input/output configurations my_log.info(2, "Creating XML Parameter Handler") xmldata = xmlParameterHandler(my_log) # Separate I2C handler, including a Mutex to make sure other clients are not messing with the I2C bus my_log.info(2, "Creating I2C Communication Handler") i2chandler = i2cCommunication(my_log) # Initialize the I2C bus at first run (manual run), or at boot time (if set up as a service). my_log.info(2, "Initializing I2C devices") InitBusAtBoot(my_log, xmldata, i2chandler) # Set up a new broker - this is the main part of the software. my_log.info(2, "Creating a Message Broker") mybroker = mcp23017broker(my_log, i2chandler, xmldata) # Process commands forever while True: mybroker.service_commands() my_log.error(1, "FATAL EXIT WITH ERROR [{}]".format(my_error_state)) # Do a controlled exist with fail code. Trigger the OS to restart the service if configured. sys.exit(1) if __name__ == "__main__": """ Entry point when program is called from the command line. """ main()
# import modules ---------------------------------------- import trimesh from shapely.geometry import LineString import numpy as np import matplotlib.pyplot as plt import timeit # Load in STL file -------------------------------------- start = timeit.default_timer() #start timer stl_mesh = trimesh.load_mesh("3DBenchy.stl") stop = timeit.default_timer() #stop timer print("File loaded in ",stop-start," [s]") # Generte Intersection Rays ----------------------------- start = timeit.default_timer() #start timer width_num = 100 # display width (pixels) [#] width_dim = np.array([-32,32]) # display width (size) [mm] height_num = 75 # display height (pixels) [#] height_dim = np.array([-1,49]) # display heigth (size) [mm] # get smallest cylinder that fits around stl file cylinder_rad = np.max(np.sqrt(stl_mesh.vertices[:,0]**2+stl_mesh.vertices[:,1]**2)) cylinder_height = np.array([np.min(stl_mesh.vertices[:,2]),np.max(stl_mesh.vertices[:,2])]) ray_origins = np.zeros([width_num,height_num,3]) ray_directions = np.zeros([width_num*height_num,3]) x_val = np.linspace(width_dim[0],width_dim[1],width_num) # x coordinates y_val = cylinder_rad + 1 # y coordinates z_val = np.linspace(height_dim[0],height_dim[1],height_num) # z coordinates ray_origins[:,:,0] = np.meshgrid(np.ones(height_num),x_val)[1] # x coordinates ray_origins[:,:,1] = cylinder_rad # y coordinates ray_origins[:,:,2] = np.meshgrid(z_val,np.ones(width_num))[0] # z coordinates ray_origins = ray_origins.reshape([-1,3]) ray_directions[:,0] = 0 # x component ray_directions[:,1] = -1 # y component ray_directions[:,2] = 0 # z component stop = timeit.default_timer() #stop timer print("Generate Intersection Rays in ",stop-start," [s]") # rotate the rays to face stl angle at theta ----------- theta = np.deg2rad(0) # and of rays [deg] frame_num = 6 # number of frames from 0-180 deg obj_func = np.zeros([width_num,height_num,frame_num]) # initize array theta = np.deg2rad(0) # and of rays [deg] def Rotate(V,t): V_rotated = V.copy() V_rotated[:,0] = V[:,0]*np.cos(theta) - V[:,1]*np.sin(theta) V_rotated[:,1] = V[:,0]*np.sin(theta) + V[:,1]*np.cos(theta) return V_rotated for frame in range(frame_num): start = timeit.default_timer() #start timer ray_origins = Rotate(ray_origins,theta) ray_directions = Rotate(ray_directions,theta) theta = np.deg2rad(180/(frame_num+1)) stop = timeit.default_timer() #stop timer print("Rotated Rays in ",stop-start," [s]") ##### ##### # compute intersections --------------------------------- start = timeit.default_timer() #start timer locations, index_ray, index_tri = stl_mesh.ray.intersects_location( ray_origins=ray_origins, ray_directions=ray_directions) stop = timeit.default_timer() #stop timer print("Compute intersections in ",stop-start," [s]") # Generate Object function ------------------------------ start = timeit.default_timer() #start timer ray = 0 for w in range(width_num): for h in range(height_num): dispacement = locations[index_ray==ray,:2][1:] - locations[index_ray==ray,:2][:-1] #if (len(dispacement[:,0])%2==1): #print("something wrong with ray ", ray) obj_func[w,h,frame] = np.sqrt(np.sum(dispacement[::2,0]**2 + dispacement[::2,1]**2)) ray += 1 stop = timeit.default_timer() #stop timer print("Generate object functions in ",stop-start," [s]") plt.imshow(obj_func[:,:,0],cmap='gray') plt.show()
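Two details in the script above are worth making explicit: Rotate(V, t) ignores its angle argument and always uses the global theta, and the per-ray segment sums assume the intersection points come back already ordered along the ray, which trimesh does not appear to guarantee. The sketch below shows one way to write both pieces defensively; it is illustrative only, not a drop-in patch for the script.

import numpy as np

def rotate_z(points, angle_rad):
    """Rotate an (N, 3) array of points about the z-axis by angle_rad."""
    c, s = np.cos(angle_rad), np.sin(angle_rad)
    rot = np.array([[c, -s, 0.0],
                    [s,  c, 0.0],
                    [0.0, 0.0, 1.0]])
    return points @ rot.T

def path_length_inside(hits, origin, direction):
    """Sum entry-to-exit distances for one ray, given its intersection points."""
    if len(hits) < 2:
        return 0.0
    # Order the hits by distance along the ray before pairing entries with exits.
    t = (hits - origin) @ direction
    ordered = hits[np.argsort(t)]
    pairs = len(ordered) // 2
    segments = ordered[1::2][:pairs] - ordered[0::2][:pairs]
    return float(np.sum(np.linalg.norm(segments, axis=1)))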
<reponame>InnovArul/DIGITS # Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import from flask.ext.wtf import Form import os from wtforms import validators from digits import utils from digits.utils import subclass from digits.utils.forms import validate_required_if_set @subclass class DatasetForm(Form): """ A form used to create an image processing dataset """ def validate_folder_path(form, field): if not field.data: pass else: # make sure the filesystem path exists if not os.path.exists(field.data) or not os.path.isdir(field.data): raise validators.ValidationError('Folder does not exist or is not reachable') else: return True train_image_folder = utils.forms.StringField( u'Training image folder', validators=[ validators.DataRequired(), validate_folder_path, ], tooltip="Indicate a folder of images to use for training" ) train_label_folder = utils.forms.StringField( u'Training label folder', validators=[ validators.DataRequired(), validate_folder_path, ], tooltip="Indicate a folder of training labels" ) val_image_folder = utils.forms.StringField( u'Validation image folder', validators=[ validate_required_if_set('val_label_folder'), validate_folder_path, ], tooltip="Indicate a folder of images to use for training" ) val_label_folder = utils.forms.StringField( u'Validation label folder', validators=[ validate_required_if_set('val_image_folder'), validate_folder_path, ], tooltip="Indicate a folder of validation labels" ) resize_image_width = utils.forms.IntegerField( u'Resize Image Width', validators=[ validate_required_if_set('resize_image_height'), validators.NumberRange(min=1), ], tooltip="If specified, images will be resized to that dimension after padding" ) resize_image_height = utils.forms.IntegerField( u'Resize Image Height', validators=[ validate_required_if_set('resize_image_width'), validators.NumberRange(min=1), ], tooltip="If specified, images will be resized to that dimension after padding" ) padding_image_width = utils.forms.IntegerField( u'Padding Image Width', default=1248, validators=[ validate_required_if_set('padding_image_height'), validators.NumberRange(min=1), ], tooltip="If specified, images will be padded to that dimension" ) padding_image_height = utils.forms.IntegerField( u'Padding Image Height', default=384, validators=[ validate_required_if_set('padding_image_width'), validators.NumberRange(min=1), ], tooltip="If specified, images will be padded to that dimension" ) channel_conversion = utils.forms.SelectField( u'Channel conversion', choices=[ ('RGB', 'RGB'), ('L', 'Grayscale'), ('none', 'None'), ], default='RGB', tooltip="Perform selected channel conversion." ) val_min_box_size = utils.forms.IntegerField( u'Minimum box size (in pixels) for validation set', default='25', validators=[ validators.DataRequired(), validators.NumberRange(min=0), ], tooltip="Retain only the boxes that are larger than the specified " "value in both dimensions. This only affects objects in " "the validation set. Enter 0 to disable this threshold." ) custom_classes = utils.forms.StringField( u'Custom classes', validators=[ validators.Optional(), ], tooltip="Enter a comma-separated list of lower-case class names. " "Class IDs are assigned sequentially, starting from 0. " "Leave this field blank to use default class mappings. " "See object detection extension documentation for more " "information." )
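The paired validation-folder fields above hinge on a "required only if the partner field is set" rule supplied by digits.utils.forms. For readers outside DIGITS, the same behaviour can be expressed with a small custom WTForms validator, roughly as in the sketch below (a generic illustration, not the DIGITS helper itself).

from wtforms import Form, StringField
from wtforms.validators import StopValidation

class RequiredIfSet(object):
    """Require this field whenever another named field has a value."""
    def __init__(self, other_field_name):
        self.other_field_name = other_field_name

    def __call__(self, form, field):
        other = form[self.other_field_name]
        if other.data and not field.data:
            raise StopValidation('Required because %s is set' % other.label.text)

class FolderPairForm(Form):
    val_image_folder = StringField('Validation image folder',
                                   validators=[RequiredIfSet('val_label_folder')])
    val_label_folder = StringField('Validation label folder',
                                   validators=[RequiredIfSet('val_image_folder')])

# form = FolderPairForm(data={'val_image_folder': '/data/images'})
# form.validate()  # fails until val_label_folder is provided as well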
# Copyright Contributors to the Pyro project. # SPDX-License-Identifier: Apache-2.0 from collections import namedtuple import torch SymmArrowhead = namedtuple("SymmArrowhead", ["top", "bottom_diag"]) TriuArrowhead = namedtuple("TriuArrowhead", ["top", "bottom_diag"]) def sqrt(x): """ EXPERIMENTAL Computes the upper triangular square root of an symmetric arrowhead matrix. :param SymmArrowhead x: an symmetric arrowhead matrix :return: the square root of `x` :rtype: TriuArrowhead """ assert isinstance(x, SymmArrowhead) head_size = x.top.size(0) if head_size == 0: return TriuArrowhead(x.top, x.bottom_diag.sqrt()) A, B = x.top[:, :head_size], x.top[:, head_size:] # NB: the complexity is O(N * head_size^2) # ref: https://en.wikipedia.org/wiki/Schur_complement#Background Dsqrt = x.bottom_diag.sqrt() # On cholesky error, retry with smaller tail part B. num_attempts = 6 for i in range(num_attempts): B_Dsqrt = B / Dsqrt.unsqueeze(-2) # shape: head_size x N schur_complement = A - B_Dsqrt.matmul(B_Dsqrt.t()) # complexity: head_size^2 x N # we will decompose schur_complement to U @ U.T (so that the sqrt matrix # is upper triangular) using some `flip` operators: # flip(cholesky(flip(schur_complement))) try: top_left = torch.flip(torch.cholesky(torch.flip(schur_complement, (-2, -1))), (-2, -1)) break except RuntimeError: B = B / 2 continue raise RuntimeError("Singular schur complement in computing Cholesky of the input" " arrowhead matrix") top_right = B_Dsqrt top = torch.cat([top_left, top_right], -1) bottom_diag = Dsqrt return TriuArrowhead(top, bottom_diag) def triu_inverse(x): """ EXPERIMENTAL Computes the inverse of an upper-triangular arrowhead matrix. :param TriuArrowhead x: an upper-triangular arrowhead matrix. :return: the inverse of `x` :rtype: TriuArrowhead """ assert isinstance(x, TriuArrowhead) head_size = x.top.size(0) if head_size == 0: return TriuArrowhead(x.top, x.bottom_diag.reciprocal()) A, B = x.top[:, :head_size], x.top[:, head_size:] B_Dinv = B / x.bottom_diag.unsqueeze(-2) identity = torch.eye(head_size, dtype=A.dtype, device=A.device) top_left = torch.triangular_solve(identity, A, upper=True)[0] top_right = -top_left.matmul(B_Dinv) # complexity: head_size^2 x N top = torch.cat([top_left, top_right], -1) bottom_diag = x.bottom_diag.reciprocal() return TriuArrowhead(top, bottom_diag) def triu_matvecmul(x, y, transpose=False): """ EXPERIMENTAL Computes matrix-vector product of an upper-triangular arrowhead matrix `x` and a vector `y`. :param TriuArrowhead x: an upper-triangular arrowhead matrix. :param torch.Tensor y: a 1D tensor :return: matrix-vector product of `x` and `y` :rtype: TriuArrowhead """ assert isinstance(x, TriuArrowhead) head_size = x.top.size(0) if transpose: z = x.top.transpose(-2, -1).matmul(y[:head_size]) # here we exploit the diagonal structure of the bottom right part # of arrowhead_sqrt matrix; so the complexity is still O(N) top = z[:head_size] bottom = z[head_size:] + x.bottom_diag * y[head_size:] else: top = x.top.matmul(y) bottom = x.bottom_diag * y[head_size:] return torch.cat([top, bottom], 0) def triu_gram(x): """ EXPERIMENTAL Computes the gram matrix `x.T @ x` from an upper-triangular arrowhead matrix `x`. :param TriuArrowhead x: an upper-triangular arrowhead matrix. 
    :return: the gram matrix `x.T @ x` of the input
    :rtype: torch.Tensor
    """
    assert isinstance(x, TriuArrowhead)
    head_size = x.top.size(0)
    if head_size == 0:
        return x.bottom_diag.pow(2)
    A, B = x.top[:, :head_size], x.top[:, head_size:]
    top = A.t().matmul(x.top)
    bottom_left = top[:, head_size:].t()
    # the following matmul operator is O(N^2 x head_size)
    bottom_right = B.t().matmul(B) + x.bottom_diag.pow(2).diag()
    return torch.cat([top, torch.cat([bottom_left, bottom_right], -1)], 0)
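A quick numerical check of the helpers above is to densify a small arrowhead matrix and compare against ordinary dense algebra. The sketch below builds a positive-definite symmetric arrowhead matrix from a known upper-triangular factor and then checks sqrt and triu_gram against their dense counterparts; it assumes SymmArrowhead, sqrt and triu_gram from this module are in scope and run on the installed torch version, and the sizes (head 2, total 5) are arbitrary.

import torch

def dense_symm(top, bottom_diag):
    """Densify a symmetric arrowhead matrix from its top rows and tail diagonal."""
    head, n = top.size(0), top.size(1)
    dense = torch.zeros(n, n, dtype=top.dtype)
    dense[:head] = top
    dense[:, :head] = top.t()
    dense[head:, head:] = torch.diag(bottom_diag)
    return dense

def dense_triu(top, bottom_diag):
    """Densify an upper-triangular arrowhead matrix."""
    head, n = top.size(0), top.size(1)
    dense = torch.zeros(n, n, dtype=top.dtype)
    dense[:head] = top
    dense[head:, head:] = torch.diag(bottom_diag)
    return dense

# Build x = U_true @ U_true.T, where U_true is an upper-triangular arrowhead factor.
torch.manual_seed(0)
head, n = 2, 5
T = torch.randn(head, head).triu() + 2 * torch.eye(head)
B = torch.randn(head, n - head)
d = torch.rand(n - head) + 0.5
top = torch.cat([T @ T.t() + B @ B.t(), B * d], dim=-1)
x = SymmArrowhead(top, d * d)          # namedtuple defined in this module

u = sqrt(x)                            # upper-triangular arrowhead factor of x
U = dense_triu(u.top, u.bottom_diag)
assert torch.allclose(U @ U.t(), dense_symm(top, d * d), atol=1e-5)
assert torch.allclose(triu_gram(u), U.t() @ U, atol=1e-5)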
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2022, pysat development team # Full license can be found in License.md # ----------------------------------------------------------------------------- """Routines to match modelled and observational data.""" import datetime as dt import numpy as np import pandas as pds import pysat import pysatModels from pysatModels.utils.convert import load_model_xarray from pysatModels.utils import extract def collect_inst_model_pairs(start, stop, tinc, inst, inst_download_kwargs=None, model_load_rout=load_model_xarray, model_load_kwargs=None, inst_clean_rout=None, inst_lon_name=None, mod_lon_name=None, lon_pos='end', inst_name=None, mod_name=None, mod_datetime_name=None, mod_time_name=None, mod_units=None, sel_name=None, time_method='min', pair_method='closest', method='linear', model_label='model', comp_clean='clean'): """Pair instrument and model data. Parameters ---------- start : dt.datetime Starting datetime stop : dt.datetime Ending datetime tinc : dt.timedelta Time incriment for model files inst : pysat.Instrument Instrument object for which modelled data will be extracted inst_download_kwargs : dict or NoneType Optional keyword arguments for downloading instrument data (default=None) model_load_rout : func Routine to load model data into an xarray using datetime as argument input input and other necessary data as keyword arguments. If the routine requires a time-dependent filename, ensure that the load routine uses the datetime input to construct the correct filename, as done in load_model_xarray. (default=load_model_xarray) model_load_kwargs : dict or NoneType Keyword arguments for the model loading routine. (default=None) inst_clean_rout : func Routine to clean the instrument data. (default=None) inst_lon_name : str variable name for instrument longitude mod_lon_name : str variable name for model longitude lon_pos : str or int Accepts zero-offset integer for list order or 'end' (default='end') inst_name : list or NoneType List of names of the data series to use for determing instrument location. (default=None) mod_name : list or NoneType List of names of the data series to use for determing model locations in the same order as inst_name. These must make up a regular grid. (default=None) mod_datetime_name : str Name of the data series in the model Dataset containing datetime info mod_time_name : str Name of the time coordinate in the model Dataset mod_units : list or NoneType Units for each of the mod_name location attributes. Currently supports: rad/radian(s), deg/degree(s), h/hr(s)/hour(s), m, km, and cm. (default=None) sel_name : list or NoneType list of names of modelled data indices to append to instrument object, or None to append all modelled data (default=None) time_method : str Pair data using larger (max) or smaller (min) of the smallest instrument/model time increments (default='min') pair_method : str Find all relevent pairs ('all') or just the closest pairs ('closest'). (default='closest') method : str Interpolation method. Supported are 'linear', 'nearest', and 'splinef2d'. The last is only supported for 2D data and is not recommended here. (default='linear') model_label : str name of model, used to identify interpolated data values in instrument (default="model") comp_clean : str Clean level for the comparison data ('clean', 'dusty', 'dirty', 'none') (default='clean') Returns ------- matched_inst : pysat.Instrument Instrument object with observational data from `inst` and paired modelled data. 
Raises ------ ValueError If input is incorrect Note ---- Perform the data cleaning after finding the times and locations where the observations and model align. """ # Initialize the output matched_inst = None # Test the input if inst_lon_name is None: raise ValueError('Need longitude name for instrument data') if mod_lon_name is None: raise ValueError('Need longitude name for model data') if mod_datetime_name is None: raise ValueError('Need datetime coordinate name for model data') if mod_time_name is None: raise ValueError('Need time coordinate name for model data') if inst_name is None or len(inst_name) == 0: estr = 'Must provide instrument location attribute names as a list' raise ValueError(estr) if mod_name is None: estr = 'Must provide model location attribute names as a list' raise ValueError(estr) if mod_units is None: raise ValueError('Must provide model units as a list') if len(inst_name) != len(mod_name): estr = ''.join(['Must provide the same number of instrument and ', 'model location attribute names as a list']) raise ValueError(estr) if len(mod_name) != len(mod_units): raise ValueError(''.join(['Must provide units for each model location', ' attribute'])) if inst_clean_rout is None: raise ValueError('Need routine to clean the instrument data') if inst_download_kwargs is None: inst_download_kwargs = {} if model_load_kwargs is None: model_load_kwargs = {} skip_download = False if "skip_download" in inst_download_kwargs.keys(): skip_download = inst_download_kwargs['skip_download'] del inst_download_kwargs['skip_download'] # Download the instrument data, if needed and wanted if not skip_download and (stop - start).days != len(inst.files[start:stop]): missing_times = [tt for tt in pds.date_range(start, stop, freq='1D', closed='left') if tt not in inst.files[start:stop].index] for tt in missing_times: inst.download(start=tt, stop=tt + pds.DateOffset(days=1), **inst_download_kwargs) # Cycle through the times, loading the model and instrument data as needed istart = start inst_lon_adjust = True inst_dims = [] while start < stop: # Load the model data for each time try: mdata = model_load_rout(start, **model_load_kwargs) except (IOError, ValueError) as err: pysatModels.logger.info( 'unable to load model data at {:}\n{:}'.format(start, err)) mdata = None if mdata is not None: # Get the range for model longitude, if it has not already been set if inst_lon_adjust: if mod_lon_name in mdata.coords: lon_high = float(mdata.coords[mod_lon_name].max()) lon_low = float(mdata.coords[mod_lon_name].min()) elif mod_lon_name in mdata.data_vars: lon_high = float(np.nanmax(mdata.data_vars[mod_lon_name])) lon_low = float(np.nanmin(mdata.data_vars[mod_lon_name])) else: raise ValueError("".join(["unknown name for model ", "longitude: ", mod_lon_name])) if lon_high > 180.0 and lon_low < 0.0: raise ValueError("unexpected longitude range") elif lon_high > 180.0 or lon_low >= 0.0: lon_low = 0.0 lon_high = 360.0 else: lon_low = -180.0 lon_high = 180.0 # Set the range of the instrument longitude inst.custom_attach(pysat.utils.coords.update_longitude, kwargs={'low': lon_low, 'lon_name': inst_lon_name, 'high': lon_high}) inst.load(date=istart) # Set flag to false now that the range has been set inst_lon_adjust = False # Load the instrument data, if needed if inst.empty or inst.index[-1] < istart: inst.load(date=istart) if not inst.empty and np.any(inst.index >= istart): added_names = extract.extract_modelled_observations( inst=inst, model=mdata, inst_name=inst_name, mod_name=mod_name, 
mod_datetime_name=mod_datetime_name, mod_time_name=mod_time_name, mod_units=mod_units, sel_name=sel_name, time_method=time_method, method=method, pair_method=pair_method, model_label=model_label) if len(added_names) > 0: # Clean the instrument data inst.clean_level = comp_clean inst_clean_rout(inst) check_name = "_".join([model_label, mod_datetime_name]) im = list() imbase = None for aname in added_names: if aname == check_name: # There is a baseline for the names imbase = np.where( np.isfinite(inst[check_name].values)) # Determine the number of good points for this data imnew = np.where(np.isfinite(inst[aname].values)) # Some data types are higher dimensions than others, # make sure we end up choosing a high dimension one # so that we don't accidently throw away paired data if len(im) == 0 or len(im[0]) < len(imnew[0]): im = imnew # Check the data against the baseline if imbase is not None: if len(im[0]) > len(imbase[0]): ikeep = [i for i, ind in enumerate(im[0]) if ind in imbase[0]] im = [imnew[ikeep] for imnew in list(im)] # If the data is 1D, save it as a list instead of a tuple if len(im) == 1: im = im[0] else: # If the dimension data hasn't been set yet, do it here if len(inst_dims) == 0: inst_dims = [inst.index.name] inst_dims.extend([dd for dd in inst.data.dims.keys() if dd != inst.index.name]) im = {kk: np.unique(im[i]) for i, kk in enumerate(inst_dims)} # Save the clean, matched data if matched_inst is None: matched_inst = inst.copy() matched_inst.data = inst[im] else: matched_inst.concat_data(inst[im]) # Reset the clean flag inst.clean_level = 'none' # Cycle the times if tinc.total_seconds() <= 86400.0: start += tinc if start + tinc > istart + dt.timedelta(days=1): istart += dt.timedelta(days=1) else: if start + tinc >= istart + dt.timedelta(days=1): istart += dt.timedelta(days=1) if istart >= start + tinc: start += tinc return matched_inst
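As a usage illustration, the call below sketches how collect_inst_model_pairs might be wired up for a one-day pairing run, assuming this module is importable as pysatModels.utils.match. The instrument is pysat's built-in test instrument; the model instrument, file template, variable names and units are placeholders that would have to match a real model dataset, and the keyword names passed through model_load_kwargs are assumptions about load_model_xarray rather than a documented recipe.

import datetime as dt
import pysat
from pysatModels.utils.match import collect_inst_model_pairs

inst = pysat.Instrument(platform='pysat', name='testing')        # observational data
model_inst = pysat.Instrument(platform='pysat', name='testing')  # placeholder model Instrument
start = dt.datetime(2009, 1, 1)
stop = start + dt.timedelta(days=1)

matched = collect_inst_model_pairs(
    start, stop, dt.timedelta(days=1), inst,
    inst_download_kwargs={'skip_download': True},
    model_load_kwargs={'model_inst': model_inst,       # assumed load_model_xarray kwargs
                       'filename': 'model_%Y%j.nc'},   # placeholder file template
    inst_clean_rout=lambda i: i,                       # no-op cleaning for the sketch
    inst_lon_name='longitude', mod_lon_name='glon',    # placeholder variable names
    inst_name=['longitude', 'latitude', 'altitude'],
    mod_name=['glon', 'glat', 'alt'],
    mod_datetime_name='time', mod_time_name='time',
    mod_units=['deg', 'deg', 'km'],
    sel_name=['model_temp'], model_label='model')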
<filename>bokeh/models/markers.py #----------------------------------------------------------------------------- # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Display a variety of simple scatter marker shapes whose attributes can be associated with data columns from ``ColumnDataSources``. The full list of markers built into Bokeh is given below: * :class:`~bokeh.models.markers.Asterisk` * :class:`~bokeh.models.markers.Circle` * :class:`~bokeh.models.markers.CircleCross` * :class:`~bokeh.models.markers.CircleX` * :class:`~bokeh.models.markers.Cross` * :class:`~bokeh.models.markers.Dash` * :class:`~bokeh.models.markers.Diamond` * :class:`~bokeh.models.markers.DiamondCross` * :class:`~bokeh.models.markers.Hex` * :class:`~bokeh.models.markers.InvertedTriangle` * :class:`~bokeh.models.markers.Square` * :class:`~bokeh.models.markers.SquareCross` * :class:`~bokeh.models.markers.SquareX` * :class:`~bokeh.models.markers.Triangle` * :class:`~bokeh.models.markers.X` Markers are all subclasses of ``Glyph``. Additionally, they all share the same common interface providing fill and line properties provided by their base class ``Marker``. Note that a few glyphs, ``Cross`` and ``X``, only draw lines. For these the fill property values are ignored. Also note that the ``Circle`` glyph has some additional properties such as ``radius`` that other markers do not. .. autoclass:: Marker :members: ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- import logging # isort:skip log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Bokeh imports from ..core.enums import enumeration from ..core.has_props import abstract from ..core.properties import ( AngleSpec, DistanceSpec, Enum, Include, MarkerSpec, NumberSpec, ScreenDistanceSpec, ) from ..core.property_mixins import FillProps, LineProps from .glyphs import XYGlyph #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- __all__ = ( 'Asterisk', 'Circle', 'CircleCross', 'CircleX', 'Cross', 'Dash', 'Diamond', 'DiamondCross', 'Hex', 'InvertedTriangle', 'Marker', 'Scatter', 'Square', 'SquareCross', 'SquareX', 'Triangle', 'X', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- @abstract class Marker(XYGlyph): ''' Base class for glyphs that are simple markers with line and fill properties, located at an (x, y) location with a specified size. .. note:: For simplicity, all markers have both line and fill properties declared, however some markers (`Asterisk`, `Cross`, `X`) only draw lines. For these markers, the fill values are simply ignored. ''' # a canonical order for positional args that can be used for any # functions derived from this class _args = ('x', 'y', 'size', 'angle') x = NumberSpec(help=""" The x-axis coordinates for the center of the markers. """) y = NumberSpec(help=""" The y-axis coordinates for the center of the markers. 
""") size = ScreenDistanceSpec(default=4, help=""" The size (diameter) values for the markers in screen space units. """) angle = AngleSpec(default=0.0, help=""" The angles to rotate the markers. """) line_props = Include(LineProps, use_prefix=False, help=""" The %s values for the markers. """) fill_props = Include(FillProps, use_prefix=False, help=""" The %s values for the markers. """) class Scatter(Marker): ''' Render arbitrary markers according a specification. The Scatter can draw any built-in marker type. It can be configured to draw the same marker for all values by specifying the name of a marker, e.g. .. code-block:: python glyph = Scatter(x="x", y="y", size="sizes", marker="square") plot.add_glyph(source, glyph) will render only Square markers for all points. Alternatively, the Scatter marker can be configured to use marker types specified in a data source column: .. code-block:: python # source.data['markers'] = ["circle", "square", "circle", ... ] glyph = Scatter(x="x", y="y", size="sizes", marker="markers") plot.add_glyph(source, glyph) Note that circles drawn with `Scatter` conform to the standard Marker interface, and can only vary by size (in screen units) and *not* by radius (in data units). If you need to control circles by radius in data units, you should use the Circle glyph directly. ''' # a canonical order for positional args that can be used for any # functions derived from this class _args = ('x', 'y', 'size', 'angle', 'marker') marker = MarkerSpec(default="circle", help=""" Which marker to render. This can be the name of any built in marker, e.g. "circle", or a reference to a data column containing such names. """) __example__ = "examples/reference/models/Scatter.py" class Asterisk(Marker): ''' Render asterisk '*' markers. ''' __example__ = "examples/reference/models/Asterisk.py" class Circle(Marker): ''' Render circle markers. ''' __example__ = "examples/reference/models/Circle.py" # a canonical order for positional args that can be used for any # functions derived from this class _args = ('x', 'y') radius = DistanceSpec(None, help=""" The radius values for circle markers (in "data space" units, by default). .. note:: Circle markers are slightly unusual in that they support specifying a radius in addition to a size. Only one of ``radius`` or ``size`` should be given. .. warning:: Note that ``Circle`` glyphs are always drawn as circles on the screen, even in cases where the data space aspect ratio is not 1-1. In all cases where radius values are specified, the "distance" for the radius is measured along the dimension specified by ``radius_dimension``. If the aspect ratio is very large or small, the drawn circles may appear much larger or smaller than expected. See :bokeh-issue:`626` for more information. """) radius_dimension = Enum(enumeration('x', 'y', 'max', 'min'), help=""" What dimension to measure circle radii along. When the data space aspect ratio is not 1-1, then the size of the drawn circles depends on what direction is used to measure the "distance" of the radius. This property allows that direction to be controlled. Setting this dimension to 'max' will calculate the radius on both the x and y dimensions and use the maximum of the two, 'min' selects the minimum. """) class CircleCross(Marker): ''' Render circle markers with a '+' cross through the center. ''' __example__ = "examples/reference/models/CircleCross.py" class CircleX(Marker): ''' Render circle markers with an 'X' cross through the center. 
''' __example__ = "examples/reference/models/CircleX.py" class Cross(Marker): ''' Render '+' cross markers. ''' __example__ = "examples/reference/models/Cross.py" class Dash(Marker): ''' Render dash markers. Use ``angle`` to rotate and create vertically oriented short lines. ''' __example__ = "examples/reference/models/Dash.py" class Diamond(Marker): ''' Render diamond markers. ''' __example__ = "examples/reference/models/Diamond.py" class DiamondCross(Marker): ''' Render diamond markers with a '+' cross through the center. ''' __example__ = "examples/reference/models/DiamondCross.py" class Hex(Marker): ''' Render hexagon markers. ''' __example__ = "examples/reference/models/Hex.py" class InvertedTriangle(Marker): ''' Render upside-down triangle markers. ''' __example__ = "examples/reference/models/InvertedTriangle.py" class Square(Marker): ''' Render a square marker, optionally rotated. ''' __example__ = "examples/reference/models/Square.py" class SquareCross(Marker): ''' Render square markers with a '+' cross through the center. ''' __example__ = "examples/reference/models/SquareCross.py" class SquareX(Marker): ''' Render square markers with an 'X' cross through the center. ''' __example__ = "examples/reference/models/SquareX.py" class Triangle(Marker): ''' Render triangle markers. ''' __example__ = "examples/reference/models/Triangle.py" class X(Marker): ''' Render a 'X' cross markers. ''' __example__ = "examples/reference/models/X.py" #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
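To make the Scatter docstring above concrete, here is a minimal, self-contained script that renders per-point marker types taken from a data column, following the pattern shown in that docstring. The column names and output file name are arbitrary, and the low-level Plot setup is one reasonable choice rather than the only one.

from bokeh.io import output_file, show
from bokeh.models import ColumnDataSource, DataRange1d, LinearAxis, Plot, Scatter

source = ColumnDataSource(data=dict(
    x=[1, 2, 3, 4],
    y=[3, 1, 4, 2],
    sizes=[10, 15, 20, 25],
    markers=["circle", "square", "triangle", "diamond"],
))

plot = Plot(x_range=DataRange1d(), y_range=DataRange1d())
glyph = Scatter(x="x", y="y", size="sizes", marker="markers")
plot.add_glyph(source, glyph)
plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')

output_file("scatter_markers.html")
show(plot)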
<filename>tensorflow/pmctree.py # !/usr/bin/python # -*- coding:utf-8 -*- import multiprocessing as mp import tensorflow as tf from sub_tree import sub_tree from sub_tree import node import sys import logging import time import Queue import numpy as np from treelib import Tree import copy from utils import compute_bleu_rouge from utils import normalize from layers.basic_rnn import rnn from layers.match_layer import MatchLSTMLayer from layers.match_layer import AttentionFlowMatchLayer from layers.pointer_net import PointerNetDecoder def main(): for idx in range(10): print idx def job(x): return x*x def test_tf(): 1==1 # x = tf.placeholder(tf.float64, shape = None) # y = tf.placeholder(tf.float64, shape = None) # z = tf.placeholder(tf.float64, shape=None) # a = np.ones((1,5,4)) # b = np.array([[[1,2],[1,3]], [[0,1],[0,2]]]) # c = np.array([[(1.,2.,3.),(2.,3.,4.),(3.,4.,5.),(4.,5.,6.)],[(1.,2.,3.),(2.,2.,2.),(3.,4.,5.),(4.,5.,6.)]]) # #print a # print b # print c # print type(b) # #y = tf.multiply(x,) # tmp = tf.expand_dims(z,0) # sa = tf.shape(x) # sb = tf.shape(y) # sc = tf.shape(z) # s = tf.shape(tmp) # # #q = tf.matmul(x, tmp) # #sd = tf.shape(q) # # r = tf.gather_nd(c,b) # sr = tf.shape(r) # #print np.shape(a) # #print np.shape(b) # with tf.Session() as sess: # sb,sc,s,tmp,r,sr= sess.run([sb,sc,s,tmp,r,sr], feed_dict={x:a,y:b,z:c}) # print sb # print sc # #print q # print r # print sr # #return result class Data_tree(object): def __init__(self, tree, start_node): self.tree = tree self.start_node = start_node self.q_id = tree.raw_tree_data['tree_id'] self.q_type = tree.raw_tree_data['question_type'] self.words_id_list = tree.raw_tree_data['passage_token_id'] self.l_passage = tree.raw_tree_data['p_length'] self.ref_answer = tree.raw_tree_data['ref_answer'] self.p_data = [] self.listSelectedSet = [] self.value = 0 self.select_list = [] self.p_word_id, self.p_pred = [],[] self.tmp_node = None self.expand_node = None self.num_of_search = 0 self.result_value = 0 class PSCHTree(object): """ python -u run.py --train --algo MCST --epochs 1 --gpu 2 --max_p_len 2000 --hidden_size 150 --train_files ../data/demo/trainset/search.train.json --dev_files ../data/demo/devset/search.dev.json --test_files ../data/demo/test/search.test.json nohup python -u run.py --train --algo BIDAF --epochs 10 --train_files ../data/demo/trainset/test_5 --dev_files ../data/demo/devset/test_5 --test_files ../data/demo/test/search.test.json >test5.txt 2>&1 & nohup python -u run.py --train --algo MCST --epochs 100 --gpu 3 --max_p_len 1000 --hidden_size 150 --train_files ../data/demo/trainset/search.train.json --dev_files ../data/demo/devset/search.dev.json --test_files ../data/demo/test/search.test.json >test_313.txt 2>&1 & """ def __init__(self, args, vocab): self.vocab = vocab # logging self.logger = logging.getLogger("brc") # basic config self.algo = args.algo self.hidden_size = args.hidden_size self.optim_type = args.optim self.learning_rate = args.learning_rate self.weight_decay = args.weight_decay self.use_dropout = args.dropout_keep_prob < 1 self.dropout_keep_prob = 1.0 self.evluation_m = 'Rouge-L' #'Bleu-1','Bleu-2,3,4' # length limit self.max_p_num = args.max_p_num self.max_p_len = args.max_p_len self.max_q_len = args.max_q_len # self.max_a_len = args.max_a_len self.max_a_len = 3 # test paras self.search_time = 4 self.beta = 100.0 self._build_graph() def _init_sub_tree(self,tree): print '------- init sub tree :' + str(tree['tree_id']) + '---------' start_node = 'question_' + str(tree['tree_id']) mcts_tree = 
sub_tree(tree) data_tree = Data_tree(mcts_tree, start_node) data_tree.num_of_search += 1 return data_tree def _do_init_tree_job(self, lock,trees_to_accomplish, trees_that_are_done, log): while True: try: ''' try to get task from the queue. get_nowait() function will raise queue.Empty exception if the queue is empty. queue(False) function would do the same task also. ''' with lock: tree = trees_to_accomplish.get_nowait() except Queue.Empty: break else: ''' if no exception has been raised, add the task completion message to task_that_are_done queue ''' #result = self._init_sub_tree(tree) print '------- init sub tree :' + str(tree['tree_id']) + '---------' start_node = 'question_' + str(tree['tree_id']) mcts_tree = sub_tree(tree) data_tree = Data_tree(mcts_tree, start_node) data_tree.num_of_search += 1 lock.acquire() try: log.put(str(tree['tree_id']) + ' is done by ' + str(mp.current_process().name)) trees_that_are_done.put(data_tree) finally: lock.release() #time.sleep(.5) return True def _search_sub_tree(self, data_tree): sub_tree = data_tree.tree #print '------- search sub tree :' + str(sub_tree.q_id) + '---------' start_node_id = data_tree.start_node data_tree.num_of_search +=1 data_tree.select_list=[start_node_id] tmp_node = sub_tree.tree.get_node(start_node_id) while not tmp_node.is_leaf(): max_score = float("-inf") max_id = -1 for child_id in tmp_node.fpointer: child_node = sub_tree.tree.get_node(child_id) # score = child_node.data.p score = self.beta * child_node.data.p * ((1 + sub_tree.count) / (1 + child_node.data.num)) if score > max_score: max_id = child_id max_score = score data_tree.select_list.append(max_id) tmp_node = sub_tree.tree.get_node(max_id) data_tree.tmp_node = tmp_node return data_tree def _do_search_tree_job(self, lock, trees_to_accomplish, trees_that_are_done, log): while True: try: ''' try to get task from the queue. get_nowait() function will raise queue.Empty exception if the queue is empty. queue(False) function would do the same task also. ''' lock.acquire() try: data_tree = trees_to_accomplish.get_nowait() #print ('_do_search_tree_job', type(data_tree)) finally: lock.release() except Queue.Empty: break else: ''' if no exception has been raised, add the task completion message to task_that_are_done queue ''' #result = self._search_sub_tree(tree) sub_tree = data_tree.tree #print '------- search sub tree :' + str(sub_tree.q_id) + '---------' start_node_id = data_tree.start_node data_tree.num_of_search += 1 data_tree.select_list = [start_node_id] tmp_node = sub_tree.tree.get_node(start_node_id) while not tmp_node.is_leaf(): max_score = float("-inf") max_id = -1 for child_id in tmp_node.fpointer: child_node = sub_tree.tree.get_node(child_id) # score = child_node.data.p score = self.beta * child_node.data.p * ((1 + sub_tree.count) / (1 + child_node.data.num)) if score > max_score: max_id = child_id max_score = score data_tree.select_list.append(max_id) tmp_node = sub_tree.tree.get_node(max_id) data_tree.tmp_node = tmp_node lock.acquire() try: log.put(str(data_tree.tmp_node) + ' is selected by ' + str(mp.current_process().name)) trees_that_are_done.put(data_tree) finally: lock.release() return True def _do_tree_action_job(self, lock,trees_to_accomplish, action_result_queue, log): while True: try: ''' try to get task from the queue. get_nowait() function will raise queue.Empty exception if the queue is empty. queue(False) function would do the same task also. 
''' lock.acquire() try: data_tree = trees_to_accomplish.get_nowait() finally: lock.release() except Queue.Empty: break else: ''' if no exception has been raised, add the task completion message to task_that_are_done queue ''' #result = self._aciton_tree(tree) #result = tree prob, select_word_id, start_node = self._take_action(data_tree) data_tree.start_node = start_node data_tree.p_data.append(prob) data_tree.listSelectedSet.append(select_word_id) lock.acquire() try: log.put(str(data_tree.listSelectedSet) + ' is list of action choosen by ' + str(mp.current_process().name)) action_result_queue.put(data_tree) finally: lock.release() return True def feed_in_batch(self, tree_batch, parallel_size,feed_dict): self.tree_batch = tree_batch self.para_size = parallel_size self.batch_size = len(self.tree_batch['tree_ids']) #self.feed_dict = feed_dict def tree_search(self): trees = [] #test_tf() time_tree_start = time.time() #1)initialize trees for bitx in range(self.batch_size): #print '-------------- yeild ' + str(bitx) + '-------------' if self.tree_batch['p_length'][bitx] > self.max_p_len: #print '>>>>>>>>>>>>>>>> ' self.tree_batch['p_length'][bitx] = self.max_p_len self.tree_batch['candidates'][bitx] = self.tree_batch['candidates'][bitx][:(self.max_p_len)] #??? tree = {'tree_id': self.tree_batch['tree_ids'][bitx], 'question_token_ids': self.tree_batch['root_tokens'][bitx], 'passage_token_id': self.tree_batch['candidates'][bitx], 'q_length': self.tree_batch['q_length'][bitx], 'p_length': self.tree_batch['p_length'][bitx], 'question_type': self.tree_batch['question_type'][bitx], 'ref_answer': self.tree_batch['ref_answers'][bitx] #'mcst_model':self.tree_batch['mcst_model'] } trees.append(tree) print ('Max parallel processes size: ', self.para_size) number_of_task = self.batch_size number_of_procs = self.para_size manager = mp.Manager() trees_to_accomplish = manager.Queue() trees_that_are_done = manager.Queue() log = mp.Queue() processes = [] lock = manager.Lock() for i in trees: trees_to_accomplish.put(i) # creating processes for w in range(number_of_procs): p = mp.Process(target=self._do_init_tree_job, args=(lock,trees_to_accomplish, trees_that_are_done,log)) processes.append(p) p.start() # completing process for p in processes: p.join() # while not log.empty(): # 1==1 # print(log.get()) # for i,p in enumerate(processes): # if not p.is_alive(): # print ("[MAIN]: WORKER is a goner", i) # init the root node and expand the root node self.tree_list = [] self.finished_tree = [] init_list = [] while not trees_that_are_done.empty(): now_tree = trees_that_are_done.get() now_tree.expand_node = now_tree.tree.tree.get_node(now_tree.tree.tree.root) init_list.append(now_tree) self.tree_list = self.expands(init_list) # search tree for t in xrange(self.max_a_len): print ('Answer_len', t) if len(self.tree_list) == 0: break for data_tree in self.tree_list: has_visit_num = 0.0 tmp_node = data_tree.tree.tree.get_node(data_tree.start_node) for child_id in tmp_node.fpointer: child_node = data_tree.tree.tree.get_node(child_id) has_visit_num += child_node.data.num data_tree.tree.count = has_visit_num #search_time =int(self.search_time- has_visit_num) for s_time in range(self.search_time): print ('search time', s_time) # creating processes processes_search = [] tree_search_queue = manager.Queue() tree_result_queue = manager.Queue() for tree in self.tree_list: #print ('type', type(tree)) tree_search_queue.put(tree) search_tree_list = [] for w in range(number_of_procs): p = mp.Process(target=self._do_search_tree_job, 
args=(lock, tree_search_queue, tree_result_queue, log)) processes_search.append(p) p.start() time.sleep(0.1) while 1: if not tree_result_queue.empty(): data_tree = tree_result_queue.get() search_tree_list.append(data_tree) if len(search_tree_list) == number_of_procs: break #time.sleep(0.1) # completing process for p in processes_search: #p.join() p.terminate() # while not log.empty(): # 1==1 # print(log.get()) self.tree_list = [] #gather train data self.tree_list = self._search_vv(search_tree_list) tree_need_expand_list = [] tree_no_need_expand_list = [] for data_tree in self.tree_list: data_tree_update = self._updates(data_tree) tmp_node = data_tree_update.tmp_node l_passage = data_tree_update.l_passage #??? word_id = int(tmp_node.data.word[-1]) if tmp_node.is_leaf() and (word_id < (l_passage)): data_tree_update.expand_node = tmp_node tree_need_expand_list.append(data_tree_update) else: tree_no_need_expand_list.append(data_tree_update) self.tree_list = self.expands(tree_need_expand_list) self.tree_list = self.tree_list + tree_no_need_expand_list print '%%%%%%%%%%%%%%%%%%% start take action %%%%%%%%%%%%%%' num_action_procs = 0 self.finished_tree = [] action_queue = manager.Queue() action_result_queue = manager.Queue() for data_tree in self.tree_list: #print ('######### tree.listSelectedSet: ', data_tree.listSelectedSet) if not len(data_tree.listSelectedSet) == 0 : last_word = data_tree.listSelectedSet[-1] if not last_word == str(data_tree.l_passage): action_queue.put(data_tree) num_action_procs +=1 else: self.finished_tree.append(data_tree) else: action_queue.put(data_tree) num_action_procs += 1 action_tree_list = [] processes_action = [] #print ('###start take action ') #print ('len(self.tree_list)', len(self.tree_list)) for w in range(num_action_procs): #print (w, w) p = mp.Process(target=self._do_tree_action_job, args=(lock, action_queue, action_result_queue, log)) processes_action.append(p) p.start() time.sleep(0.1) # completing process while 1: #time.sleep(0.1) if not action_result_queue.empty(): data_tree = action_result_queue.get() action_tree_list.append(data_tree) if len(action_tree_list) == num_action_procs: break for p in processes_action: p.terminate() # while not log.empty(): # print(log.get()) self.tree_list = action_tree_list for selection in action_tree_list: print ('selection', selection.listSelectedSet) print '%%%%%%%%%%%%%% end take action %%%%%%%%%%%%%%%' for t in self.tree_list: self.finished_tree.append(t) time_tree_end = time.time() print ('&&&&&&&&&&&&&&& tree search time = %3.2f s &&&&&&&&&&&&' %(time_tree_end-time_tree_start)) print ('--------------- end tree:', len(self.finished_tree)) #create nodes --->search until finish ---- pred_answers,ref_answers = [],[] for data_tree in self.finished_tree: p_words_list = data_tree.words_id_list listSelectedSet_words = [] listSelectedSet = map(eval, data_tree.listSelectedSet) for idx in listSelectedSet: listSelectedSet_words.append(p_words_list[idx]) strr123 = self.vocab.recover_from_ids(listSelectedSet_words, 0) pred_answers.append({'question_id': data_tree.q_id, 'question_type': data_tree.q_type, 'answers': [''.join(strr123)], 'entity_answers': [[]], 'yesno_answers': []}) ref_answers.append({'question_id': data_tree.q_id, 'question_type': data_tree.q_type, 'answers': data_tree.ref_answer, 'entity_answers': [[]], 'yesno_answers': []}) if len(ref_answers) > 0: pred_dict, ref_dict = {}, {} for pred, ref in zip(pred_answers, ref_answers): question_id = ref['question_id'] if len(ref['answers']) > 0: pred_dict[question_id] = 
normalize(pred['answers']) ref_dict[question_id] = normalize(ref['answers']) bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict) else: bleu_rouge = None value_with_mcts = bleu_rouge print 'bleu_rouge(value_with_mcts): ' print value_with_mcts data_tree.result_value = value_with_mcts print '============= start compute loss ===================' loss_time_start = time.time() first_sample_list = [] sample_list = [] #save lists of feed_dict p_list,q_list = [], [] p_length, q_length= [], [] p_data_list = [] pad = 0 # selected_words_list = [] # candidate_words_list = [] for t_id, data_tree in enumerate(self.finished_tree,0): tree_data = data_tree.tree.get_raw_tree_data() listSelectedSet = data_tree.listSelectedSet pad = [t_id, len(tree_data['passage_token_id'])-1] #print ('pad',pad) words_list = [i for i in range(data_tree.l_passage+1)] for prob_id, prob_data in enumerate(data_tree.p_data): # print 'p_data: ' # print prob_id # print prob_data c = [] policy = [] for prob_key, prob_value in prob_data.items(): c.append(prob_key) policy.append(prob_value) # print ('tree_id', data_tree.q_id) # print 'listSelectedSet[:prob_id]' # print listSelectedSet[:prob_id] # print 'policy: ' # print policy # print 'sum_policy' # print np.sum(policy) # print 'shape_policy' # print np.shape(policy) # print 'value: ' # print data_tree.result_value['Rouge-L'] # print 'candidate: ' # print c if prob_id == 0: input_v = data_tree.result_value['Rouge-L'] feed_dict = {self.p: [tree_data['passage_token_id']], self.q: [tree_data['question_token_ids']], self.p_length: [tree_data['p_length']], self.q_length: [tree_data['q_length']], self.words_list: words_list, self.dropout_keep_prob: 1.0} feeddict = dict(feed_dict.items() + {self.policy: [policy], self.v: [[input_v]]}.items()) first_sample_list.append(feeddict) _,loss_first = self.sess.run([self.optimizer_first,self.loss_first], feed_dict=feeddict) print('loss,first', loss_first) else: p_list.append(tree_data['passage_token_id']) q_list.append(tree_data['question_token_ids']) p_length.append(tree_data['p_length']) q_length.append(tree_data['q_length']) p_data_list.append([t_id,listSelectedSet[:prob_id], c, policy, data_tree.result_value[self.evluation_m]]) # for sample in first_sample_list: # loss_first = self.sess.run(self.loss_first, feed_dict=sample) # print('loss,first', loss_first) # for sample in sample_list: policy_c_id_list = [] fd_selected_list = [] selected_length_list = [] candidate_length_list = [] fd_policy_c_id_list = [] policy_list = [] value_list = [] for idx, sample in enumerate(p_data_list, 0): #print ('sample', sample) t_id = sample[0] selected_words = sample[1] candidate_words = sample[2] policy = sample[3] value = sample[4] selected_words = map(eval, selected_words) tmp = [] for word in selected_words: tmp.append([t_id, word]) fd_selected_list.append(tmp) selected_length_list.append(len(selected_words)) candidate_words = map(eval, candidate_words) tmp2 = [] for word2 in candidate_words: tmp2.append([t_id, word2]) fd_policy_c_id_list.append(tmp2) # no order version candidate_length_list.append(len(candidate_words)) assert len(candidate_words) == len(policy) policy_list.append(policy) value_list.append(value) fd_selected_list = self._pv_padding(fd_selected_list, selected_length_list, pad) fd_policy_c_id_list = self._pv_padding(fd_policy_c_id_list, candidate_length_list, pad) policy_list = self._pv_padding(policy_list, candidate_length_list, 0.0) if not (len(policy_list)) == 0: feed_dict = {self.p: p_list, self.q: q_list, self.p_length: p_length, 
self.q_length: q_length, self.dropout_keep_prob: 1.0} feeddict = dict(feed_dict.items() + { self.selected_id_list: fd_selected_list, self.seq_length:selected_length_list, self.selected_batch_size : len(selected_length_list), self.candidate_id: fd_policy_c_id_list, self.candidate_batch_size: [len(fd_policy_c_id_list),1,1], self.policy: policy_list, self.v: [value_list]}.items()) # print ('shape of p_list',np.shape(p_list)) # print ('shape of q_list', np.shape(q_list)) # print ('shape of p_length', np.shape(p_length)) # print ('shape of q_length', np.shape(q_length)) # print ('shape of fd_selected_list', np.shape(fd_selected_list)) # print ('shape of selected_length_list', np.shape(selected_length_list)) # print ('shape of selected_batch_size', np.shape(len(selected_length_list))) # print ('shape of fd_policy_c_id_list', np.shape(fd_policy_c_id_list)) # print ('shape of candidate_batch_size', np.shape([len(fd_policy_c_id_list),1,1])) # print ('shape of policy_list', np.shape(policy_list)) # print ('shape of [value_list]', np.shape([value_list])) #print ('shape of ', np.shape()) _, loss = self.sess.run([self.optimizer,self.loss], feed_dict=feeddict) loss_time_end = time.time() print('loss',loss) print ('time of computer loss is %3.2f s' %(loss_time_end-loss_time_start)) print '==================== end computer loss ================ ' # loss = self.sess.run(self.loss, feed_dict=feeddict) # print('loss',loss) # total_loss += loss * len(self.finished_tree) # total_num += len(self.finished_tree) # n_batch_loss += loss # if log_every_n_batch > 0 and bitx % log_every_n_batch == 0: # self.logger.info('Average loss from batch {} to {} is {}'.format( # bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch)) #return 1.0 * total_loss / total_num return 0 def _pv_padding(self, padding_list, seq_length_list, pad): padding_length = 0 for length in seq_length_list: padding_length = max(padding_length,length) #print('padding_length',padding_length) for idx, sub_list in enumerate(padding_list,0): #you yade gaixi #print ('sublist [-1]', sub_list[-1]) rangee = padding_length - seq_length_list[idx] for i in range(rangee): sub_list.append(pad) for sub_list in padding_list: assert len(sub_list) == padding_length return padding_list def expands(self, tree_list): print ('============= start expands ==============') time_expend_start = time.time() p_feed = [] q_feed = [] p_lenth_feed = [] q_length_feed = [] words_list_list = [] l_passage_list = [] policy_need_list = [] for t_idx, data_tree in enumerate(tree_list,0): tree_data = data_tree.tree.get_raw_tree_data() word_list = data_tree.expand_node.data.word l_passage = data_tree.l_passage #print ('1word_list', word_list) if(len(word_list) == 0): data_tree = self._get_init_policy(data_tree,l_passage+1) else: p_feed.append(tree_data['passage_token_id']) q_feed.append(tree_data['question_token_ids']) p_lenth_feed.append(tree_data['p_length']) q_length_feed.append(tree_data['q_length']) words_list_list.append(data_tree.expand_node.data.word) l_passage_list.append((data_tree.l_passage+1)) policy_need_list.append(t_idx) if not (len(p_feed) == 0): feed_dict = {self.p: p_feed, self.q: q_feed, self.p_length: p_lenth_feed, self.q_length: q_length_feed, self.dropout_keep_prob: 1.0} policy_ids, policys = self._cal_policys(words_list_list,l_passage_list,feed_dict) for p_idx, t_idx in enumerate(policy_need_list, 0): tree_list[t_idx].p_pred = policys[p_idx] tree_list[t_idx].p_word_id = policy_ids[p_idx] for d_tree in tree_list: leaf_node = d_tree.expand_node words_list 
= leaf_node.data.word #print ('words_list', words_list) for idx, word in enumerate(d_tree.p_word_id,0): #print ('word ', word) d_tree.tree.node_map[' '.join(words_list + [str(word)])] = len(d_tree.tree.node_map) #print ('node_map', d_tree.tree.node_map) new_node = node() new_node.word = words_list + [str(word)] #idx = d_tree.p_word_id.index(word) new_node.p = d_tree.p_pred[idx] # print 'new_node.p ' + str(new_node.p) id = d_tree.tree.node_map[' '.join(new_node.word)] #print 'identifier******************* ' + str(id) d_tree.tree.tree.create_node(identifier= id , data=new_node, parent=leaf_node.identifier) time_expand_end = time.time() print ('time of expand is %3.2f s' %(time_expand_end-time_expend_start)) print ('================= end expands ==============') return tree_list def _get_init_policy(self, data_tree, l_passage): #print('&&&&&&&&& start init_policy &&&&&&&&') tree = data_tree.tree tree_data = tree.get_raw_tree_data() words_list = [i for i in range(l_passage)] feed_dict = {self.p: [tree_data['passage_token_id']], self.q: [tree_data['question_token_ids']], self.p_length: [tree_data['p_length']], self.q_length: [tree_data['q_length']], self.words_list: words_list, self.dropout_keep_prob: 1.0} # print ('length of passage', tree_data['p_length']) # print ('length of padding passage',len(tree_data['passage_token_id'])) # print ('padding',tree_data['passage_token_id'][-1]) data_tree.p_pred = self.sess.run(self.prob_first, feed_dict=feed_dict) data_tree.p_word_id = [i for i in range(l_passage)] #print('&&&&&&&&& end init_policy &&&&&&&&') return data_tree def _get_init_value(self, data_tree): #print('$$$$$$$ start init_value $$$$$$$$$') tree = data_tree.tree tree_data = tree.get_raw_tree_data() feed_dict = {self.p: [tree_data['passage_token_id']], self.q: [tree_data['question_token_ids']], self.p_length: [tree_data['p_length']], self.q_length: [tree_data['q_length']], self.dropout_keep_prob: 1.0} value_p = self.sess.run(self.value_first, feed_dict=feed_dict) # print ('_get_init_value',value_p) # print('$$$$$$$ end init_value $$$$$$$$$') return value_p def _search_vv(self, search_tree_list): start = time.time() print ('--------------------- start search_vv ------------------------') value_id_list = [] p_feed = [] q_feed = [] p_lenth_feed = [] q_length_feed = [] words_list_list = [] for t_id,data_tree in enumerate(search_tree_list,0): tree_data = data_tree.tree.get_raw_tree_data() tmp_node = data_tree.tmp_node word_id = int(tmp_node.data.word[-1]) l_passage = data_tree.l_passage ##??? 
words_list = tmp_node.data.word if len(words_list) == 0: data_tree.value = self._get_init_value(data_tree) else: #print ('word_id', word_id) if (word_id == (l_passage)): v = 0 pred_answer = tmp_node.data.word listSelectedSet_words = [] listSelectedSet = map(eval, pred_answer) # print listSelectedSet for idx in listSelectedSet: listSelectedSet_words.append(data_tree.words_id_list[idx]) str123 = self.vocab.recover_from_ids(listSelectedSet_words, 0) pred_answers = [] ref_answers = [] pred_answers.append({'question_id': data_tree.q_id, 'question_type': data_tree.q_type, 'answers': [''.join(str123)], 'entity_answers': [[]], 'yesno_answers': []}) ref_answers.append({'question_id': data_tree.q_id, 'question_type': data_tree.q_type, 'answers': data_tree.ref_answer, 'entity_answers': [[]], 'yesno_answers': []}) print '**************** tree_search get end id ***************' if len(data_tree.ref_answer) > 0: pred_dict, ref_dict = {}, {} for pred, ref in zip(pred_answers, ref_answers): question_id = ref['question_id'] if len(ref['answers']) > 0: pred_dict[question_id] = normalize(pred['answers']) ref_dict[question_id] = normalize(ref['answers']) bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict) else: bleu_rouge = None v = bleu_rouge[self.evluation_m] print ('v: ', v) data_tree.v = v else: p_feed.append(np.array(tree_data['passage_token_id'])) q_feed.append(np.array(tree_data['question_token_ids'])) p_lenth_feed.append(np.array(tree_data['p_length'])) q_length_feed.append(np.array(tree_data['q_length'])) words_list_list.append(words_list) value_id_list.append(t_id) if not (len(p_feed)) == 0: self.feed_dict = {self.p: p_feed, self.q: q_feed, self.p_length: p_lenth_feed, self.q_length: q_length_feed, self.dropout_keep_prob: 1.0} values = self._cal_values(words_list_list, self.feed_dict) for t_idx,v_idx in enumerate(value_id_list, 0): search_tree_list[v_idx].value = values[t_idx] end = time.time() print ('search time: %3.2f s' %(end - start)) print('----------------- end search_vv ' + str(end) + '------------------') return search_tree_list def _cal_values(self, words_list_list, feeddict): fd_words_list = [] seq_length = [] for idx, words_list in enumerate(words_list_list,0): words_list = map(eval, words_list) tp = [] for word in words_list: tp = np.array([idx,word]) fd_words_list.append(tp) seq_length.append(np.array(len(words_list))) fd_words_list = np.array(fd_words_list) seq_length = np.array(seq_length) feed_dict = dict({self.selected_id_list: fd_words_list, self.seq_length: seq_length, self.selected_batch_size : len(seq_length)}.items() + feeddict.items()) values = self.sess.run(self.value, feed_dict=feed_dict) #print ('values',values) return values def _updates(self,data_tree): node_list = data_tree.select_list value = data_tree.value for node_id in node_list: tmp_node = data_tree.tree.tree.get_node(node_id) tmp_node.data.Q = (tmp_node.data.Q * tmp_node.data.num + value) / (tmp_node.data.num + 1) tmp_node.data.num += 1 data_tree.tree.count += 1 return data_tree def _get_policy(self, data_tree): sub_tree = data_tree.tree start_node_id = data_tree.start_node tmp_node = sub_tree.tree.get_node(start_node_id) max_time = -1 prob = {} for child_id in tmp_node.fpointer: child_node = sub_tree.tree.get_node(child_id) if sub_tree.count == 0: prob[child_node.data.word[-1]] = 0.0 else: prob[child_node.data.word[-1]] = child_node.data.num / sub_tree.count return prob def _take_action(self, data_tree): sub_tree = data_tree.tree start_node_id = data_tree.start_node tmp_node = 
sub_tree.tree.get_node(start_node_id) max_time = -1 prob = {} for child_id in tmp_node.fpointer: child_node = sub_tree.tree.get_node(child_id) prob[child_node.data.word[-1]] = child_node.data.num / sub_tree.count if child_node.data.num > max_time: max_time = child_node.data.num select_word = child_node.data.word[-1] select_word_node_id = child_node.identifier return prob, select_word, select_word_node_id def _policy_padding(self, padding_list, seq_length_list, pad): padding_length = 0 for length in seq_length_list: padding_length = max(padding_length,length) #print('padding_length',padding_length) for idx, sub_list in enumerate(padding_list,0): #you yade gaixi #print ('sublist [-1]', sub_list[-1]) #padding = [sub_list[-1][0],(sub_list[-1][1])] #print ('padding',padding) rangee = padding_length - seq_length_list[idx] for i in range(rangee): sub_list.append(pad) for sub_list in padding_list: assert len(sub_list) == padding_length return padding_list def _cal_policys(self, words_list_list, l_passage_list, feeddict): policy_c_id_list = [] fd_words_list = [] seq_length_list = [] candidate_length_list = [] fd_policy_c_id_list = [] for idx, words_list in enumerate(words_list_list,0): max_id = float('-inf') policy_c_id = [] words_list = map(eval, words_list) tmp = [] for word in words_list: tmp.append([idx, word]) fd_words_list.append(tmp) seq_length_list.append(len(words_list)) for can in words_list: max_id = max(can, max_id) for i in range(l_passage_list[idx]): if i > max_id: policy_c_id.append(i) pad = [idx,l_passage_list[idx]-1] candidate_length_list.append(len(policy_c_id)) policy_c_id_list.append(policy_c_id) tmp2 = [] for word in policy_c_id: tmp2.append([idx, word]) fd_policy_c_id_list.append(tmp2) #print ('start_padding', candidate_length_list) fd_policy_c_id_list = self._policy_padding(fd_policy_c_id_list,candidate_length_list,pad) selected_batch_size = len(fd_words_list) candidate_batch_size = [len(fd_policy_c_id_list),1,1] feed_dict = dict( {self.selected_id_list: fd_words_list, self.candidate_id: fd_policy_c_id_list, self.seq_length: seq_length_list, self.selected_batch_size : selected_batch_size, self.candidate_batch_size: candidate_batch_size}.items() + feeddict.items()) #print feed_dict c_pred = self.sess.run(self.prob, feed_dict=feed_dict) #print ('can', c_pred) #print ('shape of pre ', np.shape(c_pred)) # for x in c_pred: # print ('x',np.sum(x)) #c_pred = self.sess.run(self.prob, feed_dict=feed_dict) return policy_c_id_list, c_pred def _build_graph(self): """ Builds the computation graph with Tensorflow """ # session info sess_config = tf.ConfigProto() sess_config.gpu_options.allow_growth = True self.sess = tf.Session(config=sess_config) start_t = time.time() self._setup_placeholders() self._embed() self._encode() self._initstate() self._action_frist() self._action() self._compute_loss() # param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params]) # self.logger.info('There are {} parameters in the model'.format(param_num)) self.saver = tf.train.Saver() self.sess.run(tf.global_variables_initializer()) self.logger.info('Time to build graph: {} s'.format(time.time() - start_t)) def _setup_placeholders(self): """ Placeholders """ self.p = tf.placeholder(tf.int32, [None, None]) self.q = tf.placeholder(tf.int32, [None, None]) self.p_length = tf.placeholder(tf.int32, [None]) self.q_length = tf.placeholder(tf.int32, [None]) self.dropout_keep_prob = tf.placeholder(tf.float32) # test self.words_list = tf.placeholder(tf.int32, [None]) self.candidate_id = 
tf.placeholder(tf.int32, None) self.seq_length = tf.placeholder(tf.int32, [None]) self.selected_batch_size = tf.placeholder(tf.int32,None) self.candidate_batch_size = tf.placeholder(tf.int32, None) # self.words = tf.placeholder(tf.float32, [None, None]) self.selected_id_list = tf.placeholder(tf.int32, None) self.policy = tf.placeholder(tf.float32, [None, None]) # policy self.v = tf.placeholder(tf.float32, [1,None]) # value def _embed(self): """ The embedding layer, question and passage share embeddings """ # with tf.device('/cpu:0'), tf.variable_scope('word_embedding'): with tf.variable_scope('word_embedding'): self.word_embeddings = tf.get_variable( 'word_embeddings', shape=(self.vocab.size(), self.vocab.embed_dim), initializer=tf.constant_initializer(self.vocab.embeddings), trainable=True ) self.p_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p) self.q_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q) def _encode(self): """ Employs two Bi-LSTMs to encode passage and question separately """ with tf.variable_scope('passage_encoding'): self.p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size) with tf.variable_scope('question_encoding'): _, self.sep_q_encodes = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size) #self.sep_q_encodes,_ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size) if self.use_dropout: self.p_encodes = tf.nn.dropout(self.p_encodes, self.dropout_keep_prob) self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob) def _initstate(self): self.V = tf.Variable( tf.random_uniform([self.hidden_size * 2, self.hidden_size * 2], -1. / self.hidden_size, 1. / self.hidden_size)) self.W = tf.Variable(tf.random_uniform([self.hidden_size * 2, 1], -1. / self.hidden_size, 1. / self.hidden_size)) self.W_b = tf.Variable(tf.random_uniform([1, 1], -1. / self.hidden_size, 1. / self.hidden_size)) self.V_c = tf.Variable( tf.random_uniform([self.hidden_size * 2, self.hidden_size], -1. / self.hidden_size, 1. / self.hidden_size)) self.V_h = tf.Variable( tf.random_uniform([self.hidden_size * 2, self.hidden_size], -1. / self.hidden_size, 1. 
/ self.hidden_size)) self.q_state_c = tf.sigmoid(tf.matmul(self.sep_q_encodes, self.V_c)) self.q_state_h = tf.sigmoid(tf.matmul(self.sep_q_encodes, self.V_h)) self.q_state = tf.concat([self.q_state_c, self.q_state_h], 1) #(3,300) self.words = tf.reshape(self.p_encodes, [-1, self.hidden_size * 2]) def _action_frist(self): """ select first word """ # self.candidate = tf.reshape(self.p_emb,[-1,self.hidden_size*2]) self.words_in = tf.gather(self.words, self.words_list) self.w = tf.matmul(self.words_in, self.V) self.tmp = tf.matmul(self.w, tf.transpose(self.q_state)) self.logits_first = tf.reshape(self.tmp, [-1]) self.prob_first = tf.nn.softmax(self.logits_first) self.prob_id_first = tf.argmax(self.prob_first) self.value_first = tf.sigmoid(tf.reshape(tf.matmul(self.q_state, self.W), [1, 1]) + self.W_b) # [1,1] def _action(self): """ Employs Bi-LSTM again to fuse the context information after match layer """ #self.selected_id_list = tf.expand_dims(self.selected_id_list, 0) self.candidate = tf.gather_nd(self.p_encodes, self.candidate_id) self.shape_a = tf.shape(self.seq_length) self.selected_list = tf.gather_nd(self.p_encodes, self.selected_id_list) self.rnn_input = tf.reshape(self.selected_list, [self.selected_batch_size, -1, self.hidden_size * 2]) # (6,2,300) # rnn_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.hidden_size, state_is_tuple=False) _, self.states = tf.nn.dynamic_rnn(rnn_cell, self.rnn_input, sequence_length = self.seq_length , initial_state=self.q_state, dtype=tf.float32) # [1, dim] #(6,300) self.value = tf.sigmoid(tf.matmul(self.states, self.W) + self.W_b) # [6,1] #self.value = tf.sigmoid(tf.reshape(tf.matmul(self.states, self.W), [1, 1]) + self.W_b) # [1,1] self.VV = tf.expand_dims(self.V, 0) self.VVV = tf.tile(self.VV, self.candidate_batch_size) self.can = tf.matmul(self.candidate, self.VVV) #self.s_states = tf.reshape(self.states, [self.candidate_batch_size, self.hidden_size * 2, 1]) self.s_states = tf.expand_dims(self.states, 2) # self.shape_a = tf.shape(self.can) # self.shape_b = tf.shape(self.s_states) self.logits = tf.matmul(self.can, self.s_states) self.prob = tf.nn.softmax(self.logits,dim = 1)# (6,458,1) self.prob_id = tf.argmax(self.prob) def _compute_loss(self): """ The loss function """ self.loss_first = tf.contrib.losses.mean_squared_error(self.v, self.value_first) - \ tf.matmul(self.policy,tf.reshape(tf.log( tf.clip_by_value(self.prob_first, 1e-30,1.0)),[-1, 1])) self.optimizer_first = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss_first) self.loss = tf.reduce_mean(tf.contrib.losses.mean_squared_error(tf.transpose(self.v), self.value) - tf.reduce_sum(tf.multiply(self.policy, tf.reshape( tf.log(tf.clip_by_value(self.prob, 1e-30, 1.0)), [self.selected_batch_size, -1])),1)) self.optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss) self.all_params = tf.trainable_variables() def _create_train_op(self): """ Selects the training algorithm and creates a train operation with it """ if self.optim_type == 'adagrad': self.optimizer = tf.train.AdagradOptimizer(self.learning_rate) elif self.optim_type == 'adam': self.optimizer = tf.train.AdamOptimizer(self.learning_rate) elif self.optim_type == 'rprop': self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate) elif self.optim_type == 'sgd': self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate) else: raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type)) self.train_op = self.optimizer.minimize(self.loss) if __name__ == '__main__': 1 == 1 
    #tree_search()
    test_tf()
    #tree_search()
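
# ---------------------------------------------------------------------------
# Illustration only (not part of the training code above): a minimal NumPy
# sketch of the objective that _compute_loss builds in TensorFlow -- a squared
# error between the MCTS search value `v` and the predicted value, plus the
# cross-entropy between the visit-count policy and the predicted word
# distribution.  The function name and the sample numbers below are made up.
import numpy as np

def toy_policy_value_loss(v_target, v_pred, policy_target, prob_pred, eps=1e-30):
    """Scalar loss for one (state, policy, value) training sample."""
    value_term = (v_target - v_pred) ** 2
    # -sum(pi * log(p)) is the policy term; clipping mirrors tf.clip_by_value.
    policy_term = -np.sum(policy_target * np.log(np.clip(prob_pred, eps, 1.0)))
    return value_term + policy_term

# Example: four candidate words, word 2 visited most often during the search.
print(toy_policy_value_loss(0.63, 0.55,
                            np.array([0.1, 0.2, 0.6, 0.1]),
                            np.array([0.2, 0.3, 0.4, 0.1])))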
<gh_stars>1-10 import numpy as np import time import cv2 import torch from torch.autograd import Variable import OCR.lib.utils.utils as utils import OCR.lib.models.crnn as crnn import OCR.lib.config.alphabets as alphabets import yaml from easydict import EasyDict as edict import argparse def parse_arg(): parser = argparse.ArgumentParser(description="demo") parser.add_argument('--cfg', help='experiment configuration filename', type=str, default='OCR/lib/config/360CC_config.yaml') parser.add_argument('--image_path', type=str, default='OCR/images/for_ocr.png', help='the path to image') parser.add_argument('--checkpoint', type=str, default='OCR/output/checkpoints/mixed_second_finetune_acc_97P7.pth', help='the path to checkpoints') args = parser.parse_args() with open(args.cfg, 'r') as f: config = yaml.load(f,Loader=yaml.FullLoader) config = edict(config) config.DATASET.ALPHABETS = alphabets.alphabet config.MODEL.NUM_CLASSES = len(config.DATASET.ALPHABETS) return config, args def recognition(config, img, model, converter, device): h, w = img.shape img = cv2.resize(img, (0, 0), fx=config.MODEL.IMAGE_SIZE.H / h, fy=config.MODEL.IMAGE_SIZE.H / h, interpolation=cv2.INTER_CUBIC) h, w = img.shape w_cur = int(img.shape[1] / (config.MODEL.IMAGE_SIZE.OW / config.MODEL.IMAGE_SIZE.W)) img = cv2.resize(img, (0, 0), fx=w_cur / w, fy=1.0, interpolation=cv2.INTER_CUBIC) img = np.reshape(img, (config.MODEL.IMAGE_SIZE.H, w_cur, 1)) img = img.astype(np.float32) img = (img / 255. - config.DATASET.MEAN) / config.DATASET.STD img = img.transpose([2, 0, 1]) img = torch.from_numpy(img) img = img.to(device) img = img.view(1, *img.size()) model.eval() preds = model(img) preds1 = preds.squeeze() #print(preds.shape) _, preds = preds.max(2) preds = preds.transpose(1, 0).contiguous().view(-1) #softmax = torch.index_select(preds1,0,preds[preds > 0]) #softmax get index = torch.nonzero(torch.gt(preds,torch.tensor([0]).cuda())).squeeze() preds_size = Variable(torch.IntTensor([preds.size(0)])) sim_pred = converter.decode(preds.data, preds_size.data, raw=False) log_softmax = torch.index_select(preds1.T,1,index).T new_log,index = torch.sort(log_softmax,dim = -1,descending=True) new_log = torch.exp(new_log[:,:20]) index = index[:,:20] matrix = [] for i in range(log_softmax.shape[0]): str = converter.decode(index[i].data, Variable(torch.IntTensor([index[i].shape[0]])).data, raw=False) new_log_ = new_log[i].cpu().detach().numpy().tolist() count = 0 temp = [] for j in range(index.shape[1] - 1): if str[j] >= 'a' and str[j] <= 'z' or str[j] >= 'A' and str[j] <= 'Z': continue else: count+= 1 temp.append([new_log_[j],str[j]]) if count >= 5: break if len(temp) < 5: [temp.append([1e-10,"#"]) for i in range(5 - len(temp))] #print(temp) matrix.append(temp) #print('results: {0}'.format(sim_pred),"\n",matrix) return sim_pred,matrix def OCR_OR_LOGMAX(addition = None,fileName = None): config, args = parse_arg() device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') model = crnn.get_crnn(config).to(device) checkpoint = torch.load(args.checkpoint) if 'state_dict' in checkpoint.keys(): model.load_state_dict(checkpoint['state_dict']) else: model.load_state_dict(checkpoint) img = cv2.imread(fileName) img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) converter = utils.strLabelConverter(config.DATASET.ALPHABETS) result,matrix = recognition(config, img, model, converter, device) #print(result) if addition != None: return result,matrix else: return result
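
# ---------------------------------------------------------------------------
# Illustration only (not part of the OCR module above).  recognition() leans
# on converter.decode(..., raw=False) for the usual CTC greedy post-processing
# step: collapse runs of identical labels, then drop the blank class (index 0).
# The standalone sketch below reproduces that collapse on plain lists; the
# alphabet and the per-frame predictions are made-up examples.
def ctc_greedy_collapse(pred_ids, alphabet, blank=0):
    """Collapse per-frame argmax ids into a decoded string."""
    decoded = []
    prev = None
    for idx in pred_ids:
        if idx != blank and idx != prev:       # skip blanks and repeated labels
            decoded.append(alphabet[idx - 1])  # class k maps to alphabet[k - 1] in this sketch
        prev = idx
    return ''.join(decoded)

# Frames "--hh-e-ll--lo" decode to "hello".
print(ctc_greedy_collapse([0, 8, 8, 0, 5, 0, 12, 12, 0, 12, 15],
                          'abcdefghijklmnopqrstuvwxyz'))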
def ReadWebFile(url, time_range, filter = None): import http.client import ssl timeout = 5 try: [proto, _] = url.split("://") hostname = _.split("/")[0] path = _[len(hostname):] if proto == "https": ssl_context = ssl._create_unverified_context() conn = http.client.HTTPSConnection(hostname, port = 443, timeout = timeout, context = ssl_context) else: conn = http.client.HTTPConnection(hostname, port = 80, timeout = timeout) conn.request(method = "GET", url = path) resp = conn.getresponse() all_lines = resp.read().decode("utf-8").rstrip().splitlines() except Exception as e: return e conn.close() matches = [] for line in all_lines: timestamp = int(line[:10]) if timestamp < time_range[0]: continue if timestamp > time_range[1]: break if AnalyzeLine(line, filter): matches.append(line) return matches def ReadLocalFile(filename, time_range, filter = None): fields = ['timestamp','elapsed','client_ip','code','bytes','method','url','rfc931','peer_status','type'] matches = [] try: fh = open(filename, "r") except: raise Exception("ERROR: could not read log file '" + filename + "'") for line in fh: timestamp = int(line[:10]) if timestamp < time_range[0]: continue if timestamp > time_range[1]: break result = AnalyzeLine(line, filter) if result: matches.append(result) #_ = line.split() #matches.append(dict(zip(fields, _))) return matches def AnalyzeLine(line,filter = None): from datetime import datetime fields = ['timestamp','elapsed','client_ip','code','bytes','method','url','rfc931','peer_status','type'] if filter: if filter in line: #lines.append(line.split()) _ = line.split() #datetimestr = datetime.fromtimestamp(int(_[0][0:10]), tz=None) #_[0] = datetimestr.strftime("%d-%m-%y %H:%M:%S") return dict(zip(fields, _)) else: #lines.append(line.split()) _ = line.split() #datetimestr = datetime.fromtimestamp(int(_[0][0:10]), tz=None) #_[0] = datetimestr.strftime("%d-%m-%y %H:%M:%S") return dict(zip(fields, _)) return def GetData(): from datetime import datetime from time import time from math import floor now = floor(time()) #now = 1617379601 time_range = (now - 3600 * 6, now) hostnames = [] for _ in range(1,5): hostnames.append('gcp-prox01-p00{}'.format(_)) fields = ['timestamp','elapsed','client_ip','code','bytes','method','url','rfc931','peer_status','type'] entries = [] reporters = {}; client_ips = {}; usernames = {}; codes = {} for hostname in hostnames: filter = None _ = ReadLocalFile("/mnt/web/buckets/j5-org/temp/" + hostname + ".log", time_range, filter) #lines = ReadWebFile("http://j5-org.storage.googleapis.com/temp/" + hostname + ".log", time_range) reporters[hostname] = len(_) entries.extend(_) #for i in range(len(_)-1, 0, -1): #for line in _: # _ = line.split() # client_ip = _[2] # client_ips[client_ip] = client_ips[client_ip]+1 if client_ip in client_ips else 1 # code = _[3] # codes[code] = codes[code]+1 if code in codes else 1 # datetimestr = datetime.fromtimestamp(int(_[0][0:10]), tz=None) # _[0] = datetimestr.strftime("%d-%m-%y %H:%M:%S") # entries.append(dict(zip(fields, _))) #entries = sorted(entries, key=lambda x: x['timestamp'], reverse=True) #return entries, reporters, client_ips, codes #lines.append(parts) #return data[0:3], reporters #newest_first = sorted(data, key=lambda x: x[0], reverse=True) #return newest_first[0:3], reporters #data = [] #return entries, reporters #return entries, reporters, client_ips, codes newest_first = sorted(entries, key=lambda x: x['timestamp'], reverse=True) del entries return newest_first, reporters, client_ips, codes #return newest_first, reporters, 
client_ips, codes #return newest_first, reporters data = [] #for i in range(len(entries)-1, 0, -1): for line in newest_first: _ = line.split() #print(entries[i]) #_ = entries[i] #print(_[0]) client_ip = _[2] client_ips[client_ip] = client_ips[client_ip]+1 if client_ip in client_ips else 1 code = _[3] codes[code] = codes[code]+1 if code in codes else 1 #if client_ip in client_ips: # client_ips[client_ip] += 1 #else: # client_ips[client_ip] = 1 #entry = {'reporter': file, 'data': line} #_.insert(0, hostname) #data.append(_) datetimestr = datetime.fromtimestamp(int(_[0][0:10]), tz=None) _[0] = datetimestr.strftime("%d-%m-%y %H:%M:%S") data.append(dict(zip(fields, _))) del newest_first return data, reporters, client_ips, codes
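
# ---------------------------------------------------------------------------
# Illustration only (not part of the collector above).  Every proxy log line
# starts with a 10-digit unix timestamp (ReadLocalFile does int(line[:10]))
# and splits into the ten fields listed in ReadLocalFile/AnalyzeLine.  The
# sample line and the extra 'when' key below are made up for this sketch; the
# real code keeps the raw epoch string.
from datetime import datetime

FIELDS = ['timestamp', 'elapsed', 'client_ip', 'code', 'bytes',
          'method', 'url', 'rfc931', 'peer_status', 'type']

def parse_log_line(line):
    """Split one access-log line into a field dict, as AnalyzeLine does."""
    entry = dict(zip(FIELDS, line.split()))
    entry['when'] = datetime.fromtimestamp(int(entry['timestamp'][:10])).strftime('%d-%m-%y %H:%M:%S')
    return entry

sample = ('1617379601.123 45 10.0.0.7 TCP_MISS/200 5120 GET '
          'http://example.org/ - HIER_DIRECT/93.184.216.34 text/html')
print(parse_log_line(sample))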
import datetime import glob import json import logging import os import re import shutil import socket import subprocess import sys import work_queue as wq from collections import defaultdict, Counter from hashlib import sha1 from lobster import fs, util from lobster.cmssw import dash from lobster.core import unit from lobster.core import Algo from lobster.core import MergeTaskHandler from WMCore.Storage.SiteLocalConfig import loadSiteLocalConfig, SiteConfigError logger = logging.getLogger('lobster.source') class ReleaseSummary(object): """Summary of returned tasks. Prints a user-friendly summary of which tasks returned with what exit code/status. """ flags = { wq.WORK_QUEUE_RESULT_INPUT_MISSING: "missing input", # 1 wq.WORK_QUEUE_RESULT_OUTPUT_MISSING: "missing output", # 2 wq.WORK_QUEUE_RESULT_STDOUT_MISSING: "no stdout", # 4 wq.WORK_QUEUE_RESULT_SIGNAL: "signal received", # 8 wq.WORK_QUEUE_RESULT_RESOURCE_EXHAUSTION: "exhausted resources", # 16 wq.WORK_QUEUE_RESULT_TASK_TIMEOUT: "time out", # 32 wq.WORK_QUEUE_RESULT_UNKNOWN: "unclassified error", # 64 wq.WORK_QUEUE_RESULT_FORSAKEN: "unrelated error", # 128 wq.WORK_QUEUE_RESULT_MAX_RETRIES: "exceed # retries", # 256 wq.WORK_QUEUE_RESULT_TASK_MAX_RUN_TIME: "exceeded runtime" # 512 } def __init__(self): self.__exe = {} self.__wq = {} self.__taskdirs = {} self.__monitors = [] def exe(self, status, taskid): try: self.__exe[status].append(taskid) except KeyError: self.__exe[status] = [taskid] def wq(self, status, taskid): for flag in ReleaseSummary.flags.keys(): if status == flag: try: self.__wq[flag].append(taskid) except KeyError: self.__wq[flag] = [taskid] def dir(self, taskid, taskdir): self.__taskdirs[taskid] = taskdir def monitor(self, taskid): self.__monitors.append(taskid) def __str__(self): s = "received the following task(s):\n" for status in sorted(self.__exe.keys()): s += "returned with status {0}: {1}\n".format(status, ", ".join(self.__exe[status])) if status != 0: s += "parameters and logs in:\n\t{0}\n".format( "\n\t".join([self.__taskdirs[t] for t in self.__exe[status]])) for flag in sorted(self.__wq.keys()): s += "failed due to {0}: {1}\nparameters and logs in:\n\t{2}\n".format( ReleaseSummary.flags[flag], ", ".join(self.__wq[flag]), "\n\t".join([self.__taskdirs[t] for t in self.__wq[flag]])) if self.__monitors: s += "resource monitoring unavailable for the following tasks: {0}\n".format(", ".join(self.__monitors)) # Trim final newline return s[:-1] class TaskProvider(util.Timing): def __init__(self, config): util.Timing.__init__(self, 'dash', 'handler', 'updates', 'elk', 'transfers', 'cleanup', 'propagate', 'sqlite') self.config = config self.basedirs = [config.base_directory, config.startup_directory] self.workdir = config.workdir self._storage = config.storage self.statusfile = os.path.join(self.workdir, 'status.json') self.siteconf = os.path.join(self.workdir, 'siteconf') self.parrot_path = os.path.dirname(util.which('parrot_run')) self.parrot_bin = os.path.join(self.workdir, 'bin') self.parrot_lib = os.path.join(self.workdir, 'lib') self.__algo = Algo(config) self.__host = socket.getfqdn() try: siteconf = loadSiteLocalConfig() self.__ce = siteconf.siteName self.__se = siteconf.localStageOutPNN() self.__frontier_proxy = siteconf.frontierProxies[0] except (SiteConfigError, IndexError): logger.error("can't load siteconfig, defaulting to hostname") self.__ce = socket.getfqdn() self.__se = socket.getfqdn() try: self.__frontier_proxy = os.environ['HTTP_PROXY'] except KeyError: logger.error("can't determine proxy for Frontier 
via $HTTP_PROXY") sys.exit(1) try: with open('/etc/cvmfs/default.local') as f: lines = f.readlines() except IOError: lines = [] for l in lines: m = re.match('\s*CVMFS_HTTP_PROXY\s*=\s*[\'"]?(.*)[\'"]?', l) if m: self.__cvmfs_proxy = m.group(1).strip("\"'") break else: try: self.__cvmfs_proxy = os.environ['HTTP_PROXY'] except KeyError: logger.error("can't determine proxy for CVMFS via $HTTP_PROXY") sys.exit(1) logger.debug("using {} as proxy for CVMFS".format(self.__cvmfs_proxy)) logger.debug("using {} as proxy for Frontier".format(self.__frontier_proxy)) logger.debug("using {} as osg_version".format(self.config.advanced.osg_version)) util.sendemail("Your Lobster project has started!", self.config) self.__taskhandlers = {} self.__store = unit.UnitStore(self.config) self.__setup_inputs() self.copy_siteconf() create = not util.checkpoint(self.workdir, 'id') if create: self.taskid = 'lobster_{0}_{1}'.format( self.config.label, sha1(str(datetime.datetime.utcnow())).hexdigest()[-16:]) util.register_checkpoint(self.workdir, 'id', self.taskid) shutil.copy(self.config.base_configuration, os.path.join(self.workdir, 'config.py')) else: self.taskid = util.checkpoint(self.workdir, 'id') util.register_checkpoint(self.workdir, 'RESTARTED', str(datetime.datetime.utcnow())) if not util.checkpoint(self.workdir, 'executable'): # We can actually have more than one exe name (one per task label) # Set 'cmsRun' if any of the tasks are of that type, # or use cmd command if all tasks execute the same cmd, # or use 'noncmsRun' if task cmds are different # Using this for dashboard exe name reporting cmsconfigs = [wflow.pset for wflow in self.config.workflows] cmds = [wflow.command for wflow in self.config.workflows] if any(cmsconfigs): exename = 'cmsRun' elif all(x == cmds[0] and x is not None for x in cmds): exename = cmds[0] else: exename = 'noncmsRun' util.register_checkpoint(self.workdir, 'executable', exename) for wflow in self.config.workflows: if create and not util.checkpoint(self.workdir, wflow.label): wflow.setup(self.workdir, self.basedirs) logger.info("querying backend for {0}".format(wflow.label)) with fs.alternative(): dataset_info = wflow.dataset.get_info() logger.info("registering {0} in database".format(wflow.label)) self.__store.register_dataset(wflow, dataset_info, wflow.category.runtime) util.register_checkpoint(self.workdir, wflow.label, 'REGISTERED') elif os.path.exists(os.path.join(wflow.workdir, 'running')): for id in self.get_taskids(wflow.label): util.move(wflow.workdir, id, 'failed') for wflow in self.config.workflows: if wflow.parent: getattr(self.config.workflows, wflow.parent.label).register(wflow) if create: total_units = wflow.dataset.total_units * len(wflow.unique_arguments) self.__store.register_dependency(wflow.label, wflow.parent.label, total_units) if not util.checkpoint(self.workdir, 'sandbox cmssw version'): util.register_checkpoint(self.workdir, 'sandbox', 'CREATED') versions = set([w.version for w in self.config.workflows]) if len(versions) == 1: util.register_checkpoint(self.workdir, 'sandbox cmssw version', list(versions)[0]) if self.config.elk: if create: categories = {wflow.category.name: [] for wflow in self.config.workflows} for category in categories: for workflow in self.config.workflows: if workflow.category.name == category: categories[category].append(workflow.label) self.config.elk.create(categories) else: self.config.elk.resume() self.config.advanced.dashboard.setup(self.config) if create: self.config.save() self.config.advanced.dashboard.register_run() else: 
self.config.advanced.dashboard.update_task_status( (id_, dash.ABORTED) for id_ in self.__store.reset_units() ) for p in (self.parrot_bin, self.parrot_lib): if not os.path.exists(p): os.makedirs(p) for exe in ('parrot_run', 'chirp', 'chirp_put', 'chirp_get'): shutil.copy(util.which(exe), self.parrot_bin) subprocess.check_call(["strip", os.path.join(self.parrot_bin, exe)]) p_helper = os.path.join(os.path.dirname(self.parrot_path), 'lib', 'lib64', 'libparrot_helper.so') shutil.copy(p_helper, self.parrot_lib) def copy_siteconf(self): storage_in = os.path.join(os.path.dirname(__file__), 'data', 'siteconf', 'PhEDEx', 'storage.xml') storage_out = os.path.join(self.siteconf, 'PhEDEx', 'storage.xml') if not os.path.exists(os.path.dirname(storage_out)): os.makedirs(os.path.dirname(storage_out)) xml = '' for n, server in enumerate(self.config.advanced.xrootd_servers): xml += ' <lfn-to-pfn protocol="xrootd{}"'.format('' if n == 0 else '-fallback{}'.format(n)) \ + ' destination-match=".*" path-match="/+store/(.*)"' \ + ' result="root://{}//store/$1"/>\n'.format(server) with open(storage_in) as fin: with open(storage_out, 'w') as fout: fout.write(fin.read().format(xrootd_rules=xml)) jobconfig_in = os.path.join(os.path.dirname(__file__), 'data', 'siteconf', 'JobConfig', 'site-local-config.xml') jobconfig_out = os.path.join(self.siteconf, 'JobConfig', 'site-local-config.xml') if not os.path.exists(os.path.dirname(jobconfig_out)): os.makedirs(os.path.dirname(jobconfig_out)) xml = '' for n, server in enumerate(self.config.advanced.xrootd_servers): xml += ' <catalog url="trivialcatalog_file:siteconf/PhEDEx/storage.xml?protocol=xrootd{}"/>\n'.format( '' if n == 0 else '-fallback{}'.format(n)) with open(jobconfig_in) as fin: with open(jobconfig_out, 'w') as fout: fout.write(fin.read().format(xrootd_catalogs=xml)) def __find_root(self, label): while getattr(self.config.workflows, label).parent: label = getattr(self.config.workflows, label).parent return label def __setup_inputs(self): self._inputs = [ (self.siteconf, 'siteconf', False), (os.path.join(os.path.dirname(__file__), 'data', 'wrapper.sh'), 'wrapper.sh', True), (os.path.join(os.path.dirname(__file__), 'data', 'task.py'), 'task.py', True), (os.path.join(os.path.dirname(__file__), 'data', 'report.json.in'), 'report.json.in', True), (self.parrot_bin, 'bin', True), (self.parrot_lib, 'lib', True), ] # Files to make the task wrapper work without referencing WMCore # from somewhere else import WMCore base = os.path.dirname(WMCore.__file__) reqs = [ "__init__.py", "Algorithms", "Configuration.py", "DataStructs", "FwkJobReport", "Services", "Storage", "WMException.py", "WMExceptions.py" ] for f in reqs: self._inputs.append((os.path.join(base, f), os.path.join("python", "WMCore", f), True)) if 'X509_USER_PROXY' in os.environ: self._inputs.append((os.environ['X509_USER_PROXY'], 'proxy', False)) def get_taskids(self, label, status='running'): # Iterates over the task directories and returns all taskids found # therein. parent = os.path.join(self.workdir, label, status) for d in glob.glob(os.path.join(parent, '*', '*')): yield int(os.path.relpath(d, parent).replace(os.path.sep, '')) def get_report(self, label, task): return os.path.join(self.workdir, label, 'successful', util.id2dir(task), 'report.json') def obtain(self, total, tasks): """ Obtain tasks from the project. Will create tasks for all workflows, if possible. Merge tasks are always created, given enough successful tasks. 
The remaining tasks are split proportionally between the categories based on remaining resources multiplied by cores used per task. Within categories, tasks are created based on the same logic. Parameters ---------- total : int Number of cores available. tasks : dict Dictionary with category names as keys and the number of tasks in the queue as values. """ remaining = dict((wflow, self.__store.work_left(wflow.label)) for wflow in self.config.workflows) taskinfos = [] for wflow in self.config.workflows: taskinfos += self.__store.pop_unmerged_tasks(wflow.label, wflow.merge_size, 10) for label, ntasks, taper in self.__algo.run(total, tasks, remaining): infos = self.__store.pop_units(label, ntasks, taper) logger.debug("created {} tasks for workflow {}".format(len(infos), label)) taskinfos += infos if not taskinfos or len(taskinfos) == 0: return [] tasks = [] ids = [] registration = dict( zip( [t[0] for t in taskinfos], self.config.advanced.dashboard.register_tasks(t[0] for t in taskinfos) ) ) for (id, label, files, lumis, unique_arg, merge) in taskinfos: wflow = getattr(self.config.workflows, label) ids.append(id) jdir = util.taskdir(wflow.workdir, id) inputs = list(self._inputs) inputs.append((os.path.join(jdir, 'parameters.json'), 'parameters.json', False)) outputs = [(os.path.join(jdir, f), f) for f in ['report.json']] monitorid, syncid = registration[id] config = { 'mask': { 'files': None, 'lumis': None, 'events': None }, 'monitoring': { 'monitorid': monitorid, 'syncid': syncid, 'taskid': self.taskid, }, 'default host': self.__host, 'default ce': self.__ce, 'default se': self.__se, 'arguments': None, 'output files': [], 'want summary': True, 'executable': None, 'pset': None, 'prologue': None, 'epilogue': None, 'gridpack': False } cmd = 'sh wrapper.sh python task.py parameters.json' env = { 'LOBSTER_CVMFS_PROXY': self.__cvmfs_proxy, 'LOBSTER_FRONTIER_PROXY': self.__frontier_proxy, 'LOBSTER_OSG_VERSION': self.config.advanced.osg_version } if merge: missing = [] infiles = [] inreports = [] for task, _, _, _ in lumis: report = self.get_report(label, task) _, infile = list(wflow.get_outputs(task))[0] if os.path.isfile(report): inreports.append(report) infiles.append((task, infile)) else: missing.append(task) if len(missing) > 0: template = "the following have been marked as failed because their output could not be found: {0}" logger.warning(template.format(", ".join(map(str, missing)))) self.__store.update_missing(missing) if len(infiles) <= 1: # FIXME report these back to the database and then skip # them. Without failing these task ids, accounting of # running tasks is going to be messed up. 
logger.debug("skipping task {0} with only one input file!".format(id)) # takes care of the fields set to None in config wflow.adjust(config, env, jdir, inputs, outputs, merge, reports=inreports) files = infiles else: # takes care of the fields set to None in config wflow.adjust(config, env, jdir, inputs, outputs, merge, unique=unique_arg) handler = wflow.handler(id, files, lumis, jdir, merge=merge) # set input/output transfer parameters self._storage.preprocess(config, merge or wflow.parent) # adjust file and lumi information in config, add task specific # input/output files handler.adjust(config, inputs, outputs, self._storage) with open(os.path.join(jdir, 'parameters.json'), 'w') as f: json.dump(config, f, indent=2) f.write('\n') tasks.append(('merge' if merge else wflow.category.name, cmd, id, inputs, outputs, env, jdir)) self.__taskhandlers[id] = handler logger.info("creating task(s) {0}".format(", ".join(map(str, ids)))) self.config.advanced.dashboard.free() return tasks def release(self, tasks): fail_cleanup = [] merge_cleanup = [] input_cleanup = [] update = defaultdict(list) propagate = defaultdict(dict) input_files = defaultdict(set) summary = ReleaseSummary() transfers = defaultdict(lambda: defaultdict(Counter)) with self.measure('dash'): self.config.advanced.dashboard.update_task_status( (task.tag, dash.DONE) for task in tasks ) for task in tasks: with self.measure('updates'): handler = self.__taskhandlers[task.tag] failed, task_update, file_update, unit_update = handler.process(task, summary, transfers) wflow = getattr(self.config.workflows, handler.dataset) with self.measure('elk'): if self.config.elk: self.config.elk.index_task(task) self.config.elk.index_task_update(task_update) with self.measure('handler'): if failed: faildir = util.move(wflow.workdir, handler.id, 'failed') summary.dir(str(handler.id), faildir) fail_cleanup.extend([lf for rf, lf in handler.outputs]) else: util.move(wflow.workdir, handler.id, 'successful') merge = isinstance(handler, MergeTaskHandler) if (wflow.merge_size <= 0 or merge) and len(handler.outputs) > 0: outfn = handler.outputs[0][1] outinfo = handler.output_info for dep in wflow.dependents: propagate[dep.label][outfn] = outinfo if merge: merge_cleanup.extend(handler.input_files) if wflow.cleanup_input: input_files[handler.dataset].update(set([f for (_, _, f) in file_update])) update[(handler.dataset, handler.unit_source)].append((task_update, file_update, unit_update)) del self.__taskhandlers[task.tag] with self.measure('dash'): self.config.advanced.dashboard.update_task_status( (task.tag, dash.RETRIEVED) for task in tasks ) if len(update) > 0: with self.measure('sqlite'): logger.info(summary) self.__store.update_units(update) with self.measure('cleanup'): if len(input_files) > 0: input_cleanup.extend(self.__store.finished_files(input_files)) for cleanup in [fail_cleanup, merge_cleanup + input_cleanup]: if len(cleanup) > 0: try: fs.remove(*cleanup) except (IOError, OSError): pass except ValueError as e: logger.error("error removing {0}:\n{1}".format(task.tag, e)) with self.measure('propagate'): for label, infos in propagate.items(): unique_args = getattr(self.config.workflows, label).unique_arguments self.__store.register_files(infos, label, unique_args) if len(transfers) > 0: with self.measure('transfers'): self.__store.update_transfers(transfers) if self.config.elk: with self.measure('elk'): try: self.config.elk.index_summary(self.__store.workflow_status()) except Exception as e: logger.error('ELK failed to index summary:\n{}'.format(e)) def 
terminate(self): self.config.advanced.dashboard.update_task_status( (str(id), dash.CANCELLED) for id in self.__store.running_tasks() ) def done(self): left = self.__store.unfinished_units() return self.__store.merged() and left == 0 def max_taskid(self): return self.__store.max_taskid() def update(self, queue): # update dashboard status for all unfinished tasks. # WAITING_RETRIEVAL is not a valid status in dashboard, # so skipping it for now. exclude_states = (dash.DONE, dash.WAITING_RETRIEVAL) try: self.config.advanced.dashboard.update_tasks(queue, exclude_states) except Exception as e: logger.warning("could not update task states to dashboard") logger.exception(e) def update_stuck(self): """Have the unit store updated the statistics for stuck units. """ self.__store.update_workflow_stats_stuck() def update_runtime(self, category): """Update the runtime for all workflows with the corresponding category. """ update = [] for wflow in self.config.workflows: if wflow.category == category: update.append((category.runtime, wflow.label)) self.__store.update_workflow_runtime(update) def tasks_left(self): return self.__store.estimate_tasks_left() def work_left(self): return self.__store.unfinished_units()
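
# ---------------------------------------------------------------------------
# Illustration only (not part of lobster).  obtain() delegates the "split the
# remaining cores proportionally between categories" step to Algo.run(); the
# helper below is just a hedged sketch of that general idea (largest-remainder
# proportional allocation), not lobster's actual implementation.
def split_cores_proportionally(total_cores, weights):
    """Return {category: cores}, allocating total_cores in proportion to weights."""
    total_weight = float(sum(weights.values())) or 1.0
    exact = {k: total_cores * w / total_weight for k, w in weights.items()}
    shares = {k: int(v) for k, v in exact.items()}
    # Hand the cores lost to rounding to the largest remainders first.
    leftover = total_cores - sum(shares.values())
    for k in sorted(exact, key=lambda k: exact[k] - shares[k], reverse=True):
        if leftover <= 0:
            break
        shares[k] += 1
        leftover -= 1
    return shares

# Example: 100 cores, weights = remaining work multiplied by cores per task.
print(split_cores_proportionally(100, {'analysis': 3, 'merge': 2, 'skim': 2}))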
<gh_stars>1-10 #Author: <NAME> import matplotlib.pyplot as plt import math import logging log = logging.getLogger(__name__) from .img_utils import * def calculate_rdf(filteredvertices,rows,cols,scale, increment = 4, progress = False): '''Calculates RDF from list of vertices of particles. :param list filteredvertices: list of vertices of particles. :param int rows: number of rows in image. :param int cols: number of cols in image. :param float scale: scale of pixels in image (m/pixel) :param int increment: Increment resolution of RDF in pixels. :param bool progress: Optionally print progress. :return list xRDF: x values of RDF :return list yRDF: y values of RDF ''' if progress == True: print("Calculating minimum RDF...") #Create blank canvases to draw particle pairs on. particleAimg = np.zeros((rows, cols, 3), np.uint8) particleAimg[:] = (0, 0, 0) particleBimg = np.zeros((rows, cols, 3), np.uint8) particleBimg[:] = (0, 0, 0) #Calculation of RDF. minrange = 1 maxrange = int(((rows**2)+(cols**2))**0.5) xRDF = range(minrange,maxrange,increment) #Start with no intersects at any radius. AllIntersectsAtRadius=[] for i in xRDF: AllIntersectsAtRadius.append(0) numberofparticles = len(filteredvertices) particle_index = 1 #Calculate all particleA to particleB pairs for particleA in filteredvertices: if progress == True: log.info('RDF calculation on ' + str(particle_index) + '/' + str(numberofparticles)) particle_index += 1 restofvertices=[particleB for particleB in filteredvertices if np.array_equal(particleA,particleB) == False] (particleAx,particleAy),particleAradius = cv2.minEnclosingCircle(particleA) particleAx = int(particleAx) particleAy = int(particleAy) particleAradius = int(particleAradius) cv2.polylines(particleAimg,[particleA],True,(0,0,255)) for particleB in restofvertices: #paint blank particleBimg with a filled particleB. cv2.polylines(particleBimg,[particleB],True,(0,255,0)) #might be unnecessary cv2.fillPoly(particleBimg,[particleB],(0,255,0)) (particleBx,particleBy),particleBradius = cv2.minEnclosingCircle(particleB) particleBx=int(particleBx) particleBy=int(particleBy) particleBradius=int(particleBradius) ABintersects=[] min_distance_for_this_pair = (distance_formula((particleAx,particleAy),(particleBx,particleBy)) - int(particleBradius) * 1.5) max_distance_for_this_pair = (distance_formula((particleAx,particleAy),(particleBx,particleBy)) + int(particleBradius) * 1.5) for i in xRDF: doesABintersectAtRadius=0 if min_distance_for_this_pair < i < max_distance_for_this_pair: #Draw a circle of radius i originating from center of particleA. cv2.circle(particleAimg,(particleAx,particleAy),i,(255,0,0)) #Combine the two images. combinedimg=cv2.addWeighted(particleBimg,1,particleAimg,1,0) krange=range((particleBx-particleBradius),(particleBx+particleBradius)) krangetrim=[k for k in krange if k < cols] lrange=range((particleBy-particleBradius),(particleBy+particleBradius)) lrangetrim=[l for l in lrange if l < rows] #Search general region of particle B to see if the circle of radius i shows up in it. #If so, there is an intersect between particles A & B at radius i. for k in krangetrim: for l in lrangetrim: if (combinedimg.item(l,k,0) == 255 and combinedimg.item(l,k,1) == 255): doesABintersectAtRadius = 1 break ABintersects.append(doesABintersectAtRadius) particleAimg[:] = (0, 0, 0) particleBimg[:] = (0, 0, 0) AllIntersectsAtRadius=[x + y for x, y in zip(AllIntersectsAtRadius, ABintersects)] yRDF=[i/float(numberofparticles) for i in AllIntersectsAtRadius] #Convert pixels to unit of distance. 
xRDF=[x*scale for x in xRDF] return xRDF,yRDF def output_rdf(xRDF,yRDF,imgname,conversion, outputpath=''): '''Plots a given rdf. :param string outputpath: path to output directory. ''' if conversion == 1: om = 0 distanceunit = 'pixels' else: om = int(math.floor(math.log10(conversion))) distanceunit= 'meters E' + str(om) xRDF = [round(i * 10 ** (-1*om),2) for i in xRDF] plt.plot(xRDF,yRDF, label="_nolegend_",marker='o', linestyle = "None") font={"fontname":"serif"} plt.ylim([0,max(yRDF)+(max(yRDF)/10.0)]) plt.xlim([0,max(xRDF)]) plt.title("minRDF",**font) plt.xlabel("distance / " + distanceunit,**font) plt.ylabel("Frequency",**font) plt.grid() plt.savefig(os.path.join(outputpath, "rdf_" + str(imgname).split("/")[-1]), bbox_inches = 'tight') plt.close() #plt.show() outfile = open(os.path.join(outputpath, "data_rdf_" + imgname.split('/')[-1].split(".")[0] + ".txt"), "w") for i,j in zip(xRDF,yRDF): outfile.write(str(i) + " " + str(j) + "\n") outfile.close() return def particle_size_histogram(arealist, filtered, imgname, outputpath='', conversion = 0): '''Plots particle size histogram. :param list arealist: list of the areas of particles. :param string imgname: name of the img (needed for writing output) :param string outputpath: path to output directory. :param float conversion: order of magnitude of scale in image, if 1 scale is in pixels. ''' font={"fontname":"serif"} _, bins, _ = plt.hist(arealist, bins=len(arealist) + 1, edgecolor='black', linewidth=1.2, rwidth=0.9, label='Original', color = "royalblue") plt.hist(filtered, bins=len(arealist) + 1, range=(bins.min(), bins.max()), edgecolor='black', linewidth=1.2, rwidth=0.9, label='Filtered', alpha = 0.6, color = "darkorange") plt.title("Particle Size " + str(imgname).split("/")[-1] ,**font) if conversion == 1: plt.xlabel('Pixels**2',**font) else: plt.xlabel('Meters**2',**font) plt.ylabel("Frequency",**font) plt.xlim([0,max(arealist)]) plt.legend() plt.savefig(os.path.join(outputpath, "hist_" + str(imgname).split("/")[-1]), bbox_inches = 'tight') plt.close() #plt.show() return def aspect_ratios(filteredvertices): '''Calculates aspect ratios of particles. :param list filteredvertices: list of detected particles. :return list aspect_ratios: list of respective aspect ratios.''' aspect_ratios = [] for cont in filteredvertices: x,y,w,h = cv2.boundingRect(cont) aspect_ratio = float(w)/h aspect_ratios.append(aspect_ratio) return aspect_ratios def remove_outliers(areas): if len(areas) > 1: mu = np.median(areas) std = np.std(areas) filtered = [x for x in areas if (x < mu + (1.75*std) and x > mu - (1*std))] else: filtered = areas return filtered
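
# ---------------------------------------------------------------------------
# Illustration only (not part of the module above).  calculate_rdf() grows a
# circle around each particle A on an image canvas and counts, per radius
# step, how often it intersects another particle B.  The sketch below is the
# simpler centre-to-centre variant of the same idea (pair distances binned by
# radius, normalised per particle); it is handy for sanity-checking the
# image-based result, not a drop-in replacement for it.
import numpy as np

def centre_rdf(centres, max_r, increment=4):
    """Return (radii, frequency) from pairwise centre-to-centre distances."""
    centres = np.asarray(centres, dtype=float)
    n = len(centres)
    bins = np.arange(1, max_r + increment, increment)
    diffs = centres[:, None, :] - centres[None, :, :]
    dists = np.sqrt((diffs ** 2).sum(axis=-1))
    dists = dists[~np.eye(n, dtype=bool)]        # drop self-distances
    counts, _ = np.histogram(dists, bins=bins)
    return bins[:-1], counts / float(n)

# Three made-up particle centres (pixel coordinates).
print(centre_rdf([(10, 10), (20, 10), (10, 25)], max_r=40))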
<reponame>jacklee1792/spiggy import base64 import gzip import io import json import re import struct from pathlib import Path from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union from backend import constants _here = Path(__file__).parent with open(_here/'exceptions/enchants.json') as f: ENCHANT_EXCEPTIONS = json.load(f) with open(_here/'exceptions/reforges.json') as f: REFORGE_EXCEPTIONS = json.load(f) def _pop_byte(bytes_f: BinaryIO) -> int: return int.from_bytes(bytes_f.read(1), byteorder='big', signed=True) def _pop_ushort(bytes_f: BinaryIO) -> int: return int.from_bytes(bytes_f.read(2), byteorder='big', signed=False) def _pop_short(bytes_f: BinaryIO) -> int: return int.from_bytes(bytes_f.read(2), byteorder='big', signed=True) def _pop_int(bytes_f: BinaryIO) -> int: return int.from_bytes(bytes_f.read(4), byteorder='big', signed=True) def _pop_long(bytes_f: BinaryIO) -> int: return int.from_bytes(bytes_f.read(8), byteorder='big', signed=True) def _pop_string(bytes_f: BinaryIO) -> str: payload = _pop_ushort(bytes_f) return bytes_f.read(payload).decode('utf-8') class NbtTag: """ Class defining an NbtTag: a value with an intrinsic name. """ name: str value: Any def __init__(self, name: str, value: Any): """ Construct an NbtTag instance. :param name: The name of the NbtTag. :param value: The value of the NbtTag. """ self.name = name self.value = value def __getitem__(self, key: Union[str, int]): """ Call __getitem__ on the NbtTag's value instance variable. :param key: The desired key. :return: The value of the key in the value instance variable. """ return self.value[key] def parse_byte(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, _pop_byte(bytes_f)) def parse_short(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, _pop_short(bytes_f)) def parse_int(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, _pop_int(bytes_f)) def parse_long(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, _pop_long(bytes_f)) def parse_float(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, struct.unpack('>f', bytes_f.read(4))) def parse_double(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, struct.unpack('>d', bytes_f.read(8))) def parse_byte_array(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' payload = _pop_int(bytes_f) arr = [_pop_byte(bytes_f) for _ in range(payload)] return NbtTag(name, arr) def parse_string(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' return NbtTag(name, _pop_string(bytes_f)) def parse_list(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' content_type = _pop_byte(bytes_f) payload = _pop_int(bytes_f) ret = [] for _ in range(payload): ret.append(PARSERS[content_type](bytes_f, read_name=False)) return NbtTag(name, ret) def parse_compound(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' tag_type = _pop_byte(bytes_f) ret = {} while tag_type != 0: tag = PARSERS[tag_type](bytes_f) ret[tag.name] = tag.value tag_type = 
_pop_byte(bytes_f) return NbtTag(name, ret) def parse_int_array(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' payload = _pop_int(bytes_f) arr = [_pop_int(bytes_f) for _ in range(payload)] return NbtTag(name, arr) def parse_long_array(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag: name = _pop_string(bytes_f) if read_name else '' payload = _pop_int(bytes_f) arr = [_pop_long(bytes_f) for _ in range(payload)] return NbtTag(name, arr) PARSERS = [ None, parse_byte, parse_short, parse_int, parse_long, parse_float, parse_double, parse_byte_array, parse_string, parse_list, parse_compound, parse_int_array, parse_long_array ] def _without_nbt_style(s: str) -> str: """ Given a full string with NBT styling, return the string without coloring and recomb symbols. :param s: The given string. :return: The given string without NBT styling. """ return re.sub('§ka|§.', '', s).strip() def deserialize(b64: str) -> NbtTag: """ Decode the gzipped base-64 encoding of an item's metadata. :param b64: The gzipped base-64 item metadata. :return: A NbtTag with the decoded metadata. """ bytes_gz = base64.b64decode(b64) bytes_f = io.BytesIO(gzip.decompress(bytes_gz)) # Pop the outer compound tag indicator _pop_byte(bytes_f) return parse_compound(bytes_f) def _get_extra_attrs(nbt: NbtTag) -> Dict[str, Any]: """ Helper method to get the 'ExtraAttributes' tag compound from an item NbtTag. Useful for other extraction methods. :param nbt: The NbtTag to be read. :return: The 'ExtraAttributes' tag compound. """ return nbt['i'][0]['tag']['ExtraAttributes'] def _get_pet_attrs(nbt: NbtTag) -> Dict[str, Any]: """ Helper method to get the 'petInfo' tag and parse it into a dictionary. Returns an empty dictionary if no pet attributes are found. :param nbt: The NbtTag to be read. :return: Dictionary containing the pet attributes of the item. """ extra_attrs = _get_extra_attrs(nbt) as_str = extra_attrs.get('petInfo', '{}') return json.loads(as_str) def extract_api_id(nbt: NbtTag) -> str: """ Get the API ID of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The ID of the item, directly as it appears in the Skyblock API. """ extra_attrs = _get_extra_attrs(nbt) return extra_attrs['id'] def extract_generic_base_name(nbt: NbtTag) -> str: """ Given the NbtTag corresponding to an item, return its generic base name. This corresponds to removing special symbols and reforges from the raw display name. Often, dropping the first word is enough to remove the reforge, but some exceptions apply and are specified in REFORGE_EXCEPTIONS. :param nbt: The NbtTag to be read. :return: The name of the item with extra symbols removed and reforge dropped, if applicable. """ name = re.sub('[✪⚚✦◆™©�]', '', extract_generic_display_name(nbt)).strip() # No reforge, we are done if not extract_reforge(nbt): return name general_case = name.split(' ', 1)[-1] # If it's not an exception, just return the general case return REFORGE_EXCEPTIONS.get(name, general_case) def extract_generic_display_name(nbt: NbtTag) -> str: """ Extract the raw display name of an item (with NBT styling) from its NbtTag. :param nbt: The NbtTag to be read. :return: The api_name of the item, as a string. """ return _without_nbt_style(nbt['i'][0]['tag']['display']['Name']) def extract_identifiers(nbt: NbtTag) -> Tuple[str, str, str]: """ Extract the item ID, base name, and display name of an items from its NbtTag. :param nbt: The NbtTag to be read. 
:return: A tuple describing the item ID, base name, and display name of the item. """ api_id = extract_api_id(nbt) # Specialization for single-enchantment books if api_id == 'ENCHANTED_BOOK' and \ len(enchants := extract_enchants(nbt)) == 1: enchant, lvl = enchants[0] # Replace enchant if it matches an exception enchant = ENCHANT_EXCEPTIONS.get(enchant, enchant) item_id = f'{enchant.upper()}_{lvl}_BOOK' base_name = item_id.title().replace('_', ' ') display_name = base_name # Specialization for runes elif api_id == 'RUNE': rune, lvl = extract_rune(nbt) item_id = f'{rune}_RUNE_{lvl}' base_name = extract_generic_base_name(nbt).rsplit(' ', 1)[0] \ + f' {lvl}' display_name = extract_generic_display_name(nbt) # Specialization for pets elif api_id == 'PET': pet_type = extract_pet_type(nbt) item_id = f'{pet_type}_PET' base_name = item_id.title().replace('_', ' ') display_name = extract_generic_display_name(nbt) # Specialization for cake souls elif api_id == 'CAKE_SOUL': item_id = 'CAKE_SOUL' base_name = 'Cake Soul' display_name = extract_generic_display_name(nbt) # General case else: # Drop the fragment prefix item_id = api_id.removeprefix('STARRED_') base_name = extract_generic_base_name(nbt) display_name = extract_generic_display_name(nbt) return item_id, base_name, display_name def extract_stack_size(nbt: NbtTag) -> int: """ Get the number of items in an item stack from the associated NbtTag. :param nbt: The NbtTag to be read. :return: The number of items in the item stack. """ return nbt['i'][0]['Count'] def extract_rarity(nbt: NbtTag) -> str: """ Get the rarity of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The rarity of the item. """ try: lore = nbt['i'][0]['tag']['display']['Lore'] rarity_line = nbt['i'][0]['tag']['display']['Lore'][-1].value # Some runes have a weird footer in their lore if extract_api_id(nbt) == 'RUNE': for tag in lore: line = tag.value if _without_nbt_style(line).endswith('COSMETIC'): rarity_line = line words = _without_nbt_style(rarity_line).split() # Account for 'VERY_SPECIAL' case rarity = words[0] if words[0] != 'VERY' else 'VERY_SPECIAL' return rarity if rarity in constants.DISPLAY_RARITIES.keys() else 'UNKNOWN' except KeyError: # Some weird items don't have lore for some reason return 'UNKNOWN' def extract_rune(nbt: NbtTag) -> Optional[Tuple[str, int]]: """ Get rune information of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The rune of the item as a (rune name, level) pair, or None if no rune is associated with the item. """ extra_attrs = _get_extra_attrs(nbt) if 'runes' in extra_attrs: return list(extra_attrs['runes'].items())[0] return None def extract_enchants(nbt: NbtTag) -> List[Tuple[str, int]]: """ Get enchantment information of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: A list of (enchantment, level) pairs describing the enchantments on the item """ extra_attrs = _get_extra_attrs(nbt) enchantments = extra_attrs.get('enchantments', {}).items() return [(ench, lvl) for ench, lvl in enchantments] def extract_is_recombobulated(nbt: NbtTag) -> bool: """ Determine whether or not an item is recombobulated from its NbtTag. :param nbt: The NbtTag to be read. :return: Boolean, whether or not the item is recombobulated. """ extra_attrs = _get_extra_attrs(nbt) return 'rarity_upgrades' in extra_attrs def extract_is_fragged(nbt: NbtTag) -> bool: """ Determine whether or not an item has a Bonzo or Livid fragment applied to it from its NbtTag. :param nbt: The NbtTag to be read. 
:return: Boolean, whether or not the item is fragged. """ return extract_api_id(nbt).startswith('STARRED_') def extract_hot_potato_count(nbt: NbtTag) -> int: """ Determine the number of hot potato book upgrades on an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The number of hot potato book upgrades on the given item. """ extra_attrs = _get_extra_attrs(nbt) return extra_attrs.get('hot_potato_count', 0) def extract_reforge(nbt: NbtTag) -> Optional[str]: """ Get the reforge on an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The reforge of the item, or None if no reforge is present. """ extra_attrs = _get_extra_attrs(nbt) return extra_attrs.get('modifier') def extract_dungeon_stars(nbt: NbtTag) -> int: """ Get the number of dungeon stars on an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The number of dungeon stars on the item. """ extra_attrs = _get_extra_attrs(nbt) return extra_attrs.get('dungeon_item_level', 0) def extract_pet_type(nbt: NbtTag) -> Optional[str]: """ Get the pet type of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The pet type of the item, if applicable. """ pet_attrs = _get_pet_attrs(nbt) return pet_attrs.get('type') def extract_pet_exp(nbt: NbtTag) -> float: """ Get the pet experience of an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The pet experience on the item. """ pet_attrs = _get_pet_attrs(nbt) return pet_attrs.get('exp', 0) def extract_pet_candy_used(nbt: NbtTag) -> int: """ Get the number of pet candies used on an item from its NbtTag. :param nbt: The NbtTag to be read. :return: The number of pet candies on the item. """ pet_attrs = _get_pet_attrs(nbt) return pet_attrs.get('candyUsed', 0)
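# Editor's usage sketch (not part of the original module). It shows how the helpers
# above compose for a single auction item; `raw_b64` is assumed to be the gzipped,
# base-64 "item_bytes" blob taken from the Skyblock API, and the function name
# `describe_item` is illustrative only.
def describe_item(raw_b64: str) -> Dict[str, Any]:
    nbt = deserialize(raw_b64)
    item_id, base_name, display_name = extract_identifiers(nbt)
    return {
        'item_id': item_id,
        'base_name': base_name,
        'display_name': display_name,
        'rarity': extract_rarity(nbt),
        'enchants': extract_enchants(nbt),
        'reforge': extract_reforge(nbt),
        'recombobulated': extract_is_recombobulated(nbt),
        'stack_size': extract_stack_size(nbt),
    }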
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Jan 18 2021 CO2 emissions for MSOAs or LSOAs combining 2 years at a time, IO part adapted from code by <NAME> @author: lenakilian """ import pandas as pd import pickle import numpy as np df = pd.DataFrame ################ # IO functions # ################ def make_Z_from_S_U(S,U): Z = np.zeros(shape = (np.size(S,0)+np.size(U,0),np.size(S,1)+np.size(U,1))) Z[np.size(S,0):,0:np.size(U,1)] = U Z[0:np.size(S,0),np.size(U,1):] = S return Z def make_x(Z,Y): x = np.sum(Z,1)+np.sum(Y,1) x[x == 0] = 0.000000001 return x def make_L(Z,x): bigX = np.zeros(shape = (len(Z))) bigX = np.tile(np.transpose(x),(len(Z),1)) A = np.divide(Z,bigX) L = np.linalg.inv(np.identity(len(Z))-A) return L #################### # demand functions # #################### def make_Yhh_106(Y_d,years,meta): total_Yhh_106 = {} col = Y_d[years[0]].columns[0:36] idx = Y_d[years[0]].index[0:106] for yr in years: temp = np.zeros(shape = [106,36]) for r in range(0,meta['reg']['len']): temp = temp + Y_d[yr].iloc[r*106:(r+1)*106,0:36].values total_Yhh_106[yr] = df(temp, index =idx, columns =col) return total_Yhh_106 def make_Yhh_112(Y_d,years,meta): total_Yhh_112 = {} col = Y_d[years[0]].columns[0:36] idx = Y_d[years[0]].index[0:112] for yr in years: temp = np.zeros(shape = [112,36]) for r in range(0,meta['reg']['len']): temp = temp + Y_d[yr].iloc[r*112:(r+1)*112,0:36].values total_Yhh_112[yr] = df(temp, index =idx, columns =col) return total_Yhh_112 ################## # LCFS functions # ################## def convert36to33(Y,concs_dict2,years): Y2 = {} for yr in years: temp = np.dot(Y[yr],concs_dict2['C43_to_C40']) Y2[yr] = df(temp, index = Y[yr].index, columns = concs_dict2['C43_to_C40'].columns) return Y2 def expected_totals(hhspenddata, years, concs_dict2, total_Yhh_106): coicop_exp_tot = {} for year in years: temp = np.sum(hhspenddata[year], 0) corrector = np.zeros(shape = 307) start = 0 end = 0 corrector = [] for i in range(0, 33): conc = concs_dict2[str(i) + 'a'] end = len(conc.columns) + start lcf_subtotal = np.sum(np.dot(temp, conc)) required_subtotal = np.sum(total_Yhh_106[year].iloc[:, i]) corrector += [required_subtotal/lcf_subtotal for i in range(start, end)] start = end coicop_exp_tot[year] = np.dot(temp, np.diag(corrector)) return(coicop_exp_tot) def make_y_hh_307(Y,coicop_exp_tot,years,concs_dict2,meta): yhh_wide = {} for yr in years: temp = np.zeros(shape = [meta['fd']['len_idx'],307]) countstart = 0 countend = 0 col = [] for a in range(0,33): conc = np.tile(concs_dict2[str(a)],(meta['reg']['len'],1)) countend = np.sum(np.sum(concs_dict2[str(a)+'a']))+countstart category_total = np.dot(coicop_exp_tot[yr],concs_dict2[str(a)+'a']) #test1 = np.dot(np.diag(Y[yr].iloc[:,a]),conc) test1 = np.dot(conc,np.diag(category_total)) #test2 = np.tile(np.dot(Y[yr].iloc[:,a],conc),(1590,1)) test2 = np.transpose(np.tile(np.dot(conc,category_total),(np.size(conc,1),1))) test3 = test1/test2 test3 = np.nan_to_num(test3, copy=True) #num = np.dot(conc,np.diag(category_total)) #test4 = np.multiply(num,test3) test4 = np.dot(np.diag(Y[yr].iloc[:,a]),test3) #den = np.dot(np.diag(np.sum(num,1)),concs_dict2[str(a)]) #prop = np.divide(num,den) #prop = np.nan_to_num(prop, copy=True) #temp[:,countstart:countend] = (np.dot(np.diag(total_Yhh_106[yr].iloc[:,a]),prop)) temp[:,countstart:countend] = test4 col[countstart:countend] = concs_dict2[str(a) + 'a'].columns countstart = countend yhh_wide[yr] = df(temp, columns = col) return yhh_wide def make_y_hh_prop(Y,total_Yhh_106,meta,years): 
yhh_prop = {} for yr in years: temp = np.zeros(shape=(len(Y[yr]))) for r in range(0,meta['reg']['len']): temp[r*106:(r+1)*106] = np.divide(np.sum(Y[yr].iloc[r*106:(r+1)*106,0:36],1),np.sum(total_Yhh_106[yr],1)) np.nan_to_num(temp, copy = False) yhh_prop[yr] = temp return yhh_prop def make_new_Y(Y,yhh_wide,meta,years): newY = {} col = [] for yr in years: temp = np.zeros(shape=[len(Y[yr]),314]) temp[:,0:307] = yhh_wide[yr] temp[:,307:314] = Y[yr].iloc[:,33:40] col[0:307] = yhh_wide[yr].columns col[307:314] = Y[yr].iloc[:,33:40].columns newY[yr] = df(temp, index = Y[yr].index, columns = col) return newY def make_ylcf_props(hhspenddata,years): ylcf_props = {} for yr in years: totalspend = np.sum(hhspenddata[yr].loc[:,'1.1.1.1':'192.168.3.11']) temp = np.divide(hhspenddata[yr].loc[:,'1.1.1.1':'192.168.3.11'],np.tile(totalspend,[len(hhspenddata[yr]),1])) np.nan_to_num(temp, copy = False) ylcf_props[yr] = df(temp, index = hhspenddata[yr].index) return ylcf_props def makefoot(S,U,Y,stressor,years): footbyCOICOP = {} for yr in years: temp = np.zeros(shape = 307) Z = make_Z_from_S_U(S[yr],U[yr]) bigY = np.zeros(shape = [np.size(Y[yr],0)*2,np.size(Y[yr],1)]) bigY[np.size(Y[yr],0):np.size(Y[yr],0)*2,0:] = Y[yr] x = make_x(Z,bigY) L = make_L(Z,x) bigstressor = np.zeros(shape = [np.size(Y[yr],0)*2,1]) bigstressor[0:np.size(Y[yr],0),:] = stressor[yr] e = np.sum(bigstressor,1)/x eL = np.dot(e,L) for a in range(0,307): temp[a] = np.dot(eL,bigY[:,a]) footbyCOICOP[yr] = temp return footbyCOICOP ########### # Run all # ########### def make_footprint(hhdspend, wd): """ Calculate consumption-based household GHG emissions for MSOAs or LSOAs from the LCFS (emissios calculated in LCFS_aggregation_combined_years.py) and the UKMRIO 2020 """ ############# # load data # ############# # load meta data from [UKMRIO] meta = pickle.load(open(wd + 'data/raw/UKMRIO_2021/meta.p', "rb" )) # create year lists years = list(hhdspend.keys()) # load and clean up concs to make it usable # these translate IO data sectors to LCFS products/services concs_dict2 = pd.read_excel(wd + 'data/raw/Concordances/ONS_to_COICOP_LCF_concs_2021.xlsx', sheet_name=None, index_col=0) ####################### # aggregate emissions # ####################### # get mean from 2 years # calculate differnece between years in household data to calculate means for other vairables # Load UKMRIO and calculate means for UKMRIO data ukmrio = {}; #means = {} for data in ['ghg', 'uk_ghg_direct', 'S', 'U', 'Y']: ukmrio[data] = pickle.load(open(wd + 'data/raw/UKMRIO_2021/' + data + '.p', "rb" )) ukmrio['Y'] = convert36to33(ukmrio['Y'], concs_dict2, years) total_Yhh_112 = make_Yhh_112(ukmrio['Y'], years, meta) coicop_exp_tot = expected_totals(hhdspend, list(hhdspend.keys()), concs_dict2, total_Yhh_112) yhh_wide = make_y_hh_307(ukmrio['Y'], coicop_exp_tot, list(hhdspend.keys()), concs_dict2, meta) newY = make_new_Y(ukmrio['Y'], yhh_wide, meta, list(hhdspend.keys())) ylcf_props = make_ylcf_props(hhdspend, list(hhdspend.keys())) COICOP_ghg = makefoot(ukmrio['S'], ukmrio['U'], newY, ukmrio['ghg'], list(hhdspend.keys())) Total_ghg = {}; multipliers = {} for year in list(hhdspend.keys()): COICOP_ghg[year][160] += ukmrio['uk_ghg_direct'][year][1] COICOP_ghg[year][101] += ukmrio['uk_ghg_direct'][year][0] # multipliers tCO2e/GBP multipliers[year] = df(COICOP_ghg[year], columns=['total_ghg'], index=hhdspend[year].columns) multipliers[year]['total_spend'] = hhdspend[year].sum(0) multipliers[year]['multipliers'] = multipliers[year]['total_ghg'] / multipliers[year]['total_spend'] # 
        # this gives GHG emissions for the groups, break down to per capita emissions
        temp = np.dot(ylcf_props[year], np.diag(COICOP_ghg[year]))
        Total_ghg[year] = df(temp, index=hhdspend[year].index, columns=hhdspend[year].columns)

    return Total_ghg, multipliers
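# Editor's sketch (not part of the original script): a tiny synthetic check of the
# IO helpers defined above. S, U and Y below are made-up 2x2 / 4x1 blocks, used
# only to show how make_Z_from_S_U, make_x and make_L fit together (a Leontief
# inverse on the combined supply-use matrix).
def _demo_io_helpers():
    S = np.array([[10.0, 0.0], [0.0, 8.0]])   # synthetic supply block
    U = np.array([[2.0, 1.0], [1.0, 3.0]])    # synthetic use block
    Y = np.full((4, 1), 5.0)                  # synthetic final demand
    Z = make_Z_from_S_U(S, U)                 # 4x4 combined supply-use matrix
    x = make_x(Z, Y)                          # gross output per sector/product
    L = make_L(Z, x)                          # Leontief inverse (I - A)^-1
    return L.shape                            # -> (4, 4)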
import unittest import ast import mock from kalliope.core.Models.Player import Player from kalliope.core.Models.Tts import Tts from kalliope.core.Models.Trigger import Trigger from kalliope.core.Models.Stt import Stt from kalliope.core.Models.RestAPI import RestAPI from kalliope.core.Models.Dna import Dna from kalliope.core import LIFOBuffer from kalliope.core.Models.Settings import Settings from kalliope.core.Models import Neuron, Order, Synapse, Brain, Event, Resources, Singleton from kalliope.core.Models.APIResponse import APIResponse from kalliope.core.Models.MatchedSynapse import MatchedSynapse class TestModels(unittest.TestCase): def setUp(self): # Kill the singleton Singleton._instances = dict() # Init neuron1 = Neuron(name='neurone1', parameters={'var1': 'val1'}) neuron2 = Neuron(name='neurone2', parameters={'var2': 'val2'}) neuron3 = Neuron(name='neurone3', parameters={'var3': 'val3'}) neuron4 = Neuron(name='neurone4', parameters={'var4': 'val4'}) signal1 = Order(sentence="this is the sentence") signal2 = Order(sentence="this is the second sentence") signal3 = Order(sentence="that is part of the third sentence") self.synapse1 = Synapse(name="Synapse1", neurons=[neuron1, neuron2], signals=[signal1]) self.synapse2 = Synapse(name="Synapse2", neurons=[neuron3, neuron4], signals=[signal2]) self.synapse3 = Synapse(name="Synapse3", neurons=[neuron2, neuron4], signals=[signal3]) self.all_synapse_list1 = [self.synapse1, self.synapse2, self.synapse3] self.all_synapse_list2 = [self.synapse2, self.synapse3] self.brain_test1 = Brain(synapses=self.all_synapse_list1) self.brain_test2 = Brain(synapses=self.all_synapse_list2) # this brain is the same as the first one self.brain_test3 = Brain(synapses=self.all_synapse_list1) self.settings_test = Settings(default_synapse="Synapse3") # clean the LiFO LIFOBuffer.lifo_list = list() def test_APIResponse(self): user_order = "user order" self.matched_synapse = MatchedSynapse(matched_synapse=self.synapse1, matched_order=user_order) api_response = APIResponse() api_response.user_order = user_order api_response.list_processed_matched_synapse = [self.matched_synapse] expected_result_serialize = { 'status': None, 'matched_synapses': [ { 'matched_order': 'user order', 'neuron_module_list': [], 'synapse_name': 'Synapse1' } ], 'user_order': 'user order' } self.assertDictEqual(expected_result_serialize, api_response.serialize()) def test_Brain(self): # test get synapse by name expect_result = self.synapse1 synapse_name = "Synapse1" self.assertEqual(self.brain_test1.get_synapse_by_name(synapse_name), expect_result) # test equals self.assertTrue(self.brain_test1.__eq__(self.brain_test3)) # test not equals self.assertFalse(self.brain_test1.__eq__(self.brain_test2)) def test_Dna(self): # create DNA object dna1 = Dna(name="dna1", module_type="neuron", author="kalliope", kalliope_supported_version="0.4.4", tags="test") dna2 = Dna(name="dna2", module_type="neuron", author="community", kalliope_supported_version="0.4.2", tags="other") # this dna is exactly the same as the first one dna3 = Dna(name="dna1", module_type="neuron", author="kalliope", kalliope_supported_version="0.4.4", tags="test") expected_result_serialize = { 'kalliope_supported_version': '0.4.4', 'tags': 'test', 'type': 'neuron', 'name': 'dna1', 'author': 'kalliope' } self.assertDictEqual(expected_result_serialize, dna1.serialize()) self.assertTrue(dna1.__eq__(dna3)) self.assertFalse(dna1.__eq__(dna2)) def test_Event(self): event1 = Event(year=2017, month=12, day=31, week=53, day_of_week=2, hour=8, 
minute=30, second=0) event2 = Event(year=2018, month=11, day=30, week=25, day_of_week=4, hour=9, minute=40, second=0) # same as the event1 event3 = Event(year=2017, month=12, day=31, week=53, day_of_week=2, hour=8, minute=30, second=0) expected_result_serialize = { 'event': { 'week': 53, 'second': 0, 'minute': 30, 'hour': 8, 'year': 2017, 'day': 31, 'day_of_week': 2, 'month': 12 } } self.assertDictEqual(expected_result_serialize, event1.serialize()) self.assertTrue(event1.__eq__(event3)) self.assertFalse(event1.__eq__(event2)) def test_MatchedSynapse(self): user_order = "user order" matched_synapse1 = MatchedSynapse(matched_synapse=self.synapse1, matched_order=user_order) matched_synapse2 = MatchedSynapse(matched_synapse=self.synapse2, matched_order=user_order) matched_synapse3 = MatchedSynapse(matched_synapse=self.synapse1, matched_order=user_order) expected_result_serialize = { 'matched_order': 'user order', 'neuron_module_list': [], 'synapse_name': 'Synapse1' } self.assertDictEqual(expected_result_serialize, matched_synapse1.serialize()) self.assertTrue(matched_synapse1.__eq__(matched_synapse3)) self.assertFalse(matched_synapse1.__eq__(matched_synapse2)) # test neuron parameter loader is called with mock.patch("kalliope.core.NeuronParameterLoader.get_parameters") as mock_get_parameters: MatchedSynapse(matched_synapse=self.synapse1, matched_order=user_order, user_order=user_order) mock_get_parameters.assert_called_once_with(synapse_order=user_order, user_order=user_order) mock_get_parameters.reset_mock() def test_Neuron(self): neuron1 = Neuron(name="test", parameters={"key1": "val1", "key2": "val2"}) neuron2 = Neuron(name="test", parameters={"key3": "val3", "key4": "val4"}) neuron3 = Neuron(name="test", parameters={"key1": "val1", "key2": "val2"}) expected_result_serialize = {'name': 'test', 'parameters': {'key2': 'val2', 'key1': 'val1'}} self.assertDictEqual(expected_result_serialize, neuron1.serialize()) self.assertTrue(neuron1.__eq__(neuron3)) self.assertFalse(neuron1.__eq__(neuron2)) # test password neuron_name = "test" neuron_parameters = { "password": "<PASSWORD>", "parameter": "test" } neuron = Neuron() neuron.name = neuron_name neuron.parameters = neuron_parameters expected_result_str = "{'name': 'test', 'parameters': {'password': '*****', 'parameter': 'test'}}" self.assertDictEqual(ast.literal_eval(neuron.__str__()), ast.literal_eval(expected_result_str)) neuron_name = "test" neuron_parameters = { "password_parameter": "<PASSWORD>", "parameter": "test" } neuron = Neuron() neuron.name = neuron_name neuron.parameters = neuron_parameters expected_result_str = "{'name': 'test', 'parameters': {'parameter': 'test', 'password_parameter': '*****'}}" self.assertDictEqual(ast.literal_eval(neuron.__str__()), ast.literal_eval(expected_result_str)) def test_Order(self): order1 = Order(sentence="this is an order") order2 = Order(sentence="this is an other order") order3 = Order(sentence="this is an order") expected_result_serialize = {'order': 'this is an order'} expected_result_str = "{'order': 'this is an order'}" self.assertEqual(expected_result_serialize, order1.serialize()) self.assertEqual(expected_result_str, order1.__str__()) self.assertTrue(order1.__eq__(order3)) self.assertFalse(order1.__eq__(order2)) def test_Resources(self): resource1 = Resources(neuron_folder="/path/neuron", stt_folder="/path/stt", tts_folder="/path/tts", trigger_folder="/path/trigger") resource2 = Resources(neuron_folder="/other_path/neuron", stt_folder="/other_path/stt", tts_folder="/other_path/tts", 
trigger_folder="/other_path/trigger") resource3 = Resources(neuron_folder="/path/neuron", stt_folder="/path/stt", tts_folder="/path/tts", trigger_folder="/path/trigger") expected_result_serialize = { 'tts_folder': '/path/tts', 'neuron_folder': '/path/neuron', 'stt_folder': '/path/stt', 'trigger_folder': '/path/trigger' } self.assertDictEqual(expected_result_serialize, resource1.serialize()) self.assertTrue(resource1.__eq__(resource3)) self.assertFalse(resource1.__eq__(resource2)) def test_RestAPI(self): rest_api1 = RestAPI(password_protected=True, login="admin", password="password", active=True, port=5000, allowed_cors_origin="*") rest_api2 = RestAPI(password_protected=False, active=False, port=5000, allowed_cors_origin=None) rest_api3 = RestAPI(password_protected=True, login="admin", password="password", active=True, port=5000, allowed_cors_origin="*") expected_result_serialize = { 'password_protected': True, 'port': 5000, 'active': True, 'allowed_cors_origin': '*', 'password': 'password', 'login': 'admin' } self.assertDictEqual(expected_result_serialize, rest_api1.serialize()) self.assertTrue(rest_api1.__eq__(rest_api3)) self.assertFalse(rest_api1.__eq__(rest_api2)) def test_Settings(self): with mock.patch('platform.machine', return_value='pumpkins'): rest_api1 = RestAPI(password_protected=True, login="admin", password="password", active=True, port=5000, allowed_cors_origin="*") setting1 = Settings(default_tts_name="pico2wav", default_stt_name="google", default_trigger_name="swoyboy", default_player_name="mplayer", ttss=["ttts"], stts=["stts"], random_wake_up_answers=["yes"], random_wake_up_sounds=None, play_on_ready_notification=False, on_ready_answers=None, on_ready_sounds=None, triggers=["snowboy"], players=["mplayer"], rest_api=rest_api1, cache_path="/tmp/kalliope", default_synapse="default_synapse", resources=None, variables={"key1": "val1"}) setting1.kalliope_version = "0.4.5" setting2 = Settings(default_tts_name="accapela", default_stt_name="bing", default_trigger_name="swoyboy", default_player_name="mplayer", ttss=["ttts"], stts=["stts"], random_wake_up_answers=["no"], random_wake_up_sounds=None, play_on_ready_notification=False, on_ready_answers=None, on_ready_sounds=None, triggers=["snowboy"], rest_api=rest_api1, cache_path="/tmp/kalliope_tmp", default_synapse="my_default_synapse", resources=None, variables={"key1": "val1"}) setting2.kalliope_version = "0.4.5" setting3 = Settings(default_tts_name="pico2wav", default_stt_name="google", default_trigger_name="swoyboy", default_player_name="mplayer", ttss=["ttts"], stts=["stts"], random_wake_up_answers=["yes"], random_wake_up_sounds=None, play_on_ready_notification=False, on_ready_answers=None, on_ready_sounds=None, triggers=["snowboy"], players=["mplayer"], rest_api=rest_api1, cache_path="/tmp/kalliope", default_synapse="default_synapse", resources=None, variables={"key1": "val1"}) setting3.kalliope_version = "0.4.5" expected_result_serialize = { 'default_synapse': 'default_synapse', 'default_tts_name': 'pico2wav', 'rest_api': { 'password_protected': True, 'port': 5000, 'active': True, 'allowed_cors_origin': '*', 'password': 'password', 'login': 'admin' }, 'play_on_ready_notification': False, 'default_stt_name': 'google', 'kalliope_version': '0.4.5', 'random_wake_up_sounds': None, 'on_ready_answers': None, 'default_trigger_name': 'swoyboy', 'default_player_name': 'mplayer', 'cache_path': '/tmp/kalliope', 'stts': ['stts'], 'machine': 'pumpkins', 'random_wake_up_answers': ['yes'], 'on_ready_sounds': None, 'ttss': ['ttts'], 
'variables': {'key1': 'val1'}, 'resources': None, 'triggers': ['snowboy'], 'rpi_settings': None, 'players': ['mplayer'] } self.assertDictEqual(expected_result_serialize, setting1.serialize()) self.assertTrue(setting1.__eq__(setting3)) self.assertFalse(setting1.__eq__(setting2)) def test_Stt(self): stt1 = Stt(name="stt1", parameters={"key1": "val1"}) stt2 = Stt(name="stt2", parameters={"key2": "val2"}) stt3 = Stt(name="stt1", parameters={"key1": "val1"}) expected_result_serialize = {'name': 'stt1', 'parameters': {'key1': 'val1'}} self.assertDictEqual(expected_result_serialize, stt1.serialize()) self.assertTrue(stt1.__eq__(stt3)) self.assertFalse(stt1.__eq__(stt2)) def test_Synapse(self): neuron1 = Neuron(name='neurone1', parameters={'var1': 'val1'}) neuron2 = Neuron(name='neurone2', parameters={'var2': 'val2'}) neuron3 = Neuron(name='neurone3', parameters={'var3': 'val3'}) neuron4 = Neuron(name='neurone4', parameters={'var4': 'val4'}) signal1 = Order(sentence="this is the sentence") signal2 = Order(sentence="this is the second sentence") synapse1 = Synapse(name="Synapse1", neurons=[neuron1, neuron2], signals=[signal1]) synapse2 = Synapse(name="Synapse2", neurons=[neuron3, neuron4], signals=[signal2]) synapse3 = Synapse(name="Synapse1", neurons=[neuron1, neuron2], signals=[signal1]) expected_result_serialize = { 'signals': [ { 'order': 'this is the sentence' } ], 'neurons': [ { 'name': 'neurone1', 'parameters': { 'var1': 'val1' } }, { 'name': 'neurone2', 'parameters': { 'var2': 'val2' } } ], 'name': 'Synapse1' } self.assertDictEqual(expected_result_serialize, synapse1.serialize()) self.assertTrue(synapse1.__eq__(synapse3)) self.assertFalse(synapse1.__eq__(synapse2)) def test_Trigger(self): trigger1 = Trigger(name="trigger1", parameters={"key1": "val1"}) trigger2 = Trigger(name="trigger2", parameters={"key2": "val2"}) trigger3 = Trigger(name="trigger1", parameters={"key1": "val1"}) expected_result_serialize = {'name': 'trigger1', 'parameters': {'key1': 'val1'}} self.assertDictEqual(expected_result_serialize, trigger1.serialize()) self.assertTrue(trigger1.__eq__(trigger3)) self.assertFalse(trigger1.__eq__(trigger2)) def test_Player(self): player1 = Player(name="player1", parameters={"key1": "val1"}) player2 = Player(name="player2", parameters={"key2": "val2"}) player3 = Player(name="player1", parameters={"key1": "val1"}) expected_result_serialize = {'name': 'player1', 'parameters': {'key1': 'val1'}} self.assertDictEqual(expected_result_serialize, player1.serialize()) self.assertTrue(player1.__eq__(player3)) self.assertFalse(player1.__eq__(player2)) def test_Tts(self): tts1 = Tts(name="tts1", parameters={"key1": "val1"}) tts2 = Tts(name="tts2", parameters={"key2": "val2"}) tts3 = Tts(name="tts1", parameters={"key1": "val1"}) expected_result_serialize = {'name': 'tts1', 'parameters': {'key1': 'val1'}} self.assertDictEqual(expected_result_serialize, tts1.serialize()) self.assertTrue(tts1.__eq__(tts3)) self.assertFalse(tts1.__eq__(tts2))
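# Editor's addition (sketch): the original module defines the test case but no entry
# point; mirroring the other unittest modules in this collection, a standard guard
# would be:
if __name__ == '__main__':
    unittest.main()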
<gh_stars>10-100 # !/usr/bin/env python # -*- coding: utf-8 -*- """ Defines the unit tests for the :mod:`colour_hdri.exposure.common` module. """ import numpy as np import unittest from colour_hdri.exposure import ( average_luminance, average_illuminance, luminance_to_exposure_value, illuminance_to_exposure_value, adjust_exposure) __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2015-2021 - Colour Developers' __license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = '<EMAIL>' __status__ = 'Production' __all__ = [ 'TestAverageLuminance', 'TestAverageIlluminance', 'TestLuminanceToExposureValue', 'TestAdjustExposure', ] class TestAverageLuminance(unittest.TestCase): """ Defines :func:`colour_hdri.exposure.common.average_luminance` definition unit tests methods. """ def test_average_luminance(self): """ Tests :func:`colour_hdri.exposure.common.average_luminance` definition. """ np.testing.assert_almost_equal( average_luminance( np.array([2.8, 5.6, 8]), np.array([0.125, 0.5, 1.0]), np.array([100, 800, 16000]), ), np.array([7.84000000, 0.98000000, 0.05000000]), decimal=7) class TestAverageIlluminance(unittest.TestCase): """ Defines :func:`colour_hdri.exposure.common.average_illuminance` definition unit tests methods. """ def test_average_illuminance(self): """ Tests :func:`colour_hdri.exposure.common.average_illuminance` definition. """ np.testing.assert_almost_equal( average_illuminance( np.array([2.8, 5.6, 8]), np.array([0.125, 0.5, 1.0]), np.array([100, 800, 16000]), ), np.array([156.80000000, 19.60000000, 1.00000000]), decimal=7) class TestLuminanceToExposureValue(unittest.TestCase): """ Defines :func:`colour_hdri.exposure.common.luminance_to_exposure_value` definition unit tests methods. """ def test_luminance_to_exposure_value(self): """ Tests :func:`colour_hdri.exposure.common.luminance_to_exposure_value` definition. """ np.testing.assert_almost_equal( luminance_to_exposure_value( np.array([0.125, 0.250, 0.125]), np.array([100, 100, 100]), np.array([12.5, 12.5, 14]), ), np.array([0.00000000, 1.00000000, -0.16349873]), decimal=7) class TestIlluminanceToExposureValue(unittest.TestCase): """ Defines :func:`colour_hdri.exposure.common.illuminance_to_exposure_value` definition unit tests methods. """ def test_illuminance_to_exposure_value(self): """ Tests :func:`colour_hdri.exposure.common.illuminance_to_exposure_value` definition. """ np.testing.assert_almost_equal( illuminance_to_exposure_value( np.array([2.5, 5.0, 0.125]), np.array([100, 100, 100]), np.array([250, 250, 340]), ), np.array([0.00000000, 1.00000000, -4.76553475]), decimal=7) class TestAdjustExposure(unittest.TestCase): """ Defines :func:`colour_hdri.exposure.common.adjust_exposure` definition unit tests methods. """ def test_adjust_exposure(self): """ Tests :func:`colour_hdri.exposure.common.adjust_exposure` definition. """ np.testing.assert_almost_equal( adjust_exposure(np.array([0.25, 0.5, 0.75, 1]), 1), np.array([0.5, 1.0, 1.5, 2.]), decimal=7) if __name__ == '__main__': unittest.main()
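# Editor's sketch (not part of the original test module): the fixtures above are
# consistent with the usual exposure relations L = K * N^2 / (t * S) and
# E = C * N^2 / (t * S), assuming the calibration constants K = 12.5 and C = 250.
# A quick numeric check of the first fixture:
def _check_first_fixture():
    N, t, S = 2.8, 0.125, 100.0
    assert abs(12.5 * N ** 2 / (t * S) - 7.84) < 1e-9    # average luminance
    assert abs(250.0 * N ** 2 / (t * S) - 156.8) < 1e-9  # average illuminance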
# -*- coding: utf-8 -*- """ @author: hsowan <<EMAIL>> @date: 2019/10/28 爬取稻壳网站上的word """ import json import os import re import time from pymongo import MongoClient from selenium import webdriver from time import sleep from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver.common.by import By from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.support import expected_conditions as EC import requests from bs4 import BeautifulSoup import sqlite3 docer_home = 'https://www.docer.com/' cookies_file = 'cookies.json' # 开启mongodb mongo_uri = 'mongodb://root:root@localhost:27017/' mongo_db = 'docer' # word | ppt | excel collection_name = 'ppt' # Todo: 使用sqlite sqlite_db = 'docer.db' # 保存爬取文件的绝对路径 save_path = '/Users/mac/Downloads/docer/word/' def download(download_url): # Selenium 如何使用webdriver下载文件(chrome浏览器): https://blog.csdn.net/weixin_41812940/article/details/82423892 options = webdriver.ChromeOptions() prefs = { 'profile.default_content_settings.popups': 0, 'download.default_directory': save_path, } options.add_experimental_option('prefs', prefs) # 开启网络日志: https://stackoverflow.com/questions/56507652/selenium-chrome-cant-see-browser-logs-invalidargumentexception options.add_experimental_option('w3c', False) caps = DesiredCapabilities.CHROME caps['loggingPrefs'] = {'performance': 'ALL'} driver = webdriver.Chrome(options=options, desired_capabilities=caps) try: # 先请求,再添加cookies # selenium.common.exceptions.InvalidCookieDomainException: Message: Document is cookie-averse driver.get(docer_home) # 从文件中获取到cookies with open(cookies_file, 'r', encoding='utf-8') as f: cookies = json.loads(f.read()) for c in cookies: driver.add_cookie({'name': c['name'], 'value': c['value'], 'path': c['path'], 'domain': c['domain'], 'secure': c['secure']}) driver.get(download_url) sleep(1) # 获取word名称 word_name = driver.find_element_by_xpath("/html/body/div[@id='__nuxt']/div[@id='__layout']/div[@id='App']/div[@class='g-router-regular']/div[2]/div[@class='preview g-clearfloat']/div[@class='preview__info']/h1[@class='preview__title']").text # 只要简历模板 # if word_name.find('简历') == -1: # return # 获取word编号 pattern = re.compile(r'\d+') word_id = pattern.findall(driver.find_element_by_xpath("/html/body/div[@id='__nuxt']/div[@id='__layout']/div[@id='App']/div[@class='g-router-regular']/div[2]/div[@class='preview g-clearfloat']/div[@class='preview__info']/ul[@class='preview__detail g-clearfloat']/li[@class='preview__detail-item'][3]").text)[0] # 是否是VIP模板 is_vip = driver.find_element_by_xpath("/html/body/div[@id='__nuxt']/div[@id='__layout']/div[@id='App']/div[@class='g-router-regular']/div[2]/div[@class='preview g-clearfloat']/div[@class='preview__info']/ul[@class='preview__detail g-clearfloat']/li[@class='preview__detail-item'][4]").text.find('VIP') != -1 # 只爬取VIP模板 if not is_vip: return # 开启mongodb # 使用mongodb保存文件信息 if col.find_one({'id': word_id}): return else: col.insert_one(dict(name=word_name, id=word_id, url=url)) # Todo: 使用sqlite代替mongodb element = WebDriverWait(driver, 10).until(EC.presence_of_element_located( (By.XPATH, "/html/body/div[@id='__nuxt']/div[@id='__layout']/div[@id='App']/div[@class='g-router-regular']/div[2]/div[@class='preview g-clearfloat']/div[@class='preview__info']/div[@class='preview__btns g-clearfloat']/span[2]")) ) # Todo: 等待并不能完全解决 element not interactable sleep(2) element.click() # 等待下载完成以及网络日志 sleep(2) # 对下载文件进行重命名 logs = driver.get_log('performance') for log in logs: if log['level'] == 'INFO': json_message = 
json.loads(log['message']) if json_message['message']['method'] == 'Network.requestWillBeSent': resource_uri = json_message['message']['params']['documentURL'] if resource_uri and resource_uri.count('file.cache.docer.com') == 1: # 对资源路径进行分割获取下载文件名 s = resource_uri.split('/') filename = s[len(s) - 1] # 下载文件名后缀 filename_suffix = filename.split('.')[1] # 如果存在同名文件则对文件名进行处理: 原有文件名-时间戳.文件名后缀 save_filename = word_name + '-' + str(int(time.time())) if os.path.exists(f'{save_path + word_name + "." + filename_suffix}') else word_name cmd = f'mv {save_path + filename} {save_path + save_filename + "." + filename_suffix}' os.system(cmd) break sleep(3) finally: driver.quit() if __name__ == '__main__': # Word: https://www.docer.com/s/wps/?page= # PPT: https://www.docer.com/s/wpp/?page= # Excel: https://www.docer.com/s/et/?page= base_url = 'https://www.docer.com/s/wps/?page=' for i in range(1, 314): url = base_url + str(i) headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36', 'Host': 'www.docer.com', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3', 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8' } # 开启mongodb client = MongoClient(mongo_uri) db = client[mongo_db] col = db[collection_name] # Todo: 使用sqlite代替mongodb try: r = requests.get(url, headers=headers) if r.status_code == 200: html = r.content.decode() soup = BeautifulSoup(html, 'lxml') items = soup.select('ul.m-list.g-justify-list.sub-wps-container li a') for item in items: word_uri = 'https:' + item['href'] download(word_uri) finally: client.close()
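# Editor's sketch (not in the original script): the "Todo: 使用sqlite代替mongodb"
# ("use sqlite instead of mongodb") notes above suggest replacing the MongoDB
# de-duplication check; sqlite3 and `sqlite_db` are already set up, so a minimal
# replacement for the `col.find_one(...)` check could look like this (the table
# and function names are illustrative, not part of the source).
def _already_downloaded(word_id: str) -> bool:
    conn = sqlite3.connect(sqlite_db)
    conn.execute('CREATE TABLE IF NOT EXISTS items (id TEXT PRIMARY KEY, name TEXT, url TEXT)')
    row = conn.execute('SELECT 1 FROM items WHERE id = ?', (word_id,)).fetchone()
    conn.close()
    return row is not None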
<gh_stars>1-10 from xml.dom.pulldom import default_bufsize import preprocessor as p import matplotlib.pyplot as plt import re import string import numpy as np from nltk.corpus import stopwords import nltk import matplotlib.pyplot as plt from wordcloud import WordCloud, STOPWORDS from datetime import datetime from PIL import Image, ImageFont, ImageDraw import tweepy import config import string import os import time from nltk.tokenize import sent_tokenize, word_tokenize from nltk.corpus import stopwords # nltk.download() consumer_key = config.consumer_key consumer_secret = config.consumer_secret access_token = config.access_token access_token_secret = config.access_token_secret bearer_token = config.bearer_token username = config.username password = <PASSWORD> # URL p.OPT.URL # Mention p.OPT.MENTION # Hashtag p.OPT.HASHTAG # Reserved Words p.OPT.RESERVED # Emoji p.OPT.EMOJI # Smiley p.OPT.SMILEY # Number p.OPT.NUMBER def clean_text(text): """ Function to clean the text. Parameters: text: the raw text as a string value that needs to be cleaned Returns: cleaned_text: the cleaned text as string """ # convert to lower case cleaned_text = text.lower() # remove HTML tags html_pattern = re.compile("<.*?>") cleaned_text = re.sub(html_pattern, "", cleaned_text) # remove punctuations cleaned_text = cleaned_text.translate(str.maketrans("", "", string.punctuation)) return cleaned_text.strip() def remove_whitespace(text): return " ".join(text.split()) def clean_tweets(tweet_text): p.set_options(p.OPT.URL, p.OPT.MENTION, p.OPT.EMOJI) clean_tweet_text = p.clean(tweet_text) clean_tweet_text = clean_tweet_text.replace("&amp", "") clean_tweet_text = clean_tweet_text.replace("\\n", "") return clean_tweet_text def create_filename(root, ext): current_datetime = datetime.now() str_current_datetime = str(current_datetime) new_filename = root + str_current_datetime new_filename = remove_whitespace(new_filename) new_filename = new_filename.replace(":", "").replace(".", "") new_filename = new_filename.replace(" ", "") new_filename = new_filename + ext return new_filename def delete_files(): os.remove("molegtoconst.txt") os.remove("consttomoleg.txt") os.remove(mfile_name_png) os.remove(mfile_name_jpg) os.remove(cfile_name_png) os.remove(cfile_name_jpg) os.remove(merged_filename) morewords = [ "moleg", "Missouri", "make", "whatever", "say", "self", "defense", "morning", "back", "stand", "says", "ground", "rt", "will", "one", "now", "im", "new", "mo", "dont", "u", "state", "rt", "'moleg", "rt '", "rt'", "moleg'", ] STOPWORDS.update(morewords) stopwords_ls = list(set(stopwords.words("english"))) stopwords_ls = [clean_text(word) for word in stopwords_ls] mo_mask = np.array(Image.open("legislator.jpg")) title_font = ImageFont.truetype("AllerDisplay.ttf", 45) user_id = "4591016128" # Nicky mtitle_text = "Schwardon Tweets" user_id2 = "605754185" # <NAME> ctitle_text = "Koenig Tweets" # MOLEG to CONST client = tweepy.Client(bearer_token=bearer_token) response = client.get_users_tweets( user_id, tweet_fields=["created_at"], max_results=100 ) tweets = response.data metadata = response.meta next_token = metadata.get("next_token") while next_token is not None: for tweet in tweets: tweet_text = tweet.text tweet_clean_text = clean_tweets(tweet.text) tweet_created_at = tweet.created_at tweet_clean_text = clean_text(tweet_clean_text) words = word_tokenize(tweet_clean_text) # print(tweet_text + " posted with " + tweet.source) wordsFiltered = [] for w in words: if w not in stopwords_ls: wordsFiltered.append(w) tweet_clean_text = 
str(wordsFiltered) print("moleg list") print(tweet_clean_text) print("\n") print(tweet_created_at) print("\n") print("--------------------------------------------------------------------") with open("molegtoconst.txt", "a") as f: f.write(tweet_clean_text) f.write("\n") f.close() response = client.get_users_tweets( user_id, tweet_fields=["created_at"], pagination_token=next_token, max_results=100, ) tweets = response.data metadata = response.meta next_token = metadata.get("next_token") #################################################################################################################### text_file = open("molegtoconst.txt", "r") data = text_file.read() data = clean_tweets(data) text = clean_text(data) # double clean cloud = WordCloud( scale=3, max_words=125, colormap="RdYlGn", mask=mo_mask, background_color="black", stopwords=STOPWORDS, collocations=True, ).generate_from_text(data) plt.figure(figsize=(10, 8)) plt.imshow(cloud) plt.axis("off") mfile_name_png = create_filename("molegtoconst", ".png") mfile_name_jpg = create_filename("molegtoconst", ".jpg") cloud = cloud.to_file(mfile_name_png) my_image = Image.open(mfile_name_png) image_editable = ImageDraw.Draw(my_image) image_editable.text((15, 1000), mtitle_text, (143, 24, 16), font=title_font) my_image.save(mfile_name_jpg) text_file.close() ################################################################################################# # CONST to MOLEG tweets = [] response = client.get_users_tweets( user_id2, tweet_fields=["created_at"], max_results=100 ) metadata = response.meta next_token = metadata.get("next_token") tweets = response.data while next_token is not None: for tweet in tweets: tweet_text = tweet.text tweet_clean_text = clean_tweets(tweet.text) tweet_created_at = tweet.created_at tweet_clean_text = clean_text(tweet_clean_text) words = word_tokenize(tweet_clean_text) wordsFiltered = [] for w in words: if w not in stopwords_ls: wordsFiltered.append(w) tweet_clean_text = str(wordsFiltered) print(tweet_clean_text) print("\n") print(tweet_created_at) print("\n") print("--------------------------------------------------------------------") with open("consttomoleg.txt", "a") as f: f.write(tweet_clean_text) f.write("\n") f.close() response = client.get_users_tweets( user_id2, tweet_fields=["created_at"], pagination_token=next_token, max_results=100, ) tweets = response.data metadata = response.meta next_token = metadata.get("next_token") ########################################################################################### text_file = open("consttomoleg.txt", "r") data = text_file.read() data = clean_tweets(data) text = clean_text(data) # double clean cloud = WordCloud( scale=3, max_words=150, colormap="RdYlGn", mask=mo_mask, background_color="black", stopwords=STOPWORDS, collocations=True, ).generate_from_text(data) plt.figure(figsize=(10, 8)) plt.imshow(cloud) plt.axis("off") cfile_name_png = create_filename("consttomoleg", ".png") cfile_name_jpg = create_filename("consttomoleg", ".jpg") cloud = cloud.to_file(cfile_name_png) my_image = Image.open(cfile_name_png) image_editable = ImageDraw.Draw(my_image) image_editable.text((15, 1000), ctitle_text, (143, 24, 16), font=title_font) my_image.save(cfile_name_jpg) text_file.close() # Read the two images image1 = Image.open(cfile_name_jpg) image2 = Image.open(mfile_name_jpg) # resize, first image image1_size = image1.size image2_size = image2.size new_image = Image.new("RGB", (2 * image1_size[0], image1_size[1]), (250, 250, 250)) new_image.paste(image1, (0, 0)) 
new_image.paste(image2, (image1_size[0], 0))

merged_filename = create_filename("merged", ".jpg")
new_image.save(merged_filename, "JPEG")

merged_title_font = ImageFont.truetype("AllerDisplay.ttf", 45)
current_datetime = datetime.now()
merged_title = str(current_datetime)

final_image = Image.open(merged_filename)
image_editable = ImageDraw.Draw(final_image)
image_editable.text((1400, 10), merged_title, (143, 24, 16), font=merged_title_font)

if os.path.exists("CompareLegislators.jpg"):
    os.remove("CompareLegislators.jpg")
final_image.save("CompareLegislators.jpg")

delete_files()
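# Editor's sketch (not in the original script): clean_text() on its own lowercases
# the input and strips HTML tags and punctuation, which can be checked independently
# of the Twitter API:
def _demo_clean_text():
    assert clean_text("Stand <b>Your</b> Ground!") == "stand your ground"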
<gh_stars>10-100 from __future__ import absolute_import, print_function import numpy as np import warnings def _bit_length_26(x): if x == 0: return 0 elif x == 1: return 1 else: return len(bin(x)) - 2 try: from scipy.lib._version import NumpyVersion except ImportError: import re string_types = basestring class NumpyVersion(): """Parse and compare numpy version strings. Numpy has the following versioning scheme (numbers given are examples; they can be >9) in principle): - Released version: '1.8.0', '1.8.1', etc. - Alpha: '1.8.0a1', '1.8.0a2', etc. - Beta: '1.8.0b1', '1.8.0b2', etc. - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - Development versions after a1: '1.8.0a1.dev-f1234afa', '1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc. - Development versions (no git hash available): '1.8.0.dev-Unknown' Comparing needs to be done against a valid version string or other `NumpyVersion` instance. Parameters ---------- vstring : str Numpy version string (``np.__version__``). Notes ----- All dev versions of the same (pre-)release compare equal. Examples -------- >>> from scipy.lib._version import NumpyVersion >>> if NumpyVersion(np.__version__) < '1.7.0': ... print('skip') skip >>> NumpyVersion('1.7') # raises ValueError, add ".0" """ def __init__(self, vstring): self.vstring = vstring ver_main = re.match(r'\d[.]\d+[.]\d+', vstring) if not ver_main: raise ValueError("Not a valid numpy version string") self.version = ver_main.group() self.major, self.minor, self.bugfix = [int(x) for x in self.version.split('.')] if len(vstring) == ver_main.end(): self.pre_release = 'final' else: alpha = re.match(r'a\d', vstring[ver_main.end():]) beta = re.match(r'b\d', vstring[ver_main.end():]) rc = re.match(r'rc\d', vstring[ver_main.end():]) pre_rel = [m for m in [alpha, beta, rc] if m is not None] if pre_rel: self.pre_release = pre_rel[0].group() else: self.pre_release = '' self.is_devversion = bool(re.search(r'.dev-', vstring)) def _compare_version(self, other): """Compare major.minor.bugfix""" if self.major == other.major: if self.minor == other.minor: if self.bugfix == other.bugfix: vercmp = 0 elif self.bugfix > other.bugfix: vercmp = 1 else: vercmp = -1 elif self.minor > other.minor: vercmp = 1 else: vercmp = -1 elif self.major > other.major: vercmp = 1 else: vercmp = -1 return vercmp def _compare_pre_release(self, other): """Compare alpha/beta/rc/final.""" if self.pre_release == other.pre_release: vercmp = 0 elif self.pre_release == 'final': vercmp = 1 elif other.pre_release == 'final': vercmp = -1 elif self.pre_release > other.pre_release: vercmp = 1 else: vercmp = -1 return vercmp def _compare(self, other): if not isinstance(other, (string_types, NumpyVersion)): raise ValueError("Invalid object to compare with NumpyVersion.") if isinstance(other, string_types): other = NumpyVersion(other) vercmp = self._compare_version(other) if vercmp == 0: # Same x.y.z version, check for alpha/beta/rc vercmp = self._compare_pre_release(other) if vercmp == 0: # Same version and same pre-release, check if dev version if self.is_devversion is other.is_devversion: vercmp = 0 elif self.is_devversion: vercmp = -1 else: vercmp = 1 return vercmp def __lt__(self, other): return self._compare(other) < 0 def __le__(self, other): return self._compare(other) <= 0 def __eq__(self, other): return self._compare(other) == 0 def __ne__(self, other): return self._compare(other) != 0 def __gt__(self, other): return self._compare(other) > 0 def __ge__(self, other): 
return self._compare(other) >= 0 def __repr(self): return "NumpyVersion(%s)" % self.vstring def _next_regular(target): """ Find the next regular number greater than or equal to target. Regular numbers are composites of the prime factors 2, 3, and 5. Also known as 5-smooth numbers or Hamming numbers, these are the optimal size for inputs to FFTPACK. Target must be a positive integer. """ if target <= 6: return target # Quickly check if it's already a power of 2 if not (target & (target - 1)): return target match = float('inf') # Anything found will be smaller p5 = 1 while p5 < target: p35 = p5 while p35 < target: # Ceiling integer division, avoiding conversion to float # (quotient = ceil(target / p35)) quotient = -(-target // p35) # Quickly find next power of 2 >= quotient try: p2 = 2 ** ((quotient - 1).bit_length()) except AttributeError: # Fallback for Python <2.7 p2 = 2 ** _bit_length_26(quotient - 1) N = p2 * p35 if N == target: return N elif N < match: match = N p35 *= 3 if p35 == target: return p35 if p35 < match: match = p35 p5 *= 5 if p5 == target: return p5 if p5 < match: match = p5 return match if NumpyVersion(np.__version__) >= '1.7.1': np_matrix_rank = np.linalg.matrix_rank else: def np_matrix_rank(M, tol=None): """ Return matrix rank of array using SVD method Rank of the array is the number of SVD singular values of the array that are greater than `tol`. Parameters ---------- M : {(M,), (M, N)} array_like array of <=2 dimensions tol : {None, float}, optional threshold below which SVD values are considered zero. If `tol` is None, and ``S`` is an array with singular values for `M`, and ``eps`` is the epsilon value for datatype of ``S``, then `tol` is set to ``S.max() * max(M.shape) * eps``. Notes ----- The default threshold to detect rank deficiency is a test on the magnitude of the singular values of `M`. By default, we identify singular values less than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with the symbols defined above). This is the algorithm MATLAB uses [1]. It also appears in *Numerical recipes* in the discussion of SVD solutions for linear least squares [2]. This default threshold is designed to detect rank deficiency accounting for the numerical errors of the SVD computation. Imagine that there is a column in `M` that is an exact (in floating point) linear combination of other columns in `M`. Computing the SVD on `M` will not produce a singular value exactly equal to 0 in general: any difference of the smallest SVD value from 0 will be caused by numerical imprecision in the calculation of the SVD. Our threshold for small SVD values takes this numerical imprecision into account, and the default threshold will detect such numerical rank deficiency. The threshold may declare a matrix `M` rank deficient even if the linear combination of some columns of `M` is not exactly equal to another column of `M` but only numerically very close to another column of `M`. We chose our default threshold because it is in wide use. Other thresholds are possible. For example, elsewhere in the 2007 edition of *Numerical recipes* there is an alternative threshold of ``S.max() * np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe this threshold as being based on "expected roundoff error" (p 71). The thresholds above deal with floating point roundoff error in the calculation of the SVD. However, you may have more information about the sources of error in `M` that would make you consider other tolerance values to detect *effective* rank deficiency. 
The most useful measure of the tolerance depends on the operations you intend to use on your matrix. For example, if your data come from uncertain measurements with uncertainties greater than floating point epsilon, choosing a tolerance near that uncertainty may be preferable. The tolerance may be absolute if the uncertainties are absolute rather than relative. References ---------- .. [1] MATLAB reference documention, "Rank" http://www.mathworks.com/help/techdoc/ref/rank.html .. [2] <NAME>, <NAME>, <NAME> and <NAME>, "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, page 795. Examples -------- >>> from numpy.linalg import matrix_rank >>> matrix_rank(np.eye(4)) # Full rank matrix 4 >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix >>> matrix_rank(I) 3 >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 1 >>> matrix_rank(np.zeros((4,))) 0 """ M = np.asarray(M) if M.ndim > 2: raise TypeError('array should have 2 or fewer dimensions') if M.ndim < 2: return int(not all(M == 0)) S = np.linalg.svd(M, compute_uv=False) if tol is None: tol = S.max() * max(M.shape) * np.finfo(S.dtype).eps return np.sum(S > tol) class CacheWriteWarning(UserWarning): pass class CachedAttribute(object): def __init__(self, func, cachename=None, resetlist=None): self.fget = func self.name = func.__name__ self.cachename = cachename or '_cache' self.resetlist = resetlist or () def __get__(self, obj, type=None): if obj is None: return self.fget # Get the cache or set a default one if needed _cachename = self.cachename _cache = getattr(obj, _cachename, None) if _cache is None: setattr(obj, _cachename, resettable_cache()) _cache = getattr(obj, _cachename) # Get the name of the attribute to set and cache name = self.name _cachedval = _cache.get(name, None) # print("[_cachedval=%s]" % _cachedval) if _cachedval is None: # Call the "fget" function _cachedval = self.fget(obj) # Set the attribute in obj # print("Setting %s in cache to %s" % (name, _cachedval)) try: _cache[name] = _cachedval except KeyError: setattr(_cache, name, _cachedval) # Update the reset list if needed (and possible) resetlist = self.resetlist if resetlist is not (): try: _cache._resetdict[name] = self.resetlist except AttributeError: pass # else: # print("Reading %s from cache (%s)" % (name, _cachedval)) return _cachedval def __set__(self, obj, value): errmsg = "The attribute '%s' cannot be overwritten" % self.name warnings.warn(errmsg, CacheWriteWarning) class _cache_readonly(object): """ Decorator for CachedAttribute """ def __init__(self, cachename=None, resetlist=None): self.func = None self.cachename = cachename self.resetlist = resetlist or None def __call__(self, func): return CachedAttribute(func, cachename=self.cachename, resetlist=self.resetlist) cache_readonly = _cache_readonly()
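# Editor's sketch (not part of the original module): how the two main utilities are
# typically used. _next_regular pads an FFT length up to the next 5-smooth
# ("regular") number, e.g. _next_regular(509) -> 512 and _next_regular(513) -> 540.
# cache_readonly memoises an expensive attribute on first access; the class below is
# illustrative only (a plain dict stands in for the resettable cache).
class _CacheDemo(object):
    def __init__(self):
        self._cache = {}             # pre-seed so CachedAttribute caches into a dict

    @cache_readonly
    def heavy(self):
        return sum(range(10 ** 6))   # computed on first access, then served from _cache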
<reponame>HaujetZhao/Caps_Writer<filename>src/moduels/gui/Tab_Config.py import webbrowser from PySide2.QtCore import Signal from PySide2.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QGridLayout, QGroupBox, QPushButton, QCheckBox from moduels.component.NormalValue import 常量 from moduels.gui.Group_EditableList import Group_EditableList from moduels.gui.Dialog_AddEngine import Dialog_AddEngine # from moduels.gui.Group_PathSetting import Group_PathSetting class Tab_Config(QWidget): 状态栏消息 = Signal(str, int) def __init__(self, parent=None): super(Tab_Config, self).__init__(parent) self.initElements() # 先初始化各个控件 self.initSlots() # 再将各个控件连接到信号槽 self.initLayouts() # 然后布局 self.initValues() # 再定义各个控件的值 def initElements(self): self.程序设置Box = QGroupBox(self.tr('程序设置')) self.开关_关闭窗口时隐藏到托盘 = QCheckBox(self.tr('点击关闭按钮时隐藏到托盘')) self.程序设置横向布局 = QHBoxLayout() self.引擎列表 = Group_EditableList('语音引擎', Dialog_AddEngine, 常量.数据库连接, 常量.语音引擎表单名, '引擎名称') self.常用网址Box = QGroupBox('网页控制台') self.常用网址Box布局 = QGridLayout() self.智能语音交互控制台按钮 = QPushButton('智能语音交互') self.RAM访问控制控制台按钮 = QPushButton('RAM访问控制') self.页面布局 = QVBoxLayout() def initSlots(self): self.开关_关闭窗口时隐藏到托盘.stateChanged.connect(self.设置_隐藏到状态栏) self.智能语音交互控制台按钮.clicked.connect(lambda: webbrowser.open(r'https://nls-portal.console.aliyun.com/')) self.RAM访问控制控制台按钮.clicked.connect(lambda: webbrowser.open(r'https://ram.console.aliyun.com/')) # self.路径设置Box.皮肤输出路径输入框.textChanged.connect(self.设置_皮肤输出路径) # self.路径设置Box.音效文件路径输入框.textChanged.connect(self.设置_音效文件路径) def initLayouts(self): self.程序设置横向布局.addWidget(self.开关_关闭窗口时隐藏到托盘) self.程序设置Box.setLayout(self.程序设置横向布局) self.常用网址Box布局.addWidget(self.智能语音交互控制台按钮, 0, 0) self.常用网址Box布局.addWidget(self.RAM访问控制控制台按钮, 0, 1) self.常用网址Box.setLayout(self.常用网址Box布局) self.页面布局.addWidget(self.程序设置Box) self.页面布局.addWidget(self.引擎列表) self.页面布局.addWidget(self.常用网址Box) self.页面布局.addStretch(1) self.setLayout(self.页面布局) def initValues(self): self.检查数据库() def 检查数据库(self): 数据库连接 = 常量.数据库连接 self.检查数据库_关闭时最小化(数据库连接) def 检查数据库_关闭时最小化(self, 数据库连接): result = 数据库连接.cursor().execute(f'''select value from {常量.偏好设置表单名} where item = :item''', {'item': 'hideToTrayWhenHitCloseButton'}).fetchone() if result == None: # 如果关闭窗口最小化到状态栏这个选项还没有在数据库创建,那就创建一个 初始值 = 'False' 数据库连接.cursor().execute(f'''insert into {常量.偏好设置表单名} (item, value) values (:item, :value) ''', {'item': 'hideToTrayWhenHitCloseButton', 'value':初始值}) 数据库连接.commit() self.开关_关闭窗口时隐藏到托盘.setChecked(初始值 == 'True') else: self.开关_关闭窗口时隐藏到托盘.setChecked(result[0] == 'True') # # def 检查数据库_皮肤输出路径(self, 数据库连接): # result = 数据库连接.cursor().execute(f'''select value from {常量.偏好设置表单名} where item = :item''', # {'item': 'skinOutputPath'}).fetchone() # if result == None: # 如果关闭窗口最小化到状态栏这个选项还没有在数据库创建,那就创建一个 # 初始值 = 'output' # 数据库连接.cursor().execute(f'''insert into {常量.偏好设置表单名} (item, value) values (:item, :value) ''', # {'item': 'skinOutputPath', # 'value': 初始值}) # 数据库连接.commit() # self.路径设置Box.皮肤输出路径输入框.setText(初始值) # else: # self.路径设置Box.皮肤输出路径输入框.setText(result[0]) # # def 检查数据库_音效文件路径(self, 数据库连接): # result = 数据库连接.cursor().execute(f'''select value from {常量.偏好设置表单名} where item = :item''', # {'item': 'soundFilePath'}).fetchone() # if result == None: # 如果关闭窗口最小化到状态栏这个选项还没有在数据库创建,那就创建一个 # 初始值 = 'sound' # 数据库连接.cursor().execute(f'''insert into {常量.偏好设置表单名} (item, value) values (:item, :value) ''', # {'item': 'soundFilePath', # 'value': 初始值}) # 数据库连接.commit() # self.路径设置Box.音效文件路径输入框.setText(初始值) # else: # self.路径设置Box.音效文件路径输入框.setText(result[0]) def 设置_隐藏到状态栏(self): 数据库连接 = 常量.数据库连接 
数据库连接.cursor().execute(f'''update {常量.偏好设置表单名} set value = :value where item = :item''', {'item': 'hideToTrayWhenHitCloseButton', 'value': str(self.开关_关闭窗口时隐藏到托盘.isChecked())}) 数据库连接.commit() 常量.关闭时隐藏到托盘 = self.开关_关闭窗口时隐藏到托盘.isChecked() # def 设置_皮肤输出路径(self): # 数据库连接 = 常量.数据库连接 # 数据库连接.cursor().execute(f'''update {常量.数据库偏好设置表单名} set value = :value where item = :item''', # {'item': 'skinOutputPath', # 'value': self.路径设置Box.皮肤输出路径输入框.text()}) # 数据库连接.commit() # 常量.皮肤输出路径 = self.路径设置Box.皮肤输出路径输入框.text() # # # def 设置_音效文件路径(self): # 数据库连接 = 常量.数据库连接 # 数据库连接.cursor().execute(f'''update {常量.数据库偏好设置表单名} set value = :value where item = :item''', # {'item': 'soundFilePath', # 'value': self.路径设置Box.音效文件路径输入框.text()}) # 数据库连接.commit() # 常量.音效文件路径 = self.路径设置Box.音效文件路径输入框.text() def 隐藏到状态栏开关被点击(self): cursor = 常量.数据库连接.cursor() cursor.execute(f'''update {常量.数据库偏好设置表单名} set value='{str(self.开关_关闭窗口时隐藏到托盘.isChecked())}' where item = '{'hideToTrayWhenHitCloseButton'}';''') 常量.数据库连接.commit()
import tkinter as tk from tkinter import ttk from tkinter import filedialog import PyPDF2 class Application(tk.Frame): def __init__(self, root=None): super().__init__(root) self.root = root self.file_list = [] self.btn_frame = tk.Frame(self) # Buttons self.input_button = tk.Button(self.btn_frame) self.output_button = tk.Button(self.btn_frame) self.delete_button = tk.Button(self.btn_frame) self.move_up_button = tk.Button(self.btn_frame) self.move_down_button = tk.Button(self.btn_frame) # Tree view self.pdf_list = ttk.Treeview(self, columns=1, selectmode="browse", show=["headings"]) self.pdf_list_scrollbar = ttk.Scrollbar(self, orient="vertical", command=self.pdf_list.yview) self.pack() self.create_widgets() def create_widgets(self): self.root.title("PDF Merger") self.root.resizable(False, False) # Button frame self.btn_frame.grid(row=0, column=0) # input button self.input_button["text"] = "Add PDF" self.input_button["command"] = self.input_pdf self.input_button.grid(row=0, column=0) # delete button self.delete_button["text"] = "Remove PDF" self.delete_button["command"] = self.delete_pdf self.delete_button.grid(row=1, column=0) # Save As button self.output_button["text"] = "Save As" self.output_button["command"] = self.output_pdf self.output_button.grid(row=2, column=0) # Move up button self.move_up_button["text"] = "Move up" self.move_up_button["command"] = self.move_up_pdf self.move_up_button.grid(row=3, column=0) # Move down button self.move_down_button["text"] = "Move down" self.move_down_button["command"] = self.move_down_pdf self.move_down_button.grid(row=4, column=0) # Tree view self.pdf_list.grid(row=0, column=1) self.pdf_list.heading('#1', text='File Name') self.pdf_list.column('#1', width=200) # Tree Scrollbar self.pdf_list_scrollbar.grid(row=0, column=1, sticky="nse") self.pdf_list.configure(yscrollcommand=self.pdf_list_scrollbar.set) def input_pdf(self): file_names = filedialog.askopenfilenames(filetypes=[("PDF", ".pdf")]) if not file_names: return for file_name in file_names: self.file_list.append(file_name) file_name_formatted = file_name.split("/") self.pdf_list.insert(parent="", index="end", iid=None, values=file_name_formatted[-1]) return def output_pdf(self): # if no files added if not self.file_list: return result = filedialog.asksaveasfilename(defaultextension=".pdf", filetypes=[("PDF", ".pdf")]) # if save file prompt canceled if not result: return merger = PyPDF2.PdfFileMerger() for file in self.file_list: merger.append(file) merger.write(result) merger.close() def delete_pdf(self): selection = self.pdf_list.focus() if not selection: return del self.file_list[self.pdf_list.index(selection)] self.pdf_list.delete(selection) return def move_up_pdf(self): selection = self.pdf_list.focus() if not selection: return i = self.pdf_list.index(selection) if i == 0: return j = i - 1 self.pdf_list.move(selection, self.pdf_list.parent(selection), index=j) self.file_list[j], self.file_list[i] = self.file_list[i], self.file_list[j] return def move_down_pdf(self): selection = self.pdf_list.focus() if not selection: return i = self.pdf_list.index(selection) if i == len(self.file_list) - 1: return j = i + 1 self.pdf_list.move(selection, self.pdf_list.parent(selection), index=j) self.file_list[j], self.file_list[i] = self.file_list[i], self.file_list[j] return
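# Editor's addition (sketch): the original file defines Application but never starts
# it; a conventional Tk entry point would be:
if __name__ == "__main__":
    root = tk.Tk()
    app = Application(root=root)
    app.mainloop()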
from time import time import json import hashlib import re from cryptonote.address import validate from .constants import * from .errors import * from . import database, credit, blocks, fee, wallet, daemon, rpc, log def record_payment(uid, txid, time, amount, fee): """Record payment""" try: database.execute('INSERT INTO payments (uid, txid, time, amount_paid, amount_fee, status) VALUES (%s, %s, %s, %s, %s, %s)', (uid, txid, time, amount, fee, PAYMENT_STATUS_PENDING)) log.message('Recorded payment for user %s, txid: %s, time: %s, amount: %s, fee: %s' % (uid, txid, time, amount, fee)) return True except database.psycopg2.Error as e: raise Exception(e.pgerror) from None except Exception as e: log.error('Failed to record payment for user %s, txid: %s, time: %s, amount: %s, fee: %s' % (uid, txid, time, amount, fee)) log.error(e) return False def update_payment_status(pymt_id, txid, txhash, status): """Change payment status""" try: database.execute("UPDATE payments SET txid=%s, txhash=%s, status=%s WHERE pymt_id=%s", (txid, txhash, status, pymt_id)) log.message('Updated payment status for pymt %s, txid: %s, txhash: %s, status: %s' % (pymt_id, txid, txhash, status)) return True except database.psycopg2.Error as e: raise Exception(e.pgerror) from None except Exception as e: log.error('Failed to update payment status for pymt %s, txid: %s, txhash: %s, status: %s' % (pymt_id, txid, txhash, status)) log.error(e) return False def update_failed_payment_status(pymt_id, txid, txhash, amount_fee): """Change payment status for payments with null txid""" try: database.execute("UPDATE payments SET txid=%s, txhash=%s, amount_fee=%s WHERE pymt_id=%s", (txid, txhash, amount_fee, pymt_id)) log.message('Updated null payment %s, txid: %s, txhash: %s, fee: %s' % (pymt_id, txid, txhash, amount_fee)) return True except database.psycopg2.Error as e: raise Exception(e.pgerror) from None except Exception as e: log.error('Failed to update null payment %s, txid: %s, txhash: %s, fee: %s' % (pymt_id, txid, txhash, amount_fee)) log.error(e) return False def get_pending_payments(): """Get rows from payments table where status = 0""" try: database.execute("SELECT pymt_id, txhash, txid, amount_paid FROM payments WHERE status = %s", (PAYMENT_STATUS_PENDING,)) return database.fetchall() except database.psycopg2.Error as e: raise Exception(e.pgerror) from None except Exception as e: log.error('Failed to get pending payments') log.error(e) return [] def get_balances_and_thresholds(): """Get users sum of credits, sum of payment, user threshold and wallet address""" try: query = """ SELECT info.uid, info.wallet, info.payment_threshold, COALESCE(credits_pending.sum, 0), COALESCE(credits_matured.sum, 0), COALESCE(debits.sum, 0) FROM ( SELECT uid, payment_threshold, wallet FROM users ) AS info LEFT JOIN ( SELECT uid, SUM( COALESCE(amount_reward, 0) + COALESCE(amount_bonus, 0) + COALESCE(amount_dev, 0) ) AS sum FROM credits WHERE status = 0 GROUP BY uid ) AS credits_pending ON credits_pending.uid = info.uid LEFT JOIN ( SELECT uid, SUM( COALESCE(amount_reward, 0) + COALESCE(amount_bonus, 0) + COALESCE(amount_dev, 0) ) AS sum FROM credits WHERE status = 1 GROUP BY uid ) AS credits_matured ON credits_matured.uid = info.uid LEFT JOIN ( SELECT uid, SUM( COALESCE(payments.amount_paid, 0) + COALESCE(payments.amount_fee, 0) ) AS sum FROM payments WHERE status <> -1 GROUP BY uid ) AS debits ON debits.uid = info.uid """ database.execute(query) return database.fetchall() except database.psycopg2.Error as e: raise Exception(e.pgerror) from None 
except Exception as e: log.error('Failed to get balances and thresholds') log.error(e) return [] def make_payments(): """Pay payments based on credits""" # i.e. [ { uid, addr_type, amount, address }, ... ] payments = [] now = database.walltime_to_db_time(time()) users = get_balances_and_thresholds() total_matured = 0 total_pending = 0 log.message('Building list of payments') for user in users: uid, wallet_addr, payment_threshold, credits_pending, credits_matured, debits = user confirmed_balance = credits_matured - debits total_matured += confirmed_balance total_pending += credits_pending if confirmed_balance < payment_threshold: continue # Limit the amount to pay to PAYMENTS_MAX_PAYMENT_AMOUNT because if # it is a really large amount, will get "tx not possible" amount_to_pay = min(confirmed_balance, PAYMENTS_MAX_PAYMENT_AMOUNT) wallet_info = validate(wallet_addr, COIN_ADDRESS_PREFIXES) if not wallet_info['valid']: log.error('User with uid %d has an invalid address %s, skipping...' % (uid, wallet_addr)) continue # Append to payments array payments.append({ 'uid': uid, 'addr_type': wallet_info['type'], 'amount': amount_to_pay, 'address': wallet_addr }) # sort payments by lowest amount first payments = sorted(payments, key=lambda k: k['amount']) log.message('Building list of payments... DONE') if not len(payments): log.message('No payments need to be made now') balance, unlocked_balance = wallet.get_balance() net_difference = balance - int(total_matured+total_pending) log.message('') log.message('Accounting check') log.message('Wallet:') log.message('==========================================================') log.message('| balance | unlocked | locked |') log.message('==========================================================') log.message('|%s|%s|%s|' % (str(balance).rjust(18), str(unlocked_balance).rjust(18), str(int(balance-unlocked_balance)).rjust(18))) log.message('==========================================================') log.message('') log.message('Owed to users:') log.message('==========================================================') log.message('| total | confirmed | unconfirmed |') log.message('==========================================================') log.message('|%s|%s|%s|' % (str(int(total_matured+total_pending)).rjust(18), str(total_matured).rjust(18), str(total_pending).rjust(18))) log.message('==========================================================') log.message('') log.message('Net (balance - owed): %d' % (net_difference,)) log.message('') if net_difference < -1 * PAYMENTS_WARNING_THRESHOLD: log.error('We owe more than we have in the wallet, quitting...') raise CriticalPaymentError() out_of_money = False # Continue building transactions until we run out of money or payees while not out_of_money and len(payments): balance, unlocked_balance = wallet.get_balance() log.message('Building transaction') log.message('Wallet has unlocked balance of: %d' % (unlocked_balance)) # payments that will be made in this transaction recipients = [] running_total = 0 if payments[0]['addr_type'] == 'integrated': log.message('This will be an exchange payment') if payments[0]['amount'] <= unlocked_balance: log.message('We have enough money') running_total = payments[0]['amount'] recipients = payments.pop(0) else: log.message('We do not have enough money') out_of_money = True break else: log.message('This will be a normal payment') i = 0 while len(recipients) < PAYMENTS_MAX_RECIPIENTS and i < len(payments): if payments[i]['addr_type'] == 'integrated': i += 1 continue if running_total + 
payments[i]['amount'] <= unlocked_balance: running_total += payments[i]['amount'] recipients.append(payments.pop(i)) else: out_of_money = True break if not out_of_money: log.message('We have enough money') elif len(recipients): log.message('We have enough money for partial payment') else: log.message('We do not have enough money') break log.message('Attempting transaction to pay %d users a total of %d' % (len(recipients), running_total)) fee_estimated = PAYMENTS_FEE_ADJ_FACTOR * fee.estimate_fee(recipients) fee_per_user = fee.split_fee(fee_estimated, len(recipients)) # this will hold recipient info with only amount and address for RPC recipients_rpc = [] for recipient in recipients: # subtract estimated fee for each user recipient['amount'] = int(recipient['amount'] - fee_per_user) # push this address into the wallet rpc list recipients_rpc.append({ 'amount': recipient['amount'], 'address': recipient['address'] }) # Make the actual transfer try: result = wallet.transfer(recipients_rpc) txid = result['tx_hash'] fee_actual = result['fee'] fee_actual_per_user = fee.split_fee(fee_actual, len(recipients)) log.message('Transaction success with txid %s' % (txid,)) log.message('Estimated fee - actual fee: %s - %s = %s' % (fee_estimated, fee_actual, fee_estimated - fee_actual)) except rpc.RpcError as re: log.error('Error transferring payment, reason: %s' % (re,)) log.error(recipients) # If RPC failed, we will still record debit with estimated fee and empty txid txid = None fee_actual_per_user = fee_per_user for recipient in recipients: uid = recipient['uid'] amount = recipient['amount'] # record payment and fee log.message('Debit user %s (amount, fee): %s %s' % (uid, amount, fee_actual_per_user)) if not record_payment(uid, txid, now, amount, fee_actual_per_user): log.error('Critical: failed to record payment for user %d' % (uid,)) raise CriticalPaymentError() def unlock(): # get all payments in at-risk zone from db payments_db = get_pending_payments() if len(payments_db) == 0: # nothing to do return # calculate at-risk zone start_height, wallet_height = wallet.get_at_risk_zone() log.message('Wallet height is %s, at risk zone is %s' % (wallet_height, start_height)) # get all payments in at-risk zone from rpc payments_rpc = wallet.get_outgoing_transfers(start_height) # dict to hold txids and their destinations # If we don't have a txid in db, use this to match by amount txids = {} for payment in payments_rpc: txids[payment['txid']] = { 'destinations': payment['destinations'], 'fee_per_user': fee.split_fee(payment['fee'], len(payment['destinations'])) } # get the tx info from daemon transactions = daemon.get_transactions(list(txids.keys())) # dict to hold tx hashes and their txid tx_hashes = {} # calculate txhash from all payments in rpc for transaction in transactions: txid = transaction['tx_hash'] block_height = transaction.get('block_height', 0) tx_as_json = json.loads(transaction['as_json']) # concats all the output key images in this tx k_image_concat = ''.join([vout['target']['key'] for vout in tx_as_json['vout']]) # hash key images with sha256 tx_hash = hashlib.sha256(bytes.fromhex(k_image_concat)).hexdigest() tx_hashes[tx_hash] = [txid, block_height] needs_rescan = False # loop though pending payments from db for payment in payments_db: pymt_id, tx_hash, txid, amount_paid = payment if txid is None: # we had an error submitting payment, skip for now # we will try and match this up by amount later continue if tx_hash is None: # we just submitted this payment, find the tx hash for key, val in 
tx_hashes.items(): if val[0] == txid: tx_hash = key break if tx_hash is None: # if new tx is not found in daemon, we need to skip for now log.error('Payment id %d with txid %s not found in daemon immediately after payment' % (pymt_id, txid)) continue else: update_payment_status(pymt_id, txid, tx_hash, PAYMENT_STATUS_PENDING) if tx_hash in tx_hashes: # we are still seeing this tx txid_new, block_height = tx_hashes[tx_hash] # delete txid_new from txids dict to mark it as accounted for if txid_new in txids: txids.pop(txid_new) # transaction malleability check if txid != txid_new: log.message('Transaction malleability check warning, txid %s -> %s' % (txid, txid_new)) update_payment_status(pymt_id, txid_new, tx_hash, PAYMENT_STATUS_PENDING) # check if tx is matured if block_height != 0 and wallet_height > block_height + BLOCK_MATURE_DEPTH: log.message('Transaction matured, txid %s' % (txid,)) update_payment_status(pymt_id, txid, tx_hash, PAYMENT_STATUS_MATURED) else: # this tx is orphaned log.message('Transaction orphaned, txid %s' % (txid,)) update_payment_status(pymt_id, txid, tx_hash, PAYMENT_STATUS_ORPHANED) needs_rescan = True # Loop through payments_db again looking for payments that have an null txid # at this point, we have removed any accounted for payments from txids dict for payment in payments_db: pymt_id, tx_hash, txid, amount_paid = payment fee_per_user = None if not txid is None: # we only care about payments without a txid continue log.message('Payment %s has null txid, attempting to fix' % (pymt_id,)) # Search through txid dict to match this payment by amount_paid for txid_search in txids: for recipient in txids[txid_search]['destinations']: if amount_paid == recipient['amount']: txid = txid_search fee_per_user = txids[txid_search]['fee_per_user'] break if txid is None: # we still have no txid, skip log.error('Cannot find txid for null payment %s, might have failed completely' % (pymt_id,)) continue log.message('Found txid for null payment %s, %s' % (pymt_id,txid)) # Find the tx_hash for key, val in tx_hashes.items(): if val[0] == txid: tx_hash = key break if tx_hash is None: # if new tx is not found in daemon, we need to skip for now log.error('Payment id %d with txid %s not found in daemon immediately after payment' % (pymt_id, txid)) continue update_failed_payment_status(pymt_id, txid, tx_hash, fee_per_user) if needs_rescan: log.message('Rescanning wallet') if wallet.rescan_bc(): log.message('Rescan complete') else: log.error('Rescan error')
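# unlock() above re-identifies outgoing transfers by hashing the concatenated output
# key images from the daemon's "as_json" field, because the txid itself is malleable.
# A self-contained sketch of that hash; the two hex keys below are made up for the
# example and are not real key images:
import hashlib
import json


def tx_key_image_hash(tx_as_json: str) -> str:
    tx = json.loads(tx_as_json)
    # concatenate all output key images, then hash them with sha256 (same recipe as unlock())
    k_image_concat = ''.join(vout['target']['key'] for vout in tx['vout'])
    return hashlib.sha256(bytes.fromhex(k_image_concat)).hexdigest()


if __name__ == '__main__':
    example_tx = json.dumps({'vout': [{'target': {'key': 'aa' * 32}},
                                      {'target': {'key': 'bb' * 32}}]})
    print(tx_key_image_hash(example_tx))  # deterministic digest for this set of outputs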
# -*- coding: utf-8 -*- """Advent of Code 2021 - Day 11: Dumbo Octopus.""" from copy import deepcopy from aoclib.geometry import Position from aoclib.helpers import timing def load_and_parse_input(input_file: str): puzzle: list[list[int]] = [] with open(input_file) as inf: for line in inf.readlines(): puzzle.append(list(map(int, list(line.strip())))) return puzzle def neighbors(pos: Position) -> list[Position]: """Return all neighbor positions for the given position.""" neighbors: list[Position] = [] # if we aren't on the top row, add the position above if pos.y > 0: neighbors.append(Position(pos.x, pos.y - 1)) # if we aren't on the top left corner, add the position up and to the left if pos.y > 0 and pos.x > 0: neighbors.append(Position(pos.x - 1, pos.y - 1)) # if we aren't on the bottom row, add the position below if pos.y < 9: neighbors.append(Position(pos.x, pos.y + 1)) # if we aren't on the top right corner, add the position up and to the right if pos.y > 0 and pos.x < 9: neighbors.append(Position(pos.x + 1, pos.y - 1)) # if we aren't on the left column, add the position to the left if pos.x > 0: neighbors.append(Position(pos.x - 1, pos.y)) # if we aren't on the bottom left corner, add the position down and to the left if pos.x > 0 and pos.y < 9: neighbors.append(Position(pos.x - 1, pos.y + 1)) # if we aren't on the right column, add the position to the right if pos.x < 9: neighbors.append(Position(pos.x + 1, pos.y)) # if we aren't on the bottom right corner, add the position down and to the right if pos.x < 9 and pos.y < 9: neighbors.append(Position(pos.x + 1, pos.y + 1)) return neighbors def part_1(puzzle): octopi: list[list[int]] = deepcopy(puzzle) total_flashes: int = 0 for _ in range(100): # increase the energy level of every octopus by 1 for row in range(10): for col in range(10): octopi[row][col] += 1 # loop to handle any flashes flashed = True while flashed: flashed = None for row in range(10): for col in range(10): if octopi[row][col] is not None and octopi[row][col] > 9: # flashy flashy flashed = True total_flashes += 1 # remove from possible flashing octopi octopi[row][col] = None # increase energy of all neighbors for neighbor in neighbors(Position(col, row)): if octopi[neighbor.y][neighbor.x] is not None: octopi[neighbor.y][neighbor.x] += 1 # reset any octopi that flashed to have an energy level of zero for row in range(10): for col in range(10): if octopi[row][col] is None: octopi[row][col] = 0 return total_flashes def part_2(puzzle): octopi: list[list[int]] = deepcopy(puzzle) all_flashed: int = 0 while True: all_flashed += 1 # increase the energy level of every octopus by 1 for row in range(10): for col in range(10): octopi[row][col] += 1 # loop to handle any flashes flashed = True while flashed: flashed = None for row in range(10): for col in range(10): if octopi[row][col] is not None and octopi[row][col] > 9: # flashy flashy flashed = True # remove from possible flashing octopi octopi[row][col] = None # increase energy of all neighbors for neighbor in neighbors(Position(col, row)): if octopi[neighbor.y][neighbor.x] is not None: octopi[neighbor.y][neighbor.x] += 1 # reset any octopi that flashed to have an energy level of zero for row in range(10): for col in range(10): if octopi[row][col] is None: octopi[row][col] = 0 # did all octopi flash? 
(energy level back to zero)
        if all(energy == 0 for row in octopi for energy in row):
            return all_flashed

    # we should never get here: the loop above only exits by returning
    return None


def solve(puzzle):
    with timing("part_1"):
        one = part_1(puzzle)
    with timing("part_2"):
        two = part_2(puzzle)
    return (one, two)


if __name__ == "__main__":
    expected = (1723, 327)
    puzzle = load_and_parse_input("inputs/11.txt")
    solution = solve(puzzle)
    print(solution)
    assert expected == solution, f"expected: {expected} actual: {solution}"
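# Quick sanity check for neighbors(): corner cells of the 10x10 grid have 3 neighbours
# and interior cells have 8. The stand-in point type below only needs the .x/.y
# attributes that neighbors() reads from its argument; the real solution always passes
# aoclib Position objects, so this helper is purely illustrative and must be called
# manually (e.g. from the __main__ block).
def sanity_check_neighbors() -> None:
    from collections import namedtuple

    P = namedtuple("P", ["x", "y"])  # minimal stand-in exposing .x and .y
    assert len(neighbors(P(0, 0))) == 3   # top-left corner
    assert len(neighbors(P(5, 5))) == 8   # interior cell
    assert len(neighbors(P(9, 9))) == 3   # bottom-right corner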
<filename>transcode/pyqtgui/qzones.py from PyQt5.QtGui import QIcon from PyQt5.QtCore import QTime, pyqtSignal, pyqtSlot from PyQt5.QtWidgets import (QAction, QVBoxLayout, QHBoxLayout, QScrollArea, QPushButton, QLabel, QWidget, QGridLayout, QComboBox, QMessageBox) from numpy import concatenate from .qframeselect import QFrameSelect from .qimageview import QImageView from .qfilterconfig import QFilterConfig from ..filters.video.zoned import Zone from ..filters.base import BaseFilter from av import VideoFrame class ZoneDlg(QFilterConfig): zonename = "Zone" zoneChanged = pyqtSignal(Zone) contentsModified = pyqtSignal() def __init__(self, *args, **kwargs): self.filtercopy = None self.zonecopy = None self._mode = 2 super().__init__(*args, **kwargs) self.toggleZoneAct = QAction("&Toggle", self, shortcut="Ctrl+T", triggered=self.toggleZone) self.addAction(self.toggleZoneAct) def _createControls(self): self.setWindowTitle(self.title) layout = QVBoxLayout(self) self.setLayout(layout) self.sourceWidget = QWidget(self) self.sourceSelection = self.createSourceControl(self.sourceWidget) self.sourceSelection.currentDataChanged.connect(self.setFilterSource) srclayout = QHBoxLayout() srclayout.addWidget(QLabel("Source: ", self.sourceWidget)) srclayout.addWidget(self.sourceSelection) self.sourceWidget.setLayout(srclayout) layout.addWidget(self.sourceWidget) self._createImageView() self._createZoneNavControls() self._createZoneControls() self._createZoneButtons() self._createGlobalControls() self._createDlgButtons() def _prevChanged(self, source): if source is None: self.slider.setPtsTimeArray(None) else: self.slider.setPtsTimeArray(source.pts_time) self.loadFrame(self.slider.slider.value(), self.slider.currentTime.time()) def _createImageView(self, layout=None, index=None): if layout is None: layout = self.layout() self.imageView = QImageView(self) if isinstance(layout, QGridLayout): layout.addWidget(self.imageView, *index) elif isinstance(layout, QScrollArea): layout.setWidget(self.imageView) elif index is not None: layout.insertWidget(index, self.imageView) else: layout.addWidget(self.imageView) def _createZoneNavControls(self, layout=None, index=None): if layout is None: layout = self.layout() self.prevBtn = QPushButton(self) self.prevBtn.setToolTip(f"Previous {self.zonename}") self.prevBtn.setIcon(QIcon.fromTheme("go-previous")) self.prevBtn.clicked.connect(self.prevZone) self.slider = QFrameSelect(self) self.slider.setStartEndVisible(True) self.slider.frameSelectionChanged.connect(self.loadFrame) self.nextBtn = QPushButton(self) self.nextBtn.setToolTip(f"Next {self.zonename}") self.nextBtn.setIcon(QIcon.fromTheme("go-next")) self.nextBtn.clicked.connect(self.nextZone) self._createModeBox() self.modeBox.currentIndexChanged.connect(self.setMode) self.toggleZoneBtn = QPushButton(f"&Insert {self.zonename} Here", self) self.toggleZoneBtn.clicked.connect(self.toggleZone) # 162523 sublayout1 = QHBoxLayout() sublayout1.addWidget(self.prevBtn) sublayout1.addWidget(self.slider) sublayout1.addWidget(self.nextBtn) sublayout3 = QHBoxLayout() if len(self.modeBox) > 1: sublayout3.addWidget(self.modeBox) else: self.modeBox.setHidden(True) sublayout3.addWidget(self.toggleZoneBtn) if isinstance(layout, QGridLayout): suplayout = QVBoxLayout() suplayout.addLayout(sublayout1) suplayout.addLayout(sublayout3) layout.addLayout(suplayout, *index) elif index is not None: layout.insertLayout(index, sublayout1) layout.insertLayout(index+1, sublayout3) else: layout.addLayout(sublayout1) layout.addLayout(sublayout3) def 
_createModeBox(self): self.modeBox = comboBox = QComboBox(self) comboBox.addItem("Original", 0) comboBox.addItem("Input", 1) comboBox.addItem("Output", 2) comboBox.setCurrentIndex(2) def _createZoneControls(self, layout=None, index=None): pass def _createZoneButtons(self, layout=None, index=None, layoutcls=QHBoxLayout): self.applyZoneBtn = QPushButton("Apply Zone Settings", self) self.resetZoneBtn = QPushButton("Reset Zone Settings", self) self.applyZoneBtn.clicked.connect(self.applyZone) self.resetZoneBtn.clicked.connect(self.resetZone) if layout is None: layout = self.layout() sublayout = layoutcls() sublayout.addStretch() sublayout.addWidget(self.applyZoneBtn) sublayout.addWidget(self.resetZoneBtn) if isinstance(layout, QGridLayout): layout.addLayout(sublayout, *index) elif index is not None: layout.insertLayout(index, sublayout) else: layout.addLayout(sublayout) def _resetControls(self): self.setZone(self.filtercopy[0]) self._resetGlobalControls() def setMode(self, mode): m = n = self.slider.slider.value() if self.zone: if self._mode == 0: if self.modeBox.currentData() == 1: if isinstance(self.filtercopy.prev, BaseFilter): idxmap = self.filtercopy.prev.cumulativeIndexMap else: idxmap = None if self.modeBox.currentData() == 2: idxmap = self.filtercopy.cumulativeIndexMap elif self._mode == 1: if self.modeBox.currentData() == 0: if isinstance(self.filtercopy.prev, BaseFilter): idxmap = self.filtercopy.prev.cumulativeIndexReverseMap else: idxmap = None if self.modeBox.currentData() == 2: idxmap = self.filtercopy.indexMap elif self._mode == 2: if self.modeBox.currentData() == 0: if isinstance(self.filtercopy, BaseFilter): idxmap = self.filtercopy.cumulativeIndexReverseMap else: idxmap = None if self.modeBox.currentData() == 1: idxmap = self.filtercopy.reverseIndexMap if idxmap is None: m = n elif n > 0: m = idxmap[n-1] + 1 else: m = idxmap[n] self._mode = self.modeBox.currentData() self._updateSliderInterval() self.slider.slider.setValue(m) self.loadFrame(m, QTime()) def _createGlobalControls(self, layout=None, index=None): pass def _createDlgButtons(self, layout=None, index=None, layoutcls=QHBoxLayout): if layout is None: layout = self.layout() self.okayBtn = QPushButton("&Okay", self) self.okayBtn.setDefault(True) self.okayBtn.clicked.connect(self.applyAndClose) self.applyBtn = QPushButton("&Apply", self) self.applyBtn.clicked.connect(self.apply) self.resetBtn = QPushButton("&Reset", self) self.resetBtn.clicked.connect(self.reset) self.closeBtn = QPushButton("&Close", self) self.closeBtn.clicked.connect(self.close) sublayout = layoutcls() sublayout.addStretch() sublayout.addWidget(self.okayBtn) sublayout.addWidget(self.applyBtn) sublayout.addWidget(self.resetBtn) sublayout.addWidget(self.closeBtn) if isinstance(layout, QGridLayout): layout.addLayout(sublayout, *index) elif index is not None: layout.insertLayout(index, sublayout) else: layout.addLayout(sublayout) def _updateSliderInterval(self): self.slider.blockSignals(True) if self._mode == 0: self.slider.setPtsTimeArray(self.filtercopy.source.pts_time) self.slider.setMinimum(self.zonecopy.src_start) self.slider.setMaximum(self.zonecopy.src_end - 1) if self._mode == 1: self.slider.setPtsTimeArray(self.filtercopy.prev.pts_time) self.slider.setMinimum(self.zonecopy.prev_start) self.slider.setMaximum(self.zonecopy.prev_end - 1) if self._mode == 2: pts_time = self.filtercopy.pts_time if self.zonecopy is not None: pts_time = concatenate(( pts_time[:self.zonecopy.dest_start], self.zonecopy.pts_time, pts_time[self.zonecopy.dest_end:] )) 
self.slider.setPtsTimeArray(pts_time) self.slider.setMinimum(self.zonecopy.dest_start) self.slider.setMaximum(self.zonecopy.dest_end - 1) self.slider.blockSignals(False) def setZone(self, zone): self.zone = zone if zone is not None: self.zonecopy = zone.copy() if self.filtercopy.prev is not None: self._updateSliderInterval() self.slider.slider.setValue(self.slider.slider.minimum()) self.loadFrame( self.slider.slider.minimum(), self.slider.currentTime.minimumTime() ) self.prevBtn.setDisabled(zone.prev is None) self.nextBtn.setDisabled(zone.next is None) self._resetZoneControls() self.zoneNotModified() self.zoneChanged.emit(zone) else: self.prevBtn.setDisabled(True) self.nextBtn.setDisabled(True) def _resetZoneControls(self): pass def _resetGlobalControls(self): pass @pyqtSlot() def toggleZone(self): n = self.slider.slider.value() newzone = (self._mode == 0 and n > self.zonecopy.src_start) or \ (self._mode == 1 and n > self.zonecopy.prev_start) or \ (self._mode == 2 and n > self.zonecopy.dest_start) if newzone and self.zonemodified: answer = self.askApplyZone() if answer == QMessageBox.Yes: self.applyZone() elif answer == QMessageBox.Cancel: return elif not newzone and self.askRemoveZone() == QMessageBox.No: return if self._mode == 0: if n > self.zonecopy.src_start: J, zone = self.filtercopy.insertZoneAt(int(n)) self.setZone(zone) else: prevzone = self.zonecopy.prev self.filtercopy.removeZoneAt(self.zonecopy.src_start) self.setZone(prevzone) self.slider.slider.setValue(n) elif self._mode == 1: if n > self.zone.prev_start: if isinstance(self.filtercopy.prev, BaseFilter): if n > 0: m = (self.filtercopy.prev.cumulativeIndexReverseMap [n-1] + 1) else: m = self.filtercopy.prev.cumulativeIndexReverseMap[n] else: m = n J, zone = self.filtercopy.insertZoneAt(int(m)) self.setZone(zone) else: prevzone = self.zone.prev self.filtercopy.removeZoneAt(self.zonecopy.src_start) self.setZone(prevzone) self.slider.setValue(n) elif self._mode == 2: if n > self.zone.dest_start: if n > 0: m = self.filtercopy.cumulativeIndexReverseMap[n-1] + 1 else: m = self.filtercopy.cumulativeIndexReverseMap[n] J, zone = self.filtercopy.insertZoneAt(int(m)) self.setZone(zone) else: prevzone = self.zone.prev self.filtercopy.removeZoneAt(self.zonecopy.src_start) self.setZone(prevzone) self.slider.setValue(n) self.isModified() @pyqtSlot() def prevZone(self): if self.zonemodified: answer = self.askApplyZone() if answer == QMessageBox.Yes: self.applyZone() elif answer == QMessageBox.Cancel: return self.setZone(self.zone.prev) @pyqtSlot() def nextZone(self): if self.zonemodified: answer = self.askApplyZone() if answer == QMessageBox.Yes: self.applyZone() elif answer == QMessageBox.Cancel: return self.setZone(self.zone.next) @pyqtSlot(int, QTime) def loadFrame(self, n, t): if n is None: n = self.slider.slider.value() if self._mode == 0: frame = next(self.filtercopy.source.iterFrames( n, whence="framenumber")) if n > self.zonecopy.src_start: self.toggleZoneBtn.setText(f"&Insert {self.zonename} Here") else: self.toggleZoneBtn.setText(f"&Remove {self.zonename} Here") elif self._mode == 1: frame = next(self.filtercopy.prev.iterFrames( n, whence="framenumber")) if n > self.zone.prev_start: self.toggleZoneBtn.setText(f"&Insert {self.zonename} Here") else: self.toggleZoneBtn.setText(f"&Remove {self.zonename} Here") elif self._mode == 2: frame = self.generatePreview(n) if n > self.zonecopy.dest_start: self.toggleZoneBtn.setText(f"&Insert {self.zonename} Here") else: self.toggleZoneBtn.setText(f"&Remove {self.zonename} Here") im = 
frame.to_rgb().to_image() pixmap = im.toqpixmap() self.imageView.setFrame(pixmap) self.toggleZoneBtn.setEnabled(n > 0) def generatePreview(self, n): try: return next(self.zonecopy.iterFrames(n)) except StopIteration: return VideoFrame( width=self.filtercopy.width, height=self.filtercopy.height) @pyqtSlot() def applyZone(self): self.zone.__setstate__(self.zonecopy.__getstate__()) self.zoneNotModified() self.isModified() @pyqtSlot() def resetZone(self): self.zonecopy.__setstate__(self.filter.__getstate__()) self._resetZoneControls() self.zoneNotModified() def zoneModified(self): self.zonemodified = True if hasattr(self, "applyZoneBtn") and self.applyZoneBtn is not None: self.applyZoneBtn.setDisabled(False) if hasattr(self, "resetZoneBtn") and self.resetZoneBtn is not None: self.resetZoneBtn.setDisabled(False) if self.filter is not self.filtercopy: self.isModified() def zoneNotModified(self): self.zonemodified = False if hasattr(self, "applyZoneBtn") and self.applyZoneBtn is not None: self.applyZoneBtn.setDisabled(True) if hasattr(self, "resetZoneBtn") and self.resetZoneBtn is not None: self.resetZoneBtn.setDisabled(True) def apply(self): if self.zonemodified: answer = self.askApplyZone() if answer == QMessageBox.Yes: self.applyZone() elif answer == QMessageBox.Cancel: return super().apply() def closeEvent(self, event): if self.zonemodified and not self.modified: answer = self.askApplyZone() if answer == QMessageBox.Yes: self.applyZone() elif answer == QMessageBox.Cancel: event.ignore() return super().closeEvent(event) def askApplyZone(self): return QMessageBox.question( self, "Apply zone settings?", "Do you wish to apply zone settings?", QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel) def askRemoveZone(self): return QMessageBox.question( self, "Remove?", "Are you sure you wish to remove zone?", QMessageBox.Yes | QMessageBox.No)
<reponame>BUT-GRAPH-at-FIT/Automatic-Camera-Calibration import tensorflow as tf import numpy as np import sys def fun(paramsVec): return tf.reduce_prod(paramsVec*paramsVec, axis=1) def funND(paramsVec): return tf.reduce_prod(paramsVec*paramsVec, axis=2) def my_differential_evolution_single(func, bounds, params, iter=1000, max_same_iter=20, popsize=15, mutation=(0.5, 1.0), recombination=0.7, disp=False): dimensions = len(bounds) while popsize % 3 != 0: popsize += 1 population = tf.convert_to_tensor(np.random.rand(popsize, dimensions), dtype=tf.float64) bounds = tf.convert_to_tensor(bounds, dtype=tf.float64) min_bounds = tf.minimum(bounds[:,0], bounds[:,1]) max_bounds = tf.maximum(bounds[:,0], bounds[:,1]) diff = tf.abs(min_bounds - max_bounds) population_denorm = min_bounds + population * diff fitness = func(population_denorm, params) cost_value = sys.float_info.max cost_iter = 0 for i in range(iter): random_trio_1 = tf.reshape(tf.gather(population, tf.random.shuffle(tf.range(popsize))), shape=[-1, 3, dimensions]) random_trio_2 = tf.reshape(tf.gather(population, tf.random.shuffle(tf.range(popsize))), shape=[-1, 3, dimensions]) random_trio_3 = tf.reshape(tf.gather(population, tf.random.shuffle(tf.range(popsize))), shape=[-1, 3, dimensions]) mutation_trios = tf.concat([random_trio_1, random_trio_2, random_trio_3], axis=0) vectors_1, vectors_2, vectors_3 = tf.unstack(mutation_trios, axis=1, num=3) mutants = vectors_1 + tf.convert_to_tensor(np.random.uniform(mutation[0], mutation[1]), dtype=tf.float64) * (vectors_2 - vectors_3) mutants = tf.clip_by_value(mutants, 0, 1) crossover_probabilities = tf.convert_to_tensor(np.random.rand(popsize, dimensions), dtype=tf.float64) trial_population = tf.where(crossover_probabilities < recombination, x=mutants, y=population) trial_denorm = min_bounds + trial_population * diff trial_fitness = func(trial_denorm, params) cond = tf.tile(tf.expand_dims(trial_fitness < fitness, -1), [1, tf.shape(population)[1].numpy()]) population = tf.where(cond, x=trial_population, y=population) fitness = tf.where(trial_fitness < fitness, x=trial_fitness, y=fitness) act_cost = int(round(tf.gather(fitness, tf.argmin(fitness)).numpy())) if act_cost < cost_value: cost_value = act_cost cost_iter = i if (i - cost_iter) > max_same_iter: break if disp: print('Iteration {:04d} / {:04d}, error {:.6f}'.format(i+1, iter, tf.gather(fitness, tf.argmin(fitness)).numpy()))#tf.gather(fitness, tf.argmin(fitness)), min_bounds + tf.gather(population, tf.argmin(fitness)) * diff) population_denorm = min_bounds + population * diff return tf.gather(population_denorm, tf.argmin(fitness)) def my_differential_evolution(func, bounds, params, iter=1000, max_same_iter=20, popsize=15, mutation=(0.5, 1.0), recombination=0.7, disp=False, batch_size=3): dimensions = len(bounds) while popsize % 3 != 0: popsize += 1 population = tf.convert_to_tensor(np.random.rand(batch_size, popsize, dimensions), dtype=tf.float64) bounds = tf.convert_to_tensor(bounds, dtype=tf.float64) min_bounds = tf.minimum(bounds[:,0], bounds[:,1]) max_bounds = tf.maximum(bounds[:,0], bounds[:,1]) diff = tf.abs(min_bounds - max_bounds) population_denorm = min_bounds + population * diff fitness = func(population_denorm, params) idxs = tf.reshape(tf.tile(tf.reshape(tf.range(batch_size), [-1,1]), [1,popsize]), [batch_size*popsize,1]) cost_value = sys.float_info.max cost_iter = 0 for i in range(iter): idxs_rand_1 = tf.reshape(tf.tile(tf.reshape(tf.random.shuffle(tf.range(popsize)), [1,-1]), [batch_size,1]), [batch_size*popsize,1]) random_trio_1 = 
tf.reshape(tf.gather_nd(population, tf.concat((idxs, idxs_rand_1), 1)), [batch_size, -1, 3, dimensions]) idxs_rand_2 = tf.reshape(tf.tile(tf.reshape(tf.random.shuffle(tf.range(popsize)), [1,-1]), [batch_size,1]), [batch_size*popsize,1]) random_trio_2 = tf.reshape(tf.gather_nd(population, tf.concat((idxs, idxs_rand_2), 1)), [batch_size, -1, 3, dimensions]) idxs_rand_3 = tf.reshape(tf.tile(tf.reshape(tf.random.shuffle(tf.range(popsize)), [1,-1]), [batch_size,1]), [batch_size*popsize,1]) random_trio_3 = tf.reshape(tf.gather_nd(population, tf.concat((idxs, idxs_rand_3), 1)), [batch_size, -1, 3, dimensions]) mutation_trios = tf.concat([random_trio_1, random_trio_2, random_trio_3], axis=1) vectors_1, vectors_2, vectors_3 = tf.unstack(mutation_trios, axis=2, num=3) mutants = vectors_1 + tf.convert_to_tensor(np.random.uniform(mutation[0], mutation[1]), dtype=tf.float64) * (vectors_2 - vectors_3) mutants = tf.clip_by_value(mutants, 0, 1) crossover_probabilities = tf.convert_to_tensor(np.random.rand(batch_size, popsize, dimensions), dtype=tf.float64) trial_population = tf.where(crossover_probabilities < recombination, x=mutants, y=population) trial_denorm = min_bounds + trial_population * diff trial_fitness = func(trial_denorm, params) use_trial = tf.tile(tf.expand_dims(trial_fitness < fitness, -1), (1,1,dimensions)) population = tf.where(use_trial, x=trial_population, y=population) fitness = tf.where(trial_fitness < fitness, x=trial_fitness, y=fitness) act_cost = int(round(np.mean(tf.gather(fitness, tf.argmin(fitness)).numpy()))) if act_cost < cost_value: cost_value = act_cost cost_iter = i if (i - cost_iter) > max_same_iter: break if disp: # print(tf.gather(fitness, tf.argmin(fitness)), tf.gather(population, tf.argmin(fitness))) print('Iteration inner {:04d} / {:04d}, error {:.6f}'.format(i+1, iter, np.mean(tf.gather(fitness, tf.argmin(fitness)).numpy()))) population_denorm = min_bounds + population * diff return tf.gather_nd(population_denorm, tf.concat((tf.reshape(tf.range(batch_size), [-1,1]), tf.reshape(tf.cast(tf.argmin(fitness, axis=1), tf.int32), [-1,1])), 1))
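# Usage sketch for my_differential_evolution_single(). Note that the optimiser calls the
# objective as func(population, params) with a whole population of shape
# [popsize, dimensions], so the one-argument fun() defined at the top of this file would
# need a wrapper. The sphere objective and the bounds below are illustrative only:
def sphere(population, params):
    # fitness of each individual = sum of squared coordinates; params is unused here
    return tf.reduce_sum(population * population, axis=1)


if __name__ == '__main__':
    best = my_differential_evolution_single(sphere, bounds=[(-5.0, 5.0), (-5.0, 5.0)],
                                            params=None, iter=200)
    print(best.numpy())  # roughly [0., 0.]; the integer-rounded early stop limits precision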
<gh_stars>0 """ Module containing the SecureStompMessenger class, used for communicating using the STOMP protocol. """ from encrypt_utils import encrypt_message, decrypt_message, verify_message, \ sign_message, verify_certificate, from_file, get_certificate_subject, \ check_cert_key, message_hash import get_brokers import stomp import stomp.exception import os from threading import Thread, Lock import socket import base64 import logging import time import zlib log = logging.getLogger('SSM') ################################################################################ # Constants that are unlikely to change # Times in seconds MSG_CHECK_TIME = 1 ACK_WAIT_TIME = 60 CONNECTION_TIMEOUT = 5 # Times in minutes PING_TIMER = 2 REFRESH_BROKERS = 1440 CLEAR_CONSUMER_CERT = 1440 NO_MSG_ID = 'noid' MSG_REJECT = 'REJECTED' SSM_ID_HEADER = 'ssm-msg-id' SSM_REACK_HEADER = 'ssm-reack' SSM_MSG_TYPE = 'ssm-msg-type' SSM_NORMAL_MSG = 'msg' SSM_ACK_MSG = 'ack' SSM_PING_MSG = 'ping' SSM_CERT_REQ_MSG = 'certreq' SSM_CERT_RESP_MSG = 'certresp' # End of constants ################################################################################ SSM_LOGGER_ID = 'SSM' ################################################################################ # Dummy classes used for configuration data class Config: pass class ProducerConfig: pass class ConsumerConfig: pass # ################################################################################ class SsmException(Exception): """ Exception class for use by the SSM. """ pass class SecureStompMessenger(object): """ Secure Stomp Messenger class, implementing a *reliable* message transfer function over Stomp protocol. Listens to a nominated topic/queue for incoming messages, which are signed and encrypted. Messages are handled by a MessageDB object (see message_db.py for a basic model that uses the filesystem). Also implements an acknowledgment mechanism to ensure that message delivery has occurred at the consuming end, and a Re-Acknowledgment to allow a consumer to verify that an ack has been recieved by the producer, so we can stop worrying about duplicated messages arriving. """ def __init__(self, messagedb, configuration, producer_config, consumer_config): """ Given a messagedb object and configuration objects, create an SSM. """ # The stomppy connection object self._connection = None self._received = 0 # message counter # this is the identifier of the last message that we ack'd self._last_acked = None # this is the md5 sum of the previous message that was ack'd, and also # the re-ack value we need to send as part of our next message. self._reack = None # how often the SSM will check the outgoing directory for messages # to check. This may be changed in the configuration. self.msg_check_time = MSG_CHECK_TIME # a string representing the PEM encoded certificate of the consumer, we # use this certificate during the encryption process self._consumer_certificate = None # A separate thread to do various regular tasks every # so often without interrupting control flow. self._admin_thread = None # A list of the valid DNs we have available. It should be used only # when the lock below is acquired. self._valid_dns = [] # A Lock object so that the _valid_dns list is never used by two threads # at the same time. self._valid_dn_lock = Lock() # How often we reset the host certificate, meaning that it must be # requested again. 
self._clear_cert_interval = CLEAR_CONSUMER_CERT # How often we reload the valid DNs file, so it can be updated separately # and a running SSM will pick up the changes. self._read_dn_interval = None # How often we request the list of brokers from the BDII. self._fetch_brokers_interval = REFRESH_BROKERS # List of (host, port) tuples of brokers found in the BDII self._network_brokers = [] # A Lock object so that the _network_brokers list is never used by # two threads at the same time. self._network_brokers_lock = Lock() # Host and port of the currently-used broker. self._broker_host = None self._broker_port = None self._messagedb = messagedb self._configuration = configuration self._producer_config = producer_config self._consumer_config = consumer_config self._check_config() # Whether the SSM will run as a daemon or not. self._daemon = configuration.daemon # Connection tracking. This is set to True whenever on_connected() # is called. self._is_connected = False # TODO: need a more respectable name. self._dead = False self._death_exception = None def _check_config(self): """ Call the check_ssm_config() method on this instance of the SSM. """ check_ssm_config(self._configuration, self._consumer_config, self._producer_config) def startup(self): """ Set up one-off configuration for the SSM. If daemon, start a separate thread to do checks every so often. Finally, start the SSM's connection to a broker via STOMP """ if self._consumer_config is not None: self._read_valid_dns() self._read_dn_interval = self._consumer_config.read_valid_dn_interval try: self.msg_check_time = self._producer_config.msg_check_time except AttributeError: # Keep default value pass self._fetch_brokers() # Connect self._handle_connect() # Finally, set the background thread going to run periodic checks. if self._daemon: self._create_pidfile() self._admin_thread = Thread(target = self._run_admin_thread) # This daemon is separate to our daemon concept. This thread # must always be set as a daemon so that it ends when the process # is killed. self._admin_thread.setDaemon(True) self._admin_thread.start() log.debug("Started the background thread.") def shutdown(self): """ If running as a daemon, remove the pid file. Then close the connection. self._admin_thread MUST be set as a daemon, and so does not need to be handled here. """ if self._daemon: self._remove_pidfile() self._close_connection() def initialise_connection(self): """ Create the self._connection object with the appropriate properties, but don't try to start the connection. """ # abbreviation cfg = self._configuration ssl_key = None ssl_cert = None username = None pwd = None if cfg.use_ssl: ssl_key = cfg.key ssl_cert = cfg.certificate log.info("Connecting using SSL using key %s and cert %s." % (ssl_key, ssl_cert)) if cfg.use_pwd: username = cfg.username pwd = <PASSWORD> log.info("Connecting using password using username %s." % (username)) self._connection = stomp.Connection([(self._broker_host, self._broker_port)], reconnect_sleep_initial = 5, use_ssl = cfg.use_ssl, user = username, passcode = <PASSWORD>, ssl_key_file = ssl_key, ssl_cert_file = ssl_cert) # You can set this in the constructor but only for stomppy version 3. # This works for stomppy 3 but doesn't break stomppy 2. self._connection.__reconnect_attempts_max = 1 self._connection.__timeout = CONNECTION_TIMEOUT self._connection.set_listener('SecureStompMessenger', self) def start_connection(self): """ Once self._connection exists, attempt to start it and subscribe to the relevant topics. 
If the timeout is reached without receiving confirmation of connection, raise an exception. """ cfg = self._configuration if self._connection is None: raise SsmException("Called start_connection() before a connection \ object was initialised.") self._connection.start() self._connection.connect(wait = False) if self._consumer_config is not None: log.info("The SSM will run as a consumer.") topic = self._consumer_config.listen_to self._connection.subscribe(destination=topic, ack='auto') log.debug('Subscribing as a consumer to: ' + topic) else: log.info("The SSM will not run as a consumer.") if self._producer_config is not None: log.info("The SSM will run as a producer.") cfg = self._producer_config self._connection.subscribe(destination=cfg.ack_queue, ack='auto') log.debug('I will be a producer, my ack queue is: '+cfg.ack_queue) else: log.info("The SSM will not run as a producer.") self._last_acked = None self._reack = None i = 0 while not self._is_connected: time.sleep(0.1) if i > CONNECTION_TIMEOUT * 10: message = "Timed out while waiting for connection. Check the connection details." raise SsmException(message) i += 1 def _close_connection(self): """ Close the connection. This is important because it runs in a separate thread, so it can outlive the main process if it is not ended. """ try: self._connection.disconnect() except (stomp.exception.NotConnectedException, socket.error): self._connection = None except AttributeError: # AttributeError if self._connection is None already pass log.info("SSM connection ended.") def _handle_connect(self): """ Assuming that the SSM has retrieved the details of the broker or brokers it wants to connect to, connect to one. If more than one is in the list self._network_brokers, try to connect to each in turn until successful. """ # If we've got brokers from the BDII. if self._network_brokers: for host, port in self._network_brokers: self._broker_host = host self._broker_port = port self.initialise_connection() try: self.start_connection() break except stomp.exception.ReconnectFailedException, e: # Exception doesn't provide a message. log.warn("Failed to connect to %s:%s." % (host, port)) except SsmException, e: log.warn("Failed to connect to %s:%s: %s" % (host, port, str(e))) else: # Broker host and port specified. self.initialise_connection() try: self.start_connection() except stomp.exception.ReconnectFailedException, e: # Exception doesn't provide a message. log.warn("Failed to connect to %s:%s." % (self._broker_host, self._broker_port)) except SsmException, e: log.error("Failed to connect to %s:%s: %s" % (self._broker_host, self._broker_port, str(e))) if not self._is_connected: raise SsmException("Attempts to start the SSM failed. The system will exit.") def _handle_disconnect(self): """ When disconnected, attempt to reconnect using the same method as used when starting up. """ self._is_connected = False # Shut down properly self._close_connection() # Sometimes the SSM will reconnect to the broker before it's properly # shut down! This prevents that. time.sleep(2) # Try again according to the same logic as the initial startup try: self._handle_connect() except SsmException: self._is_connected = False # If reconnection fails, admit defeat. if not self._is_connected: error_message = "Reconnection attempts failed and have been abandoned." self._death_exception = SsmException(error_message) self._dead = True def _create_pidfile(self): """ Create the pidfile. 
""" try: f = open(self._configuration.pidfile, "w") f.write(str(os.getpid())) f.write("\n") f.close() return True except IOError, e: log.warn("Failed to create pidfile %s: %s" % (self._configuration.pidfile, e)) return False def _remove_pidfile(self): """ If the pidfile exists, remove it. If it doesn't exist, or there's a problem removing it, log the problem. Returns True if the pidfile is successfully removed. """ pidfile = self._configuration.pidfile try: if os.path.exists(pidfile): os.remove(pidfile) return True else: log.warn("pidfile %s not found." % pidfile) except IOError, e: log.warn("Failed to remove pidfile %s: %e" % (pidfile, e)) log.warn("The SSM may not start again until it is removed.") return False def is_dead(self): """ Whether the 'dead' flag has been set to true. """ return self._dead def get_death_exception(self): """ If the 'dead' flag has been set, this should return the relevant exception. """ return self._death_exception def on_connecting(self, host_and_port): """ Called by stomppy when a connection attempt is made. """ log.info('Connecting: ' + str(host_and_port)) def on_connected(self, unused_headers, unused_body): """ Set connection tracking to connected. Called by stomppy when the connection is acknowledged by the broker. """ log.info('Connected') self._is_connected = True def on_disconnected(self, headers, body): """ Attempt to handle the disconnection. Called by stomppy when a disconnect frame is sent. """ log.warn('Disconnected from broker.') log.debug(headers) log.debug(body) # If we were connected before, try to reconnect. # If we weren't, accept disconnection. if self._is_connected: self._is_connected = False self._handle_disconnect() else: log.warn("Broker refused connection.") def on_send(self, headers, body): """ Called by stomppy when a message is sent. """ pass def on_message(self, headers, body): """ Process an incoming message. The message may contain extra header information causing us to handle it differently. The types of message we handle are normal messages or acknowledgements. Normal messages may contain a re-acknowledgement value, to let us know the producer got our previous ack. 
""" try: log.debug('Receiving message from: ' + headers['destination']) self._received = (self._received + 1) % 1000 if SSM_REACK_HEADER in headers.keys(): # we got a re-ack from the producer, so we can safely remove the # ack-tracking information for the message log.debug('Got reack ' + headers[SSM_REACK_HEADER]) self._messagedb.clear_message_ack(headers[SSM_REACK_HEADER]) # don't need to do anything with a ping if headers[SSM_MSG_TYPE] == SSM_PING_MSG: return # handle certificate request by responding with our certificate if headers[SSM_MSG_TYPE] == SSM_CERT_REQ_MSG: log.debug('Certificate requested') self._send_message(headers['reply-to'], SSM_CERT_RESP_MSG, NO_MSG_ID, from_file(self._configuration.certificate)) return # handle certificate response if headers[SSM_MSG_TYPE] == SSM_CERT_RESP_MSG: if self._consumer_certificate is None: log.info('Certificate received') try: check_crls = self._configuration.check_crls capath = self._configuration.capath if not verify_certificate(body, capath, check_crls): raise SsmException, 'Certificate failed to verify' if self._producer_config.consumerDN != get_certificate_subject(body): log.error('Expected ' + self._producer_config.consumerDN + ', but got ' + get_certificate_subject(body)) raise SsmException, 'Certificate does not match consumerDN configuration' log.debug(get_certificate_subject(body)) self._consumer_certificate = body except Exception, err: log.warning('Certificate not verified: ' + str(err)) else: log.warning('Unexpected certificate - ignored') return if self._producer_config is not None and headers['destination'] == self._producer_config.ack_queue: # message is an ack to our previous message; the message ID will be # the md5sum of our previously sent message log.debug('Received ack for ' + headers[SSM_ID_HEADER]) self._last_acked = headers[SSM_ID_HEADER] return if headers[SSM_MSG_TYPE] == SSM_NORMAL_MSG: self._decrypt_verify(headers, body) return # Finally, as it's not an expected message type, reject it: log.warn("Unexpected message type received: %s", headers[SSM_MSG_TYPE]) log.warn("Message was ignored.") except KeyError: log.warn("Incoming message did not have the necessary headers.") log.warn("Message was ignored.") except Exception, e: log.error("Unexpected exception while handling incoming message:") log.error("%s: %s" % (type(e), e)) log.error("Message was ignored.") def on_error(self, headers, body): """ Called by stomppy if an error frame is received. """ log.warning('Error frame received.') log.debug("Error frame headers:") log.debug(headers) log.debug(body) def on_receipt(self, headers, unused_body): """ Called by stomppy when a message is received by the broker. """ self._received = self._received + 1 log.debug('Broker received ' + headers['receipt-id']) def process_outgoing(self): """ Pick the first message from the outgoing message queue and send it, waiting for acknowledgement before returning. Raises an exception if the message is not acked. Returns True if a message was sent, or False if no message was sent or if there are no messages to send. 
""" if not self._is_connected: return False (msg_id, msg_data)= self._messagedb.get_outgoing_message() if msg_id is None: return False # Ensure we've got a valid certificate to encode with if self._consumer_certificate is None: log.info('No certificate, requesting') self._request_certificate() m5 = message_hash(msg_data) log.debug('Hash: ' + m5) # we also need to base64 encode the message, because OpenSSL SMIME changes # CR to CR/LFs in the messages which I can't figure out how to stop! # Base64 ensures that we get the same message at the other side, and works # for binary too. log.debug('Raw length: ' + str(len(msg_data))) msg64 = base64.encodestring(zlib.compress(msg_data)) log.debug('Encoded length: ' + str(len(msg64))) # send the signed/encrypted message to the topic cert = self._configuration.certificate key = self._configuration.key log.debug('Signing') signed = sign_message(msg64, cert, key) log.debug('Encrypting signed message of length ' + str(len(signed))) # Here, Kevin had compressed it again as well as encrypting. # This meant that the content of the message sent was not ascii, # and this was the cause of the conflict in versions of stomppy. # If we leave it as encrypted, the message will be ascii-only. encrypted = encrypt_message(signed, self._consumer_certificate) log.debug('Encrypted length: '+str(len(encrypted))) if self._send_message(self._producer_config.send_to, SSM_NORMAL_MSG, msg_id, encrypted): # We're now waiting for an acknowledgement; this arrives over # our acknowledgement topic/queue counter = ACK_WAIT_TIME / 0.2 while counter and self._waiting_for_ack(m5): time.sleep(0.2) counter -= 1 # Rejection logging happens only after the count has run down. # This is possibly not ideal. if self._last_acked == MSG_REJECT: log.error('Message rejected by consumer') raise SsmException, 'Message ' + msg_id + ' was rejected by consumer' if counter == 0: raise SsmException, 'No acknowledgement received for message ' + msg_id self._messagedb.clear_outgoing_message(msg_id) log.debug('Message ' + msg_id + ' acknowledged by consumer') self._reack = m5 return True return False def _send_message(self, destination, msg_type, msg_id, msg): """ Send msg to the destination topic/queue, using the msg_id value as the message identifier. You may use SSM_PING_MSG for this value if an empty (ping) message is to be sent. """ if not self._is_connected: log.error('Failed to send message ID %s: not connected' % msg_id) return False log.debug('Sending ' + msg_id) # standard headers, which we'll add to as necessary... hdrs = {SSM_MSG_TYPE: msg_type, 'destination': destination, \ 'receipt': msg_id, SSM_ID_HEADER: msg_id} # We only need a reply-to header if we're a producer. if self._producer_config is not None: hdrs['reply-to'] = self._producer_config.ack_queue if self._reack is not None: # send out our re-ack to tell the consumer that we've seen his ack # to our previous message, so the consumer can stop tracking the # fact it received the previous message hdrs[SSM_REACK_HEADER] = self._reack log.debug('Sending reack '+self._reack) self._reack = None self._connection.send(msg, headers=hdrs) # only track acks for genuine messages if msg_id != SSM_PING_MSG: self._last_acked = None return True def _send_ack(self, destination, msg_hash): """ Send acknowledgment of the msg_id to the destination topic/queue. If the message is rejected, MSG_REJECT can be sent instead of the hash. 
""" if self._is_connected: log.debug('Sending ack for ' + msg_hash) self._connection.send('', headers = {\ 'destination': destination, \ 'receipt': msg_hash, \ SSM_MSG_TYPE: SSM_ACK_MSG,\ SSM_ID_HEADER: msg_hash}) def _waiting_for_ack(self, msg_id): """ Return False if the msg_id matches the self._last_acked id. i.e. if it's the same, we're no longer waiting for the ack. """ return (self._last_acked != msg_id) def _clear_certificate(self): """ Clears the stored certificate of the consumer to which we are sending messages. This just means that the next time we come to send a message, we'll request the certificate first. """ self._consumer_certificate = None def _request_certificate(self): """ Handle process of sending a request for the consumer's host certificate, so we can use it during the encryption phase of transmitting messages. Raises an exception if the certificate request is not answered, or returns True if the certificate is received NB The certificate response is handled asynchronously in on_message """ self._clear_certificate() if self._send_message(self._producer_config.send_to, SSM_CERT_REQ_MSG, NO_MSG_ID, ''): counter = ACK_WAIT_TIME / 0.2 # wait for an acknowledgement which should contain the certificate while counter and self._consumer_certificate == None: counter -= 1 time.sleep(0.2) if counter == 0: raise SsmException, 'Timed out waiting for certificate' log.debug('Got certificate') return True raise SsmException, 'Failed to retrieve certificate' def _decrypt_verify(self, headers, body): """ Message handler for normal messages, it will decrypt and then attempt to verify that the message is from a genuine producer. Assuming success, the message will be written to a directory for another process to read from. """ # Try to decrypt the message, and then verify the producer of the # message. Assuming success, the message will be written out to the # message database log.debug('Decrypt/verify message length '+str(len(body))) log.debug(headers) cert = self._configuration.certificate key = self._configuration.key capath = self._configuration.capath check_crls = self._configuration.check_crls try: # Note - here I've removed the additional compression. decrypted = decrypt_message(body, cert, key, capath) signer, message64 = verify_message(decrypted, capath, check_crls) message = zlib.decompress(base64.decodestring(message64)) except Exception, err: log.error('Failed decrypt or verify:%s\n' % str(err)) else: if not self._valid_sender(signer): log.warning('Received from invalid DN: ' + signer) self._send_ack(headers['reply-to'], MSG_REJECT) return msg_hash = message_hash(message) msg_id = time.strftime('%Y%m%d-%H%M%S-') + str(self._received).zfill(3) if self._messagedb.new_incoming_message(msg_id, signer, message): log.debug('Saved new message ' + msg_id) else: log.warning('Already received, ignored ' + msg_id) # finally, ack the message. We don't need to sign the ack because the # ack contains the md5sum of the *decrypted* message, and only the # producer and consumer share this secret. # It is possible that the ack might not get to the producer, and so # someone else could spoof the consumer into thinking the producer has # sent a re-ack, but this is very unlikely and won't result in data # loss - at worst it may cause a duplicated message may be received, # but again - unlikely. self._send_ack(headers['reply-to'], msg_hash) def _run_admin_thread(self): """ Method run by a background thread. 
It does various repeating tasks which need to be done periodically, but shouldn't interfere with the regular control flow of the SSM. The tasks shouldn't take any significant time. This method needs to be called by a threading.Thread object with daemon set to true; otherwise it will hang indefinitely even if the python process is killed. """ # Number of seconds per loop. interval = 60 # Don't start the counter at 0 so all the methods don't get called # first time round. counter = 1 try: while True: if counter % PING_TIMER == 0: self._send_ping() if counter % self._fetch_brokers_interval == 0: self._fetch_brokers() # Read valid DNs only if running as consumer if self._consumer_config is not None: if counter % self._read_dn_interval == 0: self._read_valid_dns() # Clear consumer certificate only if running as producer if self._producer_config is not None: if counter % self._clear_cert_interval == 0: self._reset_certificate() counter = (counter + 1) % 10080 # minutes in a week time.sleep(interval) except Exception, e: # Exceptions in the background thread lead to undefined behaviour, # and it's difficult to recover. Best to die, but also to be # careful with what the thread does to make sure it doesn't # happen often. log.warn("Exception in the admin thread:") log.warn(type(e)) log.warn(e) self._death_exception = e self._dead = True def _send_ping(self): """ Send a ping request. """ if self._is_connected: log.debug('Pinging') destination = None try: destination = self._producer_config.send_to except AttributeError: destination = self._consumer_config.listen_to self._send_message(destination, SSM_PING_MSG, SSM_PING_MSG, '') def _read_valid_dns(self): """ Read a list of valid DNs from the file. We assume that all lines in the file are valid DNs. Like _send_ping, this has got to repeat every so often, and it uses a Timer which is another thread. Because it needs access to the valid_dns list it must get a lock. """ filename = self._consumer_config.valid_dn log.debug("Loading valid DNs file: " + filename) # Get the lock for the valid_dns list self._valid_dn_lock.acquire() try: f = open(filename) lines = f.readlines() # strip the newlines ('\n') from each DN self._valid_dns = [dn.strip().lower() for dn in lines] f.close() except Exception, e: log.warn(type(e)) log.warn("Exception while loading valid DNs file: " + str(e)) log.warn("The valid DNs have not been updated.") # Always release the lock! self._valid_dn_lock.release() def _reset_certificate(self): """ Set the consumer certificate to None. This is to make sure that an expired host certificate doesn't cause problems - each time this method is called it should be requested again. """ log.info('Clearing consumer certificate.') self._clear_certificate() def _fetch_brokers(self): """ If a BDII URL and a network name are supplied in the configuration, use them to find the list of available brokers and return them all. Otherwise, use the host and port provided. """ # abbreviation cfg = self._configuration if cfg.bdii is not None and cfg.broker_network is not None: log.info("BDII URL: %s" % cfg.bdii) log.info("BDII broker network: %s" % cfg.broker_network) # Use the lock! 
self._network_brokers_lock.acquire() self._network_brokers = fetch_brokers(cfg.bdii, cfg.broker_network, cfg.use_ssl) # TODO: remove testing addition # self._network_brokers.insert(0, ("apel-dev.esc.rl.ac.uk", 61613)) self._network_brokers_lock.release() for item in self._network_brokers: log.debug("Found broker in BDII: %s:%d" % item) if not self._network_brokers: raise SsmException("No brokers found for URL %s and network %s" % (cfg.bdii, cfg.broker_network)) else: log.info("Using broker details supplied: %s:%d" % (cfg.host, cfg.port)) self._broker_host = cfg.host self._broker_port = cfg.port def _valid_sender(self, sender): """ Check if the sender's DN is in the allowed list. _read_valid_dns() must be run before this method. """ # Acquire lock before reading from list. self._valid_dn_lock.acquire() valid = False try: if sender.lower() in self._valid_dns: log.info('Valid sender: ' + sender) valid = True else: log.info('Invalid sender: ' + sender) except Exception, e: log.warning("Error reading internal list of valid DNs: " + str(e) + " - program will continue.") # Always release the lock! self._valid_dn_lock.release() return valid def check_ssm_config(config, consumer_config, producer_config): """ Make basic checks on the contents of the configuration file to make sure they are not inconsistent. """ if consumer_config is None and producer_config is None: raise SsmException, 'No producer or consumer configuration supplied' if consumer_config is not None and not config.daemon: raise SsmException("If a consumer is configured, " + \ "the ssm must be run with the -d flag as a daemon.") if config.daemon: if producer_config.msg_check_time is None: log.info("[producer]->msg-check-time not specified. Default value" + \ " will be used.") # The SSM cannot work without a valid and matching cert and key. try: cert = from_file(config.certificate) key = from_file(config.key) if not check_cert_key(cert, key): raise SsmException("Certificate and key don't match.") except Exception, e: log.error('Error retrieving cert or key file: ' + str(e)) raise # If the pidfile exists, don't start up. if os.path.exists(config.pidfile): log.warn("A pidfile %s already exists." % config.pidfile) log.warn("Check that the SSM is not running, then remove the file.") raise SsmException("SSM cannot start while pidfile exists.") def fetch_brokers(bdii, network, use_ssl): """ Given the URL of a BDII and the name of a broker network, retrieve (host, port) tuples for all the stomp brokers in the network. """ broker_getter = get_brokers.StompBrokerGetter(bdii) if use_ssl: service_type = get_brokers.STOMP_SSL_SERVICE else: service_type = get_brokers.STOMP_SERVICE return broker_getter.get_broker_hosts_and_ports(service_type, network)
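
# Illustrative sketch (not part of the SSM module above): the send and
# certificate-request paths both wait for an asynchronous reply by polling in
# 0.2 second steps until ACK_WAIT_TIME runs out. A minimal standalone version
# of that pattern, with an assumed 60 second timeout and the helper name
# wait_for chosen purely for illustration:
import time

def wait_for(condition, timeout=60, interval=0.2):
    """Poll condition() every `interval` seconds; True if it held before timeout."""
    remaining = int(round(timeout / interval))
    while remaining and not condition():
        time.sleep(interval)
        remaining -= 1
    return remaining > 0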
#!/usr/bin/env python3 import sys import os, subprocess import argparse import socket import time import random import string import ssl parser = argparse.ArgumentParser( description="SIP extension enumeration" ) parser.add_argument( '--proto', dest="PROTOCOL", type=str, default="udp", help="TCP or UDP <tcp> <udp>. Default is UDP" ) parser.add_argument( '--dport', dest="DPORT", type=int, default=5060, help="Destination port. Default is 5060" ) parser.add_argument( '--dst', dest="DST", type=str, required=True, help="Destination IP address" ) parser.add_argument( '--file', dest="FILE", type=str, default="register.txt", help="The register message to enumerate users. Default is \"register.txt\"" ) parser.add_argument( '--wordlist', dest="WORDLIST", type=str, default="users/10-99.txt", help="The word list for enumeration. Default is \"users/10-99.txt\"" ) parser.add_argument( '--src', dest="SRC", type=str, required=True, help="Source IP address" ) parser.add_argument( '--domain', dest="DOMAIN", type=str, required=True, help="The SIP domain" ) parser.add_argument( '--key', dest="KEY", type=str, default="key.key", help="Private key file for tls connection. Default is \"key.key\"" ) parser.add_argument( '--crt', dest="CRT", type=str, default="crt.crt", help="Certificate for tls connection. Default is \"crt.crt\"" ) args = parser.parse_args() def get_payload(): f=open(args.FILE, "r", newline="") if f.mode == "r": payload = f.read() return payload else: sys.exit(1) def replace_payload(payload): payload = payload.replace("DOMAIN", args.DOMAIN) payload = payload.replace("SRC", args.SRC) if args.PROTOCOL == "tcp": payload = payload.replace("PROTO", "TCP") elif args.PROTOCOL == "tls": payload = payload.replace("PROTO", "TLS") else: payload = payload.replace("PROTO", "UDP") return payload def main(): payload = get_payload() payload = replace_payload(payload) lines = [line.rstrip('\n') for line in open(args.WORDLIST)] i = 0 while i < len(lines): try: output=int(round((int(i)/int(len(lines)))*int(100))) print("\033[1;34m[*]\033[0m Progress: " + str(output) + "%", end="\r") req = payload.replace("USER", lines[i]) req = req.replace("CALLID", ( ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(32)))) req = req.replace("BRANCH", ( ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(10)))) if args.PROTOCOL == "tcp": sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) elif args.PROTOCOL == "tls": sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLSv1, keyfile=args.KEY, certfile=args.CRT) else: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: sock.settimeout(2.0) sock.connect((args.DST, args.DPORT)) sock.send(req.encode()) response = sock.recv(1024) if bytes('100 Trying', 'utf-8') in response: response = sock.recv(1024) if bytes('401 Unauthorized', 'utf-8') in response: print("\033[1;32m[+]\033[0m Authentication required for " + lines[i] + "\r\n") elif bytes('407 Proxy Authentication Required', 'utf-8') in response: print("\033[1;32m[+]\033[0m Authentication required for " + lines[i] + "\r\n") elif bytes('200 OK', 'utf-8') in response: print("\033[1;32m[+]\033[0;32m No authentication required for " + lines[i] + "\033[0m\r\n") sock.close() i += 1 except (KeyboardInterrupt): print("\033[1;34m[*]\033[0m User interruption. Exiting ...") sys.exit(0) except: i += 1 sock.close() except (KeyboardInterrupt): print("\033[1;34m[*]\033[0m User interruption. 
Exiting ...")
            sys.exit(0)


if __name__ == "__main__":
    main()
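
# Illustrative helper (not part of the enumeration script above): the script
# substitutes a fresh Call-ID and branch tag into every REGISTER it sends,
# using random.SystemRandom over lowercase letters and digits. The helper name
# rand_token is an assumption; the technique is the one used inline above.
import random
import string

def rand_token(length):
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.SystemRandom().choice(alphabet) for _ in range(length))

# e.g. req = payload.replace("CALLID", rand_token(32)).replace("BRANCH", rand_token(10))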
<reponame>quizlet/abracadabra<filename>abra/inference/frequentist/means.py #!/usr/bin/python # -*- coding: utf-8 -*- from abra.config import DEFAULT_ALPHA, MIN_OBS_FOR_Z from abra.stats import Samples, MeanComparison from abra.inference.frequentist.results import FrequentistTestResults from abra.inference import FrequentistProcedure from numpy import ndarray class MeansDelta(FrequentistProcedure): """ Runs frequentist inference procedure to test for the difference in two sample means. Assumes normality for large sample sizes, or t-distribution for small sample sizes. Parameters ---------- hypothesis : str the althernative hypothesis: - 'larger': one-tailed test, assume variation is larger than null - 'smaller': one-tailed test, assume variation is smaller than null - 'unequal': two-tailed test, variaion mean is different than null var_assumptions : str whether to use pooled or unequal variance assumptions - 'pooled': assume the same variance - 'unequal': use Smith-Satterthwait dof when calculating t-stat """ def __init__(self, var_assumptions='unequal', *args, **kwargs): super(MeansDelta, self).__init__(*args, **kwargs) self.var_assumptions = var_assumptions def run(self, control_samples, variation_samples, alpha=DEFAULT_ALPHA, inference_kwargs=None): """ Run the inference procedure over the samples with a selected alpha value. alpha : float in [0, 1] the assumed Type I error rate """ if isinstance(control_samples, (list, ndarray)): control_samples = Samples(control_samples) if isinstance(variation_samples, (list, ndarray)): variation_samples = Samples(variation_samples) self.alpha = alpha nobs = min(control_samples.nobs, variation_samples.nobs) test_statistic = 'z' if nobs > MIN_OBS_FOR_Z else 't' self.comparison = MeanComparison( samples_a=variation_samples, samples_b=control_samples, alpha=self.alpha, test_statistic=test_statistic, hypothesis=self.hypothesis ) @property def stats(self): f_stats = getattr(self.comparison, "{}test_ind".format(self.test_statistic)) return f_stats(alternative=self.hypothesis_sm) @property def ci(self): """ Calculate confidence interval percentiles and values. """ f_ci = getattr(self.comparison, "{}confint_diff".format(self.test_statistic)) ci_vals = f_ci(self.alpha, self.hypothesis_sm, self.var_assumptions) return [ci_vals, self.ci_percents] def make_results(self): """ Package up inference results """ stats = self.stats statistic_value = stats[0] p_value = stats[1] accept_hypothesis = self.accept_hypothesis(statistic_value) df = stats[2] if self.test_statistic == 't' else None return FrequentistTestResults( control=self.comparison.d2, variation=self.comparison.d1, delta=self.comparison.delta, delta_relative=self.comparison.delta_relative, effect_size=self.comparison.effect_size, alpha=self.comparison.alpha, power=self.comparison.power, confidence_interval=self.ci, test_statistic=self.test_statistic, statistic_value=statistic_value, p_value=p_value, df=df, hypothesis=self.hypothesis_text, accept_hypothesis=accept_hypothesis, inference_procedure=self, warnings=self.comparison.warnings )
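
# Hedged usage sketch for MeansDelta: drive the procedure with two raw
# observation arrays. The `hypothesis` keyword is taken from the class
# docstring and is assumed to be accepted by the FrequentistProcedure base
# class; exact import paths may differ between installs.
import numpy as np
from abra.inference.frequentist.means import MeansDelta

control = np.random.normal(0.0, 1.0, size=500)
variation = np.random.normal(0.1, 1.0, size=500)

test = MeansDelta(var_assumptions='unequal', hypothesis='larger')
test.run(control, variation, alpha=0.05)
results = test.make_results()   # FrequentistTestResults with p-value, CI, power, ...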
<gh_stars>1-10 # -*- coding: utf-8 -*- # # Copyright: (c) 2019, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest import sys if sys.version_info < (2, 7): pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7") from ansible.module_utils.basic import AnsibleModule from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_firewall_log_profile import ( ApiParameters, ModuleParameters, ModuleManager, ArgumentSpec ) from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return data class TestParameters(unittest.TestCase): def test_module_parameters(self): args = dict( name='foo', description='my description', partition='Common', ip_intelligence=dict( log_publisher='foobar', rate_limit='300000', log_translation_fields='yes', log_rtbh='yes', log_shun='yes', ), port_misuse=dict( log_publisher='/Part/bazbar', rate_limit='indefinite', ), dos_protection=dict( sip_publisher='sip-pub', dns_publisher='/Temp/dns-pub', network_publisher='net-pub' ) ) p = ModuleParameters(params=args) assert p.name == 'foo' assert p.description == 'my description' assert p.ip_rate_limit == 300000 assert p.ip_log_publisher == '/Common/foobar' assert p.ip_log_translation_fields == 'enabled' assert p.ip_log_shun is None assert p.ip_log_rtbh == 'enabled' assert p.port_log_publisher == '/Part/bazbar' assert p.port_rate_limit == 4294967295 assert p.dns_publisher == '/Temp/dns-pub' assert p.sip_publisher == '/Common/sip-pub' assert p.network_publisher == '/Common/net-pub' def test_api_parameters(self): args = load_fixture('load_afm_log_global_network_profile.json') p = ApiParameters(params=args) assert p.name == 'global-network' assert p.description == 'Default logging profile for network events' assert p.ip_log_shun == 'disabled' assert p.ip_log_translation_fields == 'disabled' assert p.ip_rate_limit == 4294967295 assert p.port_rate_limit == 4294967295 assert p.ip_log_publisher is None assert p.port_log_publisher is None class TestManager(unittest.TestCase): def setUp(self): self.spec = ArgumentSpec() self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_firewall_log_profile.tmos_version') self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_firewall_log_profile.send_teem') self.m2 = self.p2.start() self.m2.return_value = '14.1.0' self.m3 = self.p3.start() self.m3.return_value = True def tearDown(self): self.p2.stop() self.p3.stop() def test_create(self, *args): set_module_args(dict( name='foo', description='this is a description', provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) mm = ModuleManager(module=module) # Override methods to force specific logic in the module to happen mm.exists = 
Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['description'] == 'this is a description'
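
# Standalone illustration (not part of the F5 unit tests above): test_create
# forces the module logic by giving mm.exists a side_effect list. Successive
# calls return successive values, so the manager first sees the profile as
# absent (create path) and then as present (verification). In miniature:
from unittest.mock import Mock

exists = Mock(side_effect=[False, True])
assert exists() is False   # first call: resource reported absent
assert exists() is True    # second call: resource reported present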
"""Test asyncpraw.models.comment_forest.""" import pytest from asynctest import mock from asyncpraw.exceptions import DuplicateReplaceException from asyncpraw.models import Comment, MoreComments, Submission from .. import IntegrationTest class TestCommentForest(IntegrationTest): def setUp(self): super().setUp() # Responses do not decode well on travis so manually re-enable gzip. self.reddit._core._requestor._http._default_headers["Accept-Encoding"] = "gzip" async def test_replace__all(self): with self.use_cassette(match_requests_on=["uri", "method", "body"]): submission = await self.reddit.submission("3hahrw") comments = await submission.comments() before_count = len(await comments.list()) skipped = await comments.replace_more(None, threshold=0) assert len(skipped) == 0 assert all([isinstance(x, Comment) for x in await comments.list()]) assert all([x.submission == submission for x in await comments.list()]) assert before_count < len(await comments.list()) async def test_replace__all_large(self): with self.use_cassette(match_requests_on=["uri", "method", "body"]): submission = Submission(self.reddit, "n49rw") comments = await submission.comments() skipped = await comments.replace_more(None, threshold=0) assert len(skipped) == 0 assert all([isinstance(x, Comment) for x in await comments.list()]) assert len(await comments.list()) > 1000 assert len(await comments.list()) == len(submission._comments_by_id) async def test_replace__all_with_comment_limit(self): with self.use_cassette(match_requests_on=["uri", "method", "body"]): submission = await self.reddit.submission("3hahrw") submission.comment_limit = 10 comments = await submission.comments() skipped = await comments.replace_more(None, threshold=0) assert len(skipped) == 0 assert len(await comments.list()) >= 500 @mock.patch("asyncio.sleep", return_value=None) async def test_replace__all_with_comment_sort(self, _): with self.use_cassette(match_requests_on=["uri", "method", "body"]): submission = await self.reddit.submission("3hahrw") submission.comment_sort = "old" comments = await submission.comments() skipped = await comments.replace_more(None, threshold=0) assert len(skipped) == 0 assert len(await comments.list()) >= 500 async def test_replace__skip_at_limit(self): with self.use_cassette(match_requests_on=["uri", "method", "body"]): submission = await self.reddit.submission("3hahrw") comments = await submission.comments() skipped = await comments.replace_more(1) assert len(skipped) == 5 # async def test_replace__skip_below_threshold(self): # FIXME: not currently working; same with praw # with self.use_cassette(match_requests_on=["uri", "method", "body"]): # submission = Submission(self.reddit, "hkwbo0") # comments = await submission.comments() # before_count = len(await comments.list()) # skipped = await comments.replace_more(16, 5) # assert len(skipped) == 6 # assert all(x.count < 5 for x in skipped) # assert all(x.submission == submission for x in skipped) # assert before_count < len(await comments.list()) async def test_replace__skip_all(self): with self.use_cassette(match_requests_on=["uri", "method", "body"]): submission = await self.reddit.submission("3hahrw") comments = await submission.comments() before_count = len(await comments.list()) skipped = await comments.replace_more(limit=0) assert len(skipped) == 6 assert all(x.submission == submission for x in skipped) after_count = len(await comments.list()) assert before_count == after_count + len(skipped) @mock.patch("asyncio.sleep", return_value=None) async def 
test_replace__on_comment_from_submission(self, _): with self.use_cassette(match_requests_on=["uri", "method", "body"]): submission = await self.reddit.submission("3hahrw") comments = await submission.comments() types = [type(x) for x in await comments.list()] assert types.count(Comment) == 527 assert types.count(MoreComments) == 6 new_comments = await submission.comments() replace_more = await new_comments[0].replies.replace_more() assert replace_more == [] types = [type(x) for x in await comments.list()] assert types.count(Comment) == 531 assert types.count(MoreComments) == 3 @mock.patch("asyncio.sleep", return_value=None) async def test_replace__on_direct_comment(self, _): with self.use_cassette(match_requests_on=["uri", "method", "body"]): comment = await self.reddit.comment("d8r4im1") await comment.refresh() assert any( [isinstance(x, MoreComments) for x in await comment.replies.list()] ) await comment.replies.replace_more() assert all([isinstance(x, Comment) for x in await comment.replies.list()]) @mock.patch("asyncio.sleep", return_value=None) async def test_comment_forest_refresh_error(self, _): self.reddit.read_only = False with self.use_cassette(match_requests_on=["uri", "method", "body"]): submission = await self.async_next(self.reddit.front.top()) # await submission._fetch() submission.comment_limit = 1 comments = await submission.comments() await comments[1].comments() with pytest.raises(DuplicateReplaceException): await comments.replace_more(limit=1)
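
# Hedged usage sketch of the replace_more() flow the tests above exercise,
# outside a test fixture. Assumes `reddit` is an already-authenticated
# asyncpraw.Reddit instance supplied by the caller.
async def flatten_comments(reddit, submission_id):
    submission = await reddit.submission(submission_id)
    comments = await submission.comments()
    await comments.replace_more(limit=None, threshold=0)  # resolve every MoreComments stub
    return await comments.list()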
<reponame>ctuning/ck-spack<filename>package/spack-octopus/package.py<gh_stars>1-10 ############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by <NAME>, <EMAIL>, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Octopus(Package): """A real-space finite-difference (time-dependent) density-functional theory code.""" homepage = "http://www.tddft.org/programs/octopus/" url = "http://www.tddft.org/programs/octopus/down.php?file=6.0/octopus-6.0.tar.gz" version('7.3', '87e51fa4a3a999706ea4ea5e9136996f') version('6.0', '5d1168c2a8d7fd9cb9492eaebaa7182e') version('5.0.1', '2b6392ab67b843f9d4ca7413fc07e822') variant('scalapack', default=False, description='Compile with Scalapack') variant('metis', default=False, description='Compile with METIS') variant('parmetis', default=False, description='Compile with ParMETIS') variant('netcdf', default=False, description='Compile with Netcdf') variant('arpack', default=False, description='Compile with ARPACK') depends_on('blas') depends_on('[email protected]:') depends_on('lapack') depends_on('libxc') depends_on('mpi') depends_on('fftw@3:+mpi+openmp') depends_on('metis@5:', when='+metis') depends_on('parmetis', when='+parmetis') depends_on('scalapack', when='+scalapack') depends_on('netcdf-fortran', when='+netcdf') depends_on('arpack-ng', when='+arpack') # optional dependencies: # TODO: etsf-io, sparskit, # feast, libfm, pfft, isf, pnfft def install(self, spec, prefix): lapack = spec['lapack'].libs blas = spec['blas'].libs args = [] args.extend([ '--prefix=%s' % prefix, '--with-blas=%s' % blas.ld_flags, '--with-lapack=%s' % lapack.ld_flags, '--with-gsl-prefix=%s' % spec['gsl'].prefix, '--with-libxc-prefix=%s' % spec['libxc'].prefix, 'CC=%s' % spec['mpi'].mpicc, 'FC=%s' % spec['mpi'].mpifc, '--enable-mpi', '--with-fftw-prefix==%s' % spec['fftw'].prefix, ]) if '+metis' in spec: args.extend([ '--with-metis-prefix=%s' % spec['metis'].prefix, ]) if '+parmetis' in spec: args.extend([ '--with-parmetis-prefix=%s' % spec['parmetis'].prefix, ]) if '+netcdf' in spec: args.extend([ '--with-netcdf-prefix=%s' % spec['netcdf-fortran'].prefix, '--with-netcdf-include=%s' % spec['netcdf-fortran'].prefix.include, ]) if '+arpack' in spec: arpack_libs = spec['arpack-ng'].libs.joined() args.extend([ '--with-arpack={0}'.format(arpack_libs), ]) if '+mpi' in spec['arpack-ng']: args.extend([ '--with-parpack={0}'.format(arpack_libs), ]) if '+scalapack' in spec: args.extend([ '--with-blacs=%s' 
% spec['scalapack'].libs,
                '--with-scalapack=%s' % spec['scalapack'].libs
            ])

        # --with-etsf-io-prefix=
        # --with-sparskit=${prefix}/lib/libskit.a
        # --with-pfft-prefix=${prefix} --with-mpifftw-prefix=${prefix}
        # --with-berkeleygw-prefix=${prefix}

        # When the preprocessor expands macros (i.e. CFLAGS) defined as quoted
        # strings, the result may exceed 132 characters and is truncated. The
        # compiler then sees an unterminated character constant and emits
        # "Line truncated" errors. To overcome this, add flags telling the
        # compiler that the entire line is meaningful.
        # TODO: for lack of a better approach, assume that clang is mixed
        # with GNU fortran.
        if spec.satisfies('%clang') or spec.satisfies('%gcc'):
            args.extend([
                'FCFLAGS=-O2 -ffree-line-length-none'
            ])

        configure(*args)
        make()
        # short tests take forever...
        # make('check-short')
        make('install')
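
# Usage note (not part of the recipe above): with this package file on Spack's
# repo path, the optional features map to variants on the install spec, e.g.
# (illustrative command line, variant and version names as declared above):
#
#     spack install octopus@7.3 +netcdf +arpack +scalapack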
import pytest, numpy as np from sequentia.internals import _Validator from ...support import assert_equal, assert_not_equal, assert_all_equal val = _Validator() # ================================== # # _Validator.observation_sequences() # # ================================== # def test_single_observation_sequence_with_single(): """Single observation sequence with allow_single=True""" x = np.arange(8).reshape(-1, 2) assert_equal(x, val.is_observation_sequences(x, allow_single=True)) def test_single_observation_sequence_1d_flat_with_single(): """Single flat 1D observation sequence with allow_single=True""" x = np.arange(4) assert_equal(val.is_observation_sequences(x, allow_single=True), np.array([ [0], [1], [2], [3] ])) def test_single_observation_sequence_1d_with_single(): """Single non-flat 1D observation sequence with allow_single=True""" x = np.arange(4).reshape(-1, 1) assert_equal(x, val.is_observation_sequences(x, allow_single=True)) def test_single_observation_sequence_wrong_type_with_single(): """Single observation sequence with wrong type and allow_single=True""" x = 1 with pytest.raises(TypeError) as e: val.is_observation_sequences(x, allow_single=True) assert str(e.value) == 'Expected an individual observation sequence or a list of multiple observation sequences, each of type numpy.ndarray' def test_multiple_observation_sequences_with_single(): """Multiple observation sequences with allow_single=True""" X = [np.arange(8).reshape(-1, 2), np.arange(12).reshape(-1, 2)] assert_all_equal(X, val.is_observation_sequences(X, allow_single=True)) def test_multiple_observation_sequences_diff_dims_with_single(): """Multiple observation sequences with different dimensionality and allow_single=True""" X = [np.arange(8).reshape(-1, 2), np.arange(12).reshape(-1, 3)] with pytest.raises(ValueError) as e: val.is_observation_sequences(X, allow_single=True) assert str(e.value) == 'Each observation sequence must have the same dimensionality' def test_multiple_observation_sequences_1d_some_flat_with_single(): """Multiple 1D (flat and non-flat) observation sequences with allow_single=True""" X = [np.arange(2).reshape(-1, 1), np.arange(3)] assert_all_equal(val.is_observation_sequences(X, allow_single=True), [ np.array([ [0], [1] ]), np.array([ [0], [1], [2] ]) ]) def test_multiple_observation_sequences_1d_all_flat_with_single(): """Multiple flat 1D observation sequences with allow_single=True""" X = [np.arange(2), np.arange(3)] assert_all_equal(val.is_observation_sequences(X, allow_single=True), [ np.array([ [0], [1] ]), np.array([ [0], [1], [2] ]) ]) def test_multiple_observation_sequences_1d_with_single(): """Multiple 1D observation sequences with allow_single=True""" X = [np.arange(8).reshape(-1, 1), np.arange(12).reshape(-1, 1)] assert_all_equal(X, val.is_observation_sequences(X, allow_single=True)) def test_multiple_observation_sequences_some_wrong_type_with_single(): """Multiple observation sequences with different types and allow_single=True""" X = [np.arange(4).reshape(-1, 1), np.arange(8).reshape(-1, 1), 3] with pytest.raises(TypeError) as e: val.is_observation_sequences(X, allow_single=True) assert str(e.value) == 'Each observation sequence must be a numpy.ndarray' def test_multiple_observation_sequences_all_wrong_type_with_single(): """Multiple observation sequences with the wrong type with allow_single=True""" X = [1, 2, 3, 4] with pytest.raises(TypeError) as e: val.is_observation_sequences(X, allow_single=True) assert str(e.value) == 'Each observation sequence must be a numpy.ndarray' def 
test_multiple_observation_sequences_wrong_list_type_with_single(): """Multiple observation sequences with the wrong list type with allow_single=True""" X = [[1, 2, 3, 4], [4, 3, 2, 1]] with pytest.raises(TypeError) as e: val.is_observation_sequences(X, allow_single=True) assert str(e.value) == 'Each observation sequence must be a numpy.ndarray' def test_single_observation_sequence_without_single(): """Single observation sequence with allow_single=False""" x = np.arange(8).reshape(-1, 2) with pytest.raises(TypeError) as e: val.is_observation_sequences(x, allow_single=False) assert str(e.value) == 'Expected a list of observation sequences, each of type numpy.ndarray' def test_single_observation_sequence_1d_flat_without_single(): """Single flat 1D observation sequence with allow_single=False""" x = np.arange(3) assert_equal(val.is_observation_sequences(x, allow_single=True), np.array([ [0], [1], [2] ])) def test_single_observation_sequence_1d_without_single(): """Single non-flat 1D observation sequence with allow_single=False""" x = np.arange(4).reshape(-1, 1) with pytest.raises(TypeError) as e: val.is_observation_sequences(x, allow_single=False) assert str(e.value) == 'Expected a list of observation sequences, each of type numpy.ndarray' def test_single_observation_sequence_wrong_type_without_single(): """Single observation sequence with wrong type and allow_single=False""" x = 1 with pytest.raises(TypeError) as e: val.is_observation_sequences(x, allow_single=False) assert str(e.value) == 'Expected a list of observation sequences, each of type numpy.ndarray' def test_multiple_observation_sequences_without_single(): """Multiple observation sequences with allow_single=False""" X = [np.arange(8).reshape(-1, 2), np.arange(12).reshape(-1, 2)] assert_all_equal(X, val.is_observation_sequences(X, allow_single=False)) def test_multiple_observation_sequences_diff_dims_without_single(): """Multiple observation sequences with different dimensionality and allow_single=False""" X = [np.arange(8).reshape(-1, 2), np.arange(12).reshape(-1, 3)] with pytest.raises(ValueError) as e: val.is_observation_sequences(X, allow_single=False) assert str(e.value) == 'Each observation sequence must have the same dimensionality' def test_multiple_observation_sequences_1d_some_flat_without_single(): """Multiple 1D (flat and non-flat) observation sequences with allow_single=False""" X = [np.arange(2).reshape(-1, 1), np.arange(3)] assert_all_equal(val.is_observation_sequences(X, allow_single=False), [ np.array([ [0], [1] ]), np.array([ [0], [1], [2] ]) ]) def test_multiple_observation_sequences_1d_all_flat_without_single(): """Multiple flat 1D observation sequences with allow_single=False""" X = [np.arange(2), np.arange(3)] assert_all_equal(val.is_observation_sequences(X, allow_single=False), [ np.array([ [0], [1] ]), np.array([ [0], [1], [2] ]) ]) def test_multiple_observation_sequences_1d_without_single(): """Multiple 1D observation sequences with allow_single=False""" X = [np.arange(8).reshape(-1, 1), np.arange(12).reshape(-1, 1)] assert_all_equal(X, val.is_observation_sequences(X, allow_single=False)) def test_multiple_observation_sequence_some_wrong_type_without_single(): """Multiple observation sequences with different types and allow_single=False""" X = [np.arange(4).reshape(-1, 1), np.arange(8).reshape(-1, 1), 3] with pytest.raises(TypeError) as e: val.is_observation_sequences(X, allow_single=False) assert str(e.value) == 'Each observation sequence must be a numpy.ndarray' def 
test_multiple_observation_sequence_wrong_type_without_single(): """Multiple observation sequences with the wrong type with allow_single=False""" X = [1, 2, 3, 4] with pytest.raises(TypeError) as e: val.is_observation_sequences(X, allow_single=False) assert str(e.value) == 'Each observation sequence must be a numpy.ndarray' def test_multiple_observation_sequence_wrong_list_type_without_single(): """Multiple observation sequences with the wrong list type with allow_single=False""" X = [[1, 2, 3, 4], [4, 3, 2, 1]] with pytest.raises(TypeError) as e: val.is_observation_sequences(X, allow_single=False) assert str(e.value) == 'Each observation sequence must be a numpy.ndarray' # ============================================= # # _Validator.observation_sequences_and_labels() # # ============================================= # def test_observation_sequences_and_labels_different_label_types(): X = [np.arange(8).reshape(-1, 1), np.arange(12).reshape(-1, 1)] y = ['c1', 1] with pytest.raises(TypeError) as e: val.is_observation_sequences_and_labels(X, y) assert str(e.value) == 'Expected all labels to be of the same type' def test_observation_sequences_and_labels_different_lengths(): X = [np.arange(8).reshape(-1, 1), np.arange(12).reshape(-1, 1)] y = ['c1', 'c2', 'c1'] with pytest.raises(ValueError) as e: val.is_observation_sequences_and_labels(X, y) assert str(e.value) == 'Expected the same number of observation sequences and labels' def test_observation_sequences_and_labels_same_length(): """Observation sequences and labels with the same length.""" X = [np.arange(8).reshape(-1, 1), np.arange(12).reshape(-1, 1)] y = ['c1', 'c2'] X_val, y_val = val.is_observation_sequences_and_labels(X, y) assert_all_equal(X, X_val) assert y_val == y # ==================== # # _Validator.integer() # # ==================== # def test_integer_with_correct_type(): """Integer type""" assert val.is_integer(1, desc='test') == 1 def test_integer_with_float(): """Float type""" with pytest.raises(TypeError) as e: val.is_integer(1., desc='test') assert str(e.value) == 'Expected test to be an integer' def test_integer_with_wrong_type(): """Incorrect type""" with pytest.raises(TypeError) as e: val.is_integer('a', desc='test') assert str(e.value) == 'Expected test to be an integer' # =================== # # _Validator.string() # # =================== # def test_string_with_correct_type(): """String type""" assert val.is_string('test', desc='test') == 'test' def test_string_with_wrong_type(): """Incorrect type""" with pytest.raises(TypeError) as e: val.is_string(1, desc='test') assert str(e.value) == 'Expected test to be a string' # ============================== # # _Validator.string_or_numeric() # # ============================== # def test_string_or_numeric_with_string_type(): """String type""" assert val.is_string_or_numeric('test', desc='test') == 'test' def test_string_or_numeric_with_numeric_type(): """Numeric type""" assert val.is_string_or_numeric(1, desc='test') == 1 def test_string_or_numeric_with_wrong_type(): """Incorrect type""" with pytest.raises(TypeError) as e: val.is_string_or_numeric([], desc='test') assert str(e.value) == 'Expected test to be a string or numeric' # ==================== # # _Validator.boolean() # # ==================== # def test_boolean_with_correct_type(): """Boolean type""" assert val.is_boolean(True, desc='test') == True def test_boolean_with_wrong_type(): """Incorrect type""" with pytest.raises(TypeError) as e: val.is_boolean(1, desc='test') assert str(e.value) == 'Expected test to be a 
boolean' # =================== # # _Validator.one_of() # # =================== # def test_one_of_correct_with_multiple_types(): """List of multiple types with a correct input""" assert val.is_one_of(2, [True, 'test', 2], desc='test') == 2 def test_one_of_incorrect_with_multiple_types(): """List of multiple types with an incorrect input""" with pytest.raises(ValueError) as e: val.is_one_of(2, [True, 'test', 2.1], desc='test') assert str(e.value) == "Expected test to be one of [True, 'test', 2.1]" def test_one_of_correct_with_single_type(): """List of single type with a correct input""" assert val.is_one_of(2, [0, 1, 2], desc='test') == 2 def test_one_of_incorrect_with_single_type(): """List of single type with an incorrect input""" with pytest.raises(ValueError) as e: val.is_one_of(2, [0, 1, 3], desc='test') assert str(e.value) == "Expected test to be one of [0, 1, 3]" # =============================== # # _Validator.restricted_integer() # # =============================== # def test_restricted_integer_wrong_type_meets_condition(): """Incorrect type that meets the condition""" with pytest.raises(TypeError) as e: val.is_restricted_integer('test', lambda x: len(x) == 4, 'test', 'not false') assert str(e.value) == 'Expected test to be an integer' def test_restricted_integer_wrong_type_does_not_meet_condition(): """Incorrect type that does not meet the condition""" with pytest.raises(TypeError) as e: val.is_restricted_integer('test', lambda x: len(x) != 4, 'test', 'not false') assert str(e.value) == 'Expected test to be an integer' def test_restricted_integer_correct_type_meets_condition(): """Correct type that meets the condition""" assert val.is_restricted_integer(1, lambda x: x > 0, 'test', 'greater than zero') == 1 def test_restricted_integer_correct_type_does_not_meet_condition(): """Correct type that does not meet the condition""" with pytest.raises(ValueError) as e: val.is_restricted_integer(-1, lambda x: x > 0, 'test', 'greater than zero') assert str(e.value) == 'Expected test to be greater than zero' # ============================= # # _Validator.restricted_float() # # ============================= # def test_restricted_integer_wrong_type_meets_condition(): """Incorrect type that meets the condition""" with pytest.raises(TypeError) as e: val.is_restricted_float('test', lambda x: len(x) == 4, 'test', 'not false') assert str(e.value) == 'Expected test to be a float' def test_restricted_integer_wrong_type_does_not_meet_condition(): """Incorrect type that does not meet the condition""" with pytest.raises(TypeError) as e: val.is_restricted_float('test', lambda x: len(x) != 4, 'test', 'not false') assert str(e.value) == 'Expected test to be a float' def test_restricted_integer_correct_type_meets_condition(): """Correct type that meets the condition""" assert val.is_restricted_float(1.1, lambda x: x > 0, 'test', 'greater than zero') == 1.1 def test_restricted_integer_correct_type_does_not_meet_condition(): """Correct type that does not meet the condition""" with pytest.raises(ValueError) as e: val.is_restricted_float(-1.1, lambda x: x > 0, 'test', 'greater than zero') assert str(e.value) == 'Expected test to be greater than zero' # ========================= # # _Validator.random_state() # # ========================= # def test_random_state_none(): """None random state""" s1 = np.random.RandomState(seed=None) s2 = val.is_random_state(None) assert_not_equal(s1.random((5, 5)), s2.random((5, 5))) def test_random_state_int(): """Integer random state (seed)""" s1 = np.random.RandomState(seed=0) s2 
= val.is_random_state(0) assert_equal(s1.random((5, 5)), s2.random((5, 5))) def test_random_state_numpy(): """numpy.random.RandomState random state""" s1 = np.random.RandomState(seed=0) s2 = val.is_random_state(s1) assert s1 == s2 def test_random_state_invalid(): """Invalid random state""" with pytest.raises(TypeError) as e: val.is_random_state('0') assert str(e.value) == 'Expected random state to be of type: None, int, or numpy.random.RandomState'
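
# Usage sketch mirroring the behaviour pinned down by the tests above: direct
# calls against sequentia's internal _Validator, using the same import path as
# the test module (assumes an installed sequentia build). The 'n_states'
# description string is arbitrary.
import numpy as np
from sequentia.internals import _Validator

val = _Validator()

X = [np.arange(8).reshape(-1, 2), np.arange(12).reshape(-1, 2)]
X_checked = val.is_observation_sequences(X, allow_single=False)   # returns the validated list
n_states = val.is_restricted_integer(5, lambda x: x > 0, 'n_states', 'greater than zero')
rng = val.is_random_state(0)                                      # numpy RandomState seeded with 0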
from SolidSpheral3d import * import Gnuplot # We'll work in CGS units. units = PhysicalConstants(0.01, # Unit length in meters 0.001, # Unit mass in kg 1.0) # Unit time in sec #------------------------------------------------------------------------------- # Build a Gruneisen for SiO2 like materials. #------------------------------------------------------------------------------- mconv = 1.0 lconv = 1.0 tconv = 1.0 rho0 = 2.65 # g/cc C0 = 0.36839e6 # cm/s S1 = 1.8954 # dimensionless S2 = 0.0 # dimensionless S3 = 0.0 # dimensionless gamma0 = 0.9 # dimensionless b = 1.0 # dimensionless etaMin = 0.2 etaMax = 5.0 eosSiO2 = GruneisenEquationOfState(rho0, # ref density (g/cc) etaMin, # etamin etaMax, # etamax C0, S1, S2, S3, gamma0, b, 60.0843, # atomic weight units) #------------------------------------------------------------------------------- # Build an ANEOS SiO2 like thing. #------------------------------------------------------------------------------- izetl = vector_of_int(1, -1) initializeANEOS("ANEOS.INPUT", "ANEOS.barf", izetl) rhoMin, rhoMax = 0.9*etaMin*rho0, 1.1*etaMax*rho0 Tmin, Tmax = 1.0, 1.0e8 eosANEOS = ANEOS(0, # Material number 1000, # num rho vals 1000, # num T vals rhoMin, # minimum density (kg/m^3) rhoMax, # maximum density (kg/m^3) Tmin, # minimum temperature (K) Tmax, # maximum temperature (K) units) eps0ANEOS = eosANEOS.specificThermalEnergy(rho0, 1.0) # Specific energy at 1K, reference density print "eps0ANEOS = ", eps0ANEOS #------------------------------------------------------------------------------- # Plot the pressure as a function of (rho, eps) #------------------------------------------------------------------------------- n = 50 drho = (rhoMax - rhoMin)/n rho = [rhoMin + i*drho for i in xrange(n + 1)] epsMin = eosANEOS.specificThermalEnergy(rho0, 0.1*Tmin) epsMax = eosANEOS.specificThermalEnergy(rho0, 1.1*Tmax) deps = (epsMax - epsMin)/n eps = [epsMin + i*deps for i in xrange(n + 1)] # Write the (rho, eps, P, cs) set to a file. f = open("SiOS_ANEOS.txt", "w") f.write(""" # ANEOS vs. Gruneisen EOS dump for SiO2 like material (all units CGS). # # ANEOS eps(1K) = %g # """ % (eps0ANEOS)) f.write((6*'"%20s "' + "\n") % ("rho (g/cm^3)", "eps (erg/g)", "P Grun (dyne)", "cs Grun (cm/sec)", "P ANEOS (dyne)", "cs ANEOS (cm/sec)")) PG, csG, PA, csA = [], [], [], [] for rhoi in rho: for epsi in eps: PG.append((rhoi, epsi, eosSiO2.pressure(rhoi, epsi - epsMin))) csG.append((rhoi, epsi, eosSiO2.soundSpeed(rhoi, epsi - epsMin))) PA.append((rhoi, epsi, eosANEOS.pressure(rhoi, epsi))) csA.append((rhoi, epsi, eosANEOS.soundSpeed(rhoi, epsi))) f.write((6*"%20g " + "\n") % (rhoi, epsi, PG[-1][-1], csG[-1][-1], PA[-1][-1], csA[-1][-1])) f.close() PGplot = Gnuplot.Gnuplot() PGplot.xlabel("rho (g/cm^3)") PGplot.ylabel("eps (erg/g)") PGdata = Gnuplot.Data(PG) PGplot.splot(PGdata, title="Pressure (Gruneisen)") csGplot = Gnuplot.Gnuplot() csGplot.xlabel("rho (g/cm^3)") csGplot.ylabel("eps (erg/g)") csGdata = Gnuplot.Data(csG) csGplot.splot(csGdata, title="sound speed (Gruneisen)") PAplot = Gnuplot.Gnuplot() PAplot.xlabel("rho (g/cm^3)") PAplot.ylabel("eps (erg/g)") PAdata = Gnuplot.Data(PA) PAplot.splot(PAdata, title="Pressure (ANEOS)") csAplot = Gnuplot.Gnuplot() csAplot.xlabel("rho (g/cm^3)") csAplot.ylabel("eps (erg/g)") csAdata = Gnuplot.Data(csA) csAplot.splot(csAdata, title="sound speed (ANEOS)")
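
# Post-processing sketch that could follow the script above: PG and PA are the
# (rho, eps, value) tuples built in the tabulation loop, so the two pressure
# tables can be compared directly. numpy is assumed to be available alongside
# the Spheral modules; the comparison itself is an illustrative addition.
import numpy as np
PG_arr = np.array(PG)    # columns: rho, eps, P (Gruneisen)
PA_arr = np.array(PA)    # columns: rho, eps, P (ANEOS)
denom = np.where(PG_arr[:, 2] != 0.0, PG_arr[:, 2], 1.0)
rel_diff = (PA_arr[:, 2] - PG_arr[:, 2]) / denom
print("max |relative pressure difference| = %g" % abs(rel_diff).max())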
import json from app import create_app, db from app.models import User, UserType from .base import BaseTest class TestMeals(BaseTest): def setUp(self): self.app = create_app(config_name='testing') self.client = self.app.test_client() with self.app.app_context(): db.create_all() self.setUpAuth() def data(self): return json.dumps({'name': 'ugali', 'cost': 30.0}) def test_can_create_meal(self): res = self.client.post( 'api/v1/meals', data=self.data(), headers=self.admin_headers) self.assertEqual(res.status_code, 201) self.assertIn(b'Successfully saved meal', res.data) def test_cannot_create_meal_without_cost(self): res = self.client.post( 'api/v1/meals', data=self.data_without(['cost']), headers=self.admin_headers) self.assertEqual(res.status_code, 400) self.assertIn(b'cost field is required', res.data) def test_cannot_create_meal_without_name(self): res = self.client.post( 'api/v1/meals', data=self.data_without(['name']), headers=self.admin_headers) self.assertEqual(res.status_code, 400) self.assertIn(b'name field is required', res.data) def test_cannot_create_meal_without_numeric_cost(self): res = self.client.post( 'api/v1/meals', data=self.data_with({ 'cost': 'abc' }), headers=self.admin_headers) self.assertEqual(res.status_code, 400) self.assertIn(b'cost must be a positive number', res.data) def test_cannot_create_meal_without_positive_cost(self): res = self.client.post( 'api/v1/meals', data=self.data_with({ 'cost': -20 }), headers=self.admin_headers) self.assertEqual(res.status_code, 400) self.assertIn(b'cost must be a positive number', res.data) def test_cannot_create_meal_without_unique_name(self): json_res = self.create_meal(self.data()) res = self.client.post( 'api/v1/meals', data=self.data_with({ 'name': json_res['meal']['name'].upper() }), headers=self.admin_headers) self.assertEqual(res.status_code, 400) self.assertIn(b'The name is already taken', res.data) def test_can_get_meal(self): json_res = self.create_meal(self.data()) res = self.client.get( 'api/v1/meals/{}'.format(json_res['meal']['id']), data=self.data(), headers=self.user_headers) self.assertEqual(res.status_code, 200) self.assertIn(b'ugali', res.data) def test_cannot_get_nonexistant_meal(self): json_res = self.create_meal(self.data()) res = self.client.get( 'api/v1/meals/1000', data=self.data(), headers=self.user_headers) self.assertEqual(res.status_code, 404) self.assertIn(b'Meal not found', res.data) def test_can_update_meal(self): json_res = self.create_meal(self.data()) # update without change res = self.client.put( 'api/v1/meals/{}'.format(json_res['meal']['id']), data=self.data(), headers=self.admin_headers) self.assertEqual(res.status_code, 200) self.assertIn(b'ugali', res.data) # update with different data res = self.client.put( 'api/v1/meals/{}'.format(json_res['meal']['id']), data=self.data_with({ 'name': 'beef' }), headers=self.admin_headers) self.assertEqual(res.status_code, 200) self.assertIn(b'beef', res.data) # update without data res = self.client.put( 'api/v1/meals/{}'.format(json_res['meal']['id']), data=self.data_without(['name', 'cost']), headers=self.admin_headers) self.assertEqual(res.status_code, 200) self.assertIn(b'beef', res.data) def test_user_cannot_update_meal(self): json_res = self.create_meal(self.data()) res = self.client.put( 'api/v1/meals/{}'.format(json_res['meal']['id']), data=self.data(), headers=self.user_headers) self.assertEqual(res.status_code, 401) self.assertIn(b'Unauthorized access', res.data) def test_cannot_update_nonexistant_meal(self): json_res = 
self.create_meal(self.data()) res = self.client.put( 'api/v1/meals/1000', data=self.data(), headers=self.admin_headers) self.assertEqual(res.status_code, 404) self.assertIn(b'Meal not found', res.data) def test_cannot_update_meal_without_unique_name(self): self.create_meal(self.data_with({'name': 'ugali'})) json_res = self.create_meal(self.data_with({'name': 'beef'})) res = self.client.put( 'api/v1/meals/{}'.format(json_res['meal']['id']), data=self.data_with({ 'name': 'ugali' }), headers=self.admin_headers) self.assertEqual(res.status_code, 400) self.assertIn(b'Meal name must be unique', res.data) def test_can_get_paginated_meals(self): self.create_meal(self.data_with({'name': 'beef'})) self.create_meal(self.data_with({'name': 'ugali'})) res = self.client.get( 'api/v1/meals', data=self.data(), headers=self.user_headers) self.assertEqual(res.status_code, 200) self.assertIn(b'Successfully retrieved meals', res.data) def test_can_delete_meal(self): json_res = self.create_meal(self.data()) res = self.client.delete( 'api/v1/meals/{}'.format(json_res['meal']['id']), data=self.data(), headers=self.admin_headers) self.assertEqual(res.status_code, 200) self.assertIn(b'Meal successfully deleted', res.data) res = self.client.get( 'api/v1/meals/{}'.format(json_res['meal']['id']), data=self.data(), headers=self.user_headers) self.assertEqual(res.status_code, 404) self.assertIn(b'Meal not found', res.data) def test_cannot_delete_nonexistant_meal(self): json_res = self.create_meal(self.data()) res = self.client.delete( 'api/v1/meals/1000', data=self.data(), headers=self.admin_headers) self.assertEqual(res.status_code, 404) self.assertIn(b'Meal not found', res.data) def create_meal(self, data): res = self.client.post( 'api/v1/meals', data=data, headers=self.admin_headers) self.assertEqual(res.status_code, 201) self.assertIn(b'Successfully saved meal', res.data) return self.to_dict(res) def tearDown(self): with self.app.app_context(): db.drop_all()
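
# Client-side sketch of the meal API exercised by the tests above, against a
# running instance. The endpoint path and JSON fields come from the tests; the
# base URL and the shape of the admin auth header are assumptions, since
# BaseTest's header construction is not shown in this file.
import json
import requests

BASE_URL = 'http://localhost:5000/api/v1'
admin_headers = {'Content-Type': 'application/json',
                 'Authorization': '<admin token here>'}   # placeholder, format assumed

res = requests.post(BASE_URL + '/meals',
                    data=json.dumps({'name': 'ugali', 'cost': 30.0}),
                    headers=admin_headers)
print(res.status_code, res.json())   # expect 201 and 'Successfully saved meal'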
<reponame>Sanjaykkukreja/AIML # -*- coding: utf-8 -*- #import pyaudio #import wave import pyaudio import wave import keyboard as kb import librosa import numpy as np import matplotlib.pyplot as plt; plt.rcdefaults() import matplotlib.pyplot as plt import torch import os from torch.autograd import Variable BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) def get_network(): net = torch.nn.Sequential() saved_net = torch.load(BASE_DIR + "/Hackathon-setup/net_speech_89.pt").cpu() for index, module in enumerate(saved_net): net.add_module("layer"+str(index),module) if (index+1)%17 == 0 : break return net def wait_for_key() : while True: try: if kb.is_pressed('s'): return else: pass except: continue #Use this function to return the deep learning audio features by providing the audio file path #filepath for path of the audio file #sr(samplingrate = 8000) for all the recordings and newly recorded audio files use the same sampling rate #n_mfcc =30 #n_mels = 128 #frames = 15 def get_features(filepath, sr=8000, n_mfcc=30, n_mels=128, frames = 15): y, sr = librosa.load(filepath, sr=sr) D = np.abs(librosa.stft(y))**2 S = librosa.feature.melspectrogram(S=D) S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels) log_S = librosa.power_to_db(S,ref=np.max) features = librosa.feature.mfcc(S=log_S, n_mfcc=n_mfcc) if features.shape[1] < frames : features = np.hstack((features, np.zeros((n_mfcc, frames - features.shape[1])))) elif features.shape[1] > frames: features = features[:, :frames] # Find 1st order delta_mfcc delta1_mfcc = librosa.feature.delta(features, order=1) # Find 2nd order delta_mfcc delta2_mfcc = librosa.feature.delta(features, order=2) features = np.hstack((delta1_mfcc.flatten(), delta2_mfcc.flatten())) features = features.flatten()[np.newaxis, :] features = Variable(torch.from_numpy(features)).float() deep_net = get_network() deep_features = deep_net(features) #print(features.shape) #print(audio_file) #features.flatten()[np.newaxis, :] return deep_features.data.numpy().flatten() #Function to record the voice sample, total recording time is 1 sec #Username is the identifier for the person recording the voice #j is the label for the sample For Example : if you recording the sample for "one" label is 1, for "yes" it is 11 etc. 
#v is the unique identifier for each sample recorded by a person #Example username is r1 , j is 1 (label), v is 10 (10th sample recorded by that person) audio file will be saved with the name 1_r1_10.wav #returns the filepath after recording def record_voice(Username, j, v, dir ): FORMAT = pyaudio.paInt16 CHANNELS = 1 RATE = 8000 CHUNK = 1024 RECORD_SECONDS = 1 WAVE_OUTPUT_FILENAME = "file.wav" audio = pyaudio.PyAudio() # start Recording stream = audio.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) print("recording...") frames = [] for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data) print("finished recording") # stop Recording stream.stop_stream() stream.close() audio.terminate() WAVE_OUTPUT_FILENAME = str(j)+"_"+Username+"_"+str(v)+".wav" #print(WAVE_OUTPUT_FILENAME) waveFile = wave.open(dir+WAVE_OUTPUT_FILENAME, 'wb') waveFile.setnchannels(CHANNELS) waveFile.setsampwidth(audio.get_sample_size(FORMAT)) waveFile.setframerate(RATE) waveFile.writeframes(b''.join(frames)) waveFile.close() return dir+WAVE_OUTPUT_FILENAME ##Given audio file path, this plays that wav file def play_audio(path) : CHUNK = 1024 wf = wave.open(path, 'rb') # instantiate PyAudio (1) p = pyaudio.PyAudio() # open stream (2) stream = p.open(format=p.get_format_from_width(wf.getsampwidth()), channels=wf.getnchannels(), rate=wf.getframerate(), output=True) # read data data = wf.readframes(CHUNK) # play stream (3) while len(data) > 0: stream.write(data) data = wf.readframes(CHUNK) # stop stream (4) stream.stop_stream() stream.close() # close PyAudio (5) p.terminate() #play_audio("/Users/raghavamodhugu/Documents/GitHub/Voice_commands_based_ordering_system/speech_data/0_b5_21.wav") def plotchart(objects, confidence): y_pos = np.arange(len(objects)) plt.bar(y_pos, confidence, align='center', alpha=0.5) plt.xticks(y_pos, objects) plt.ylabel('confidence') plt.title('latest confidence chart wise') plt.show()
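
# Usage sketch tying the helpers above together: record one 1-second sample,
# extract its deep features, then play it back. The username, label, sample
# index and output directory are illustrative assumptions; the directory must
# already exist and a microphone is required.
wav_path = record_voice('r1', 1, 10, './speech_data/')
features = get_features(wav_path)    # flat numpy array of deep audio features
play_audio(wav_path)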
<reponame>michael-ross-ven/vengeance<filename>dist/vengeance-1.0.3.tar/dist/vengeance-1.0.3/vengeance/classes/log_cls.py<gh_stars>1-10 import os import sys import textwrap from logging import Logger from logging import Formatter from logging import FileHandler from logging import StreamHandler from logging import DEBUG from concurrent.futures import ProcessPoolExecutor from .. util.filesystem import make_dirs # noinspection PyTypeChecker class log_cls(Logger): def __init__(self, name, f_dir=None, log_format=None): super().__init__(name) self.log_format = log_format self.formatter = None self.child_desig = None self.err_msg = '' self._callback = None self._handlers = {'file': None, 'stream': None} self._set_level() self._add_formatter() self._add_file_handler(f_dir) self._add_stream_handler() def print_message(self, msg): fh = self._handlers['file'] sh = self._handlers['stream'] if fh: fh.formatter = None if sh: sh.formatter = None self.info(msg) if fh: fh.formatter = self.formatter if sh: sh.formatter = self.formatter def add_parent(self, p_log): self.parent = p_log self._close_stream_handlers() def add_callback_function(self, f): self._callback = f def callback(self): if self._callback: self._callback() def _set_level(self): self.setLevel(DEBUG) def _add_formatter(self): if self.log_format is None: self.log_format = '%(asctime)s - %(levelname)s - %(message)s' self.formatter = Formatter(self.log_format) self.formatter.default_time_format = '%Y-%m-%d %I:%M:%S %p' def _add_file_handler(self, f_dir): if f_dir is None: return make_dirs(f_dir) f_name = self.name if not f_name.endswith('.log'): f_name += '.log' h = FileHandler(str(f_dir) + f_name, mode='w') h.setLevel(self.level) h.setFormatter(self.formatter) self.addHandler(h) self._handlers['file'] = h def _add_stream_handler(self): h = StreamHandler() h.setLevel(self.level) h.setFormatter(self.formatter) self.addHandler(h) self._handlers['stream'] = h def _close_stream_handlers(self): for h in self.__stream_handlers(): h.close() self.removeHandler(h) def _close_file_handlers(self): for h in self.__file_handlers(): h.close() self.removeHandler(h) def __stream_handlers(self): for h in self.handlers: if type(h) == StreamHandler: yield h def __file_handlers(self): for h in self.handlers: if isinstance(h, FileHandler): yield h def exception_handler(self, e_type, e_msg, e_trace): self.err_msg = str(e_msg) has_child = not bool(self.child_desig) child_frame = e_trace s_frame = e_trace # naviagate to most recent stack frame while s_frame.tb_next is not None: if has_child is False: if self.child_desig in frame_filename(s_frame): child_frame = s_frame has_child = True s_frame = s_frame.tb_next code_file = frame_filename(s_frame) file = os.path.split(code_file)[1] line = s_frame.tb_lineno out_msg = ''' ____________________________ vengeance ____________________________ the result 'w+resign' was added to the game information "{e_msg}" error type: <{typ_msg}> file: {file}, line: {line} ____________________________ vengeance ____________________________ ''' out_msg = textwrap.dedent(out_msg) out_msg = out_msg.format(name=self.name, e_msg=e_msg, typ_msg=e_type.__name__, file=file, line=line) out_msg = '\n\n\n{}\n\n\n'.format(out_msg) # propagate error info through base class exception self.error(out_msg, exc_info=(e_type, e_msg, child_frame)) self._close_file_handlers() self.callback() def __repr__(self): cls_name = '<{}>'.format(self.__class__.__name__) return "{}: '{}'".format(cls_name, self.name) class pool_executor_log_cls(ProcessPoolExecutor): def 
__init__(self, max_workers=None,
                 base_name='pool_executor_log_cls',
                 f_dir=None):

        self.base_name = base_name
        self.f_dir = f_dir

        super().__init__(max_workers)

    def submit(self, fn, *args, **kwargs):
        kwargs['i'] = self._queue_count
        kwargs['base_name'] = self.base_name
        kwargs['f_dir'] = self.f_dir

        return super().submit(function_wrapper, fn, *args, **kwargs)


# noinspection PyBroadException
def function_wrapper(fn, *args, **kwargs):
    i = kwargs.pop('i') + 1
    base_name = kwargs.pop('base_name')
    f_dir = kwargs.pop('f_dir')

    try:
        return fn(*args, **kwargs)
    except Exception:
        name = '{}_{}.log'.format(base_name, i)
        log_ = log_cls(name, f_dir)
        log_.exception_handler(*sys.exc_info())


def frame_filename(s_frame):
    return s_frame.tb_frame.f_code.co_filename
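A minimal usage sketch of the logger and the pooled executor above; the import path is taken from the file marker, and the ./logs/ directory, flaky_task and the example log file name are hypothetical. Each submitted call is wrapped by function_wrapper, so an exception in a worker lands in its own numbered log file instead of being lost.

from vengeance.classes.log_cls import log_cls, pool_executor_log_cls

def flaky_task(x):
    if x == 3:
        raise ValueError('boom')
    return x * x

if __name__ == '__main__':
    log = log_cls('demo', f_dir='./logs/')          # writes ./logs/demo.log
    log.print_message('unformatted banner line')
    log.info('formatted message')

    with pool_executor_log_cls(max_workers=2,
                               base_name='worker',
                               f_dir='./logs/') as pool:
        futures = [pool.submit(flaky_task, x) for x in range(5)]
        results = [f.result() for f in futures]
        # the call that raised returns None; its traceback is written to a
        # numbered log file such as ./logs/worker_4.log
        print(results)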
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import inspect import itertools import warnings from collections import defaultdict from contextlib import contextmanager from typing import Any, List, Dict from pathlib import Path def import_(target: str, allow_none: bool = False) -> Any: if target is None: return None path, identifier = target.rsplit('.', 1) module = __import__(path, globals(), locals(), [identifier]) return getattr(module, identifier) def version_larger_equal(a: str, b: str) -> bool: # TODO: refactor later a = a.split('+')[0] b = b.split('+')[0] return tuple(map(int, a.split('.'))) >= tuple(map(int, b.split('.'))) _last_uid = defaultdict(int) _DEFAULT_MODEL_NAMESPACE = 'model' def uid(namespace: str = 'default') -> int: _last_uid[namespace] += 1 return _last_uid[namespace] def reset_uid(namespace: str = 'default') -> None: _last_uid[namespace] = 0 def get_module_name(cls_or_func): module_name = cls_or_func.__module__ if module_name == '__main__': # infer the module name with inspect for frm in inspect.stack(): if inspect.getmodule(frm[0]).__name__ == '__main__': # main module found main_file_path = Path(inspect.getsourcefile(frm[0])) if not Path().samefile(main_file_path.parent): raise RuntimeError(f'You are using "{main_file_path}" to launch your experiment, ' f'please launch the experiment under the directory where "{main_file_path.name}" is located.') module_name = main_file_path.stem break if module_name == '__main__': warnings.warn('Callstack exhausted but main module still not found. This will probably cause issues that the ' 'function/class cannot be imported.') # NOTE: this is hacky. As torchscript retrieves LSTM's source code to do something. # to make LSTM's source code can be found, we should assign original LSTM's __module__ to # the wrapped LSTM's __module__ # TODO: find out all the modules that have the same requirement as LSTM if f'{cls_or_func.__module__}.{cls_or_func.__name__}' == 'torch.nn.modules.rnn.LSTM': module_name = cls_or_func.__module__ return module_name def get_importable_name(cls, relocate_module=False): module_name = get_module_name(cls) if relocate_module else cls.__module__ return module_name + '.' + cls.__name__ class NoContextError(Exception): pass class ContextStack: """ This is to maintain a globally-accessible context envinronment that is visible to everywhere. Use ``with ContextStack(namespace, value):`` to initiate, and use ``get_current_context(namespace)`` to get the corresponding value in the namespace. Note that this is not multi-processing safe. Also, the values will get cleared for a new process. """ _stack: Dict[str, List[Any]] = defaultdict(list) def __init__(self, key: str, value: Any): self.key = key self.value = value def __enter__(self): self.push(self.key, self.value) return self def __exit__(self, *args, **kwargs): self.pop(self.key) @classmethod def push(cls, key: str, value: Any): cls._stack[key].append(value) @classmethod def pop(cls, key: str) -> None: cls._stack[key].pop() @classmethod def top(cls, key: str) -> Any: if not cls._stack[key]: raise NoContextError('Context is empty.') return cls._stack[key][-1] class ModelNamespace: """ To create an individual namespace for models to enable automatic numbering. """ def __init__(self, key: str = _DEFAULT_MODEL_NAMESPACE): # for example, key: "model_wrapper" self.key = key def __enter__(self): # For example, currently the top of stack is [1, 2, 2], and [1, 2, 2, 3] is used, # the next thing up is [1, 2, 2, 4]. 
# `reset_uid` to count from zero for "model_wrapper_1_2_2_4" try: current_context = ContextStack.top(self.key) next_uid = uid(self._simple_name(self.key, current_context)) ContextStack.push(self.key, current_context + [next_uid]) reset_uid(self._simple_name(self.key, current_context + [next_uid])) except NoContextError: ContextStack.push(self.key, []) reset_uid(self._simple_name(self.key, [])) def __exit__(self, *args, **kwargs): ContextStack.pop(self.key) @staticmethod def next_label(key: str = _DEFAULT_MODEL_NAMESPACE) -> str: try: current_context = ContextStack.top(key) except NoContextError: # fallback to use "default" namespace return ModelNamespace._simple_name('default', [uid()]) next_uid = uid(ModelNamespace._simple_name(key, current_context)) return ModelNamespace._simple_name(key, current_context + [next_uid]) @staticmethod def _simple_name(key: str, lst: List[Any]) -> str: return key + ''.join(['_' + str(k) for k in lst]) def get_current_context(key: str) -> Any: return ContextStack.top(key) # map variables to prefix in the state dict # e.g., {'upsample': 'mynet.module.deconv2.upsample_layer'} STATE_DICT_PY_MAPPING = '_mapping_' # map variables to `prefix`.`value` in the state dict # e.g., {'upsample': 'choice3.upsample_layer'}, # which actually means {'upsample': 'mynet.module.choice3.upsample_layer'}, # and 'upsample' is also in `mynet.module`. STATE_DICT_PY_MAPPING_PARTIAL = '_mapping_partial_' @contextmanager def original_state_dict_hooks(model: Any): """ Use this patch if you want to save/load state dict in the original state dict hierarchy. For example, when you already have a state dict for the base model / search space (which often happens when you have trained a supernet with one-shot strategies), the state dict isn't organized in the same way as when a sub-model is sampled from the search space. This patch will help the modules in the sub-model find the corresponding module in the base model. The code looks like, .. code-block:: python with original_state_dict_hooks(model): model.load_state_dict(state_dict_from_supernet, strict=False) # supernet has extra keys Or vice-versa, .. code-block:: python with original_state_dict_hooks(model): supernet_style_state_dict = model.state_dict() """ import torch.nn as nn assert isinstance(model, nn.Module), 'PyTorch is the only supported framework for now.' # the following are written for pytorch only # first get the full mapping full_mapping = {} def full_mapping_in_module(src_prefix, tar_prefix, module): if hasattr(module, STATE_DICT_PY_MAPPING): # only values are complete local_map = getattr(module, STATE_DICT_PY_MAPPING) elif hasattr(module, STATE_DICT_PY_MAPPING_PARTIAL): # keys and values are both incomplete local_map = getattr(module, STATE_DICT_PY_MAPPING_PARTIAL) local_map = {k: tar_prefix + v for k, v in local_map.items()} else: # no mapping local_map = {} if '__self__' in local_map: # special case, overwrite prefix tar_prefix = local_map['__self__'] + '.' for key, value in local_map.items(): if key != '' and key not in module._modules: # not a sub-module, probably a parameter full_mapping[src_prefix + key] = value if src_prefix != tar_prefix: # To deal with leaf nodes. 
for name, value in itertools.chain(module._parameters.items(), module._buffers.items()): # direct children if value is None or name in module._non_persistent_buffers_set: # it won't appear in state dict continue if (src_prefix + name) not in full_mapping: full_mapping[src_prefix + name] = tar_prefix + name for name, child in module.named_children(): # sub-modules full_mapping_in_module( src_prefix + name + '.', local_map.get(name, tar_prefix + name) + '.', # if mapping doesn't exist, respect the prefix child ) full_mapping_in_module('', '', model) def load_state_dict_hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): reverse_mapping = defaultdict(list) for src, tar in full_mapping.items(): reverse_mapping[tar].append(src) transf_state_dict = {} for src, tar_keys in reverse_mapping.items(): if src in state_dict: value = state_dict.pop(src) for tar in tar_keys: transf_state_dict[tar] = value else: missing_keys.append(src) state_dict.update(transf_state_dict) def state_dict_hook(module, destination, prefix, local_metadata): result = {} for src, tar in full_mapping.items(): if src in destination: result[tar] = destination.pop(src) else: raise KeyError(f'"{src}" not in state dict, but found in mapping.') destination.update(result) try: hooks = [] hooks.append(model._register_load_state_dict_pre_hook(load_state_dict_hook)) hooks.append(model._register_state_dict_hook(state_dict_hook)) yield finally: for hook in hooks: hook.remove()
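A small sketch, added here only for illustration, of how ContextStack and ModelNamespace behave when this module's classes are in scope; the 'demo' key and the value pushed onto it are hypothetical.

if __name__ == '__main__':
    # ContextStack: push a value under a key for the duration of a with-block
    # and read it back anywhere via get_current_context().
    with ContextStack('demo', {'trial': 1}):
        assert get_current_context('demo') == {'trial': 1}

    # ModelNamespace: labels are numbered per namespace, e.g. "model_1",
    # "model_2", ...; nesting namespaces extends the suffix ("model_1_1").
    with ModelNamespace():
        print(ModelNamespace.next_label())   # model_1
        print(ModelNamespace.next_label())   # model_2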
""" Copyright (c) 2015-2018 Wind River Systems, Inc. SPDX-License-Identifier: Apache-2.0 """ from __future__ import print_function from six.moves import configparser import os import subprocess import sys import textwrap import time from controllerconfig import utils import uuid from controllerconfig.common import constants from controllerconfig.common import log from controllerconfig.common import rest_api_utils as rutils from controllerconfig.common.exceptions import KeystoneFail from configutilities.common import utils as cutils from configutilities.common.configobjects import REGION_CONFIG from configutilities.common.configobjects import SUBCLOUD_CONFIG from configutilities import ConfigFail from controllerconfig.configassistant import ConfigAssistant from netaddr import IPAddress from controllerconfig.systemconfig import parse_system_config from controllerconfig.systemconfig import configure_management_interface from controllerconfig.systemconfig import create_cgcs_config_file from configutilities import DEFAULT_DOMAIN_NAME # Temporary file for building cgcs_config TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config" # For region mode, this is the list of users that we expect to find configured # in the region config file as <USER>_USER_KEY and <USER>_PASSWORD. # For distributed cloud, this is the list of users that we expect to find # configured in keystone. The password for each user will be retrieved from # the DC Manager in the system controller and added to the region config file. # The format is: # REGION_NAME = key in region config file for this user's region # USER_KEY = key in region config file for this user's name # USER_NAME = user name in keystone REGION_NAME = 0 USER_KEY = 1 USER_NAME = 2 EXPECTED_USERS = [ ('REGION_2_SERVICES', 'SYSINV', 'sysinv'), ('REGION_2_SERVICES', 'PATCHING', 'patching'), ('REGION_2_SERVICES', 'NFV', 'vim'), ('REGION_2_SERVICES', 'MTCE', 'mtce'), ('REGION_2_SERVICES', 'FM', 'fm'), ('REGION_2_SERVICES', 'BARBICAN', 'barbican')] # This a description of the region 2 endpoints that we expect to configure or # find configured in keystone. 
The format is as follows: # SERVICE_NAME = key in region config file for this service's name # SERVICE_TYPE = key in region config file for this service's type # PUBLIC_URL = required publicurl - {} is replaced with CAM floating IP # INTERNAL_URL = required internalurl - {} is replaced with CLM floating IP # ADMIN_URL = required adminurl - {} is replaced with CLM floating IP # DESCRIPTION = Description of the service (for automatic configuration) SERVICE_NAME = 0 SERVICE_TYPE = 1 PUBLIC_URL = 2 INTERNAL_URL = 3 ADMIN_URL = 4 DESCRIPTION = 5 EXPECTED_REGION2_ENDPOINTS = [ ('SYSINV_SERVICE_NAME', 'SYSINV_SERVICE_TYPE', 'http://{}:6385/v1', 'http://{}:6385/v1', 'http://{}:6385/v1', 'SysInv Service'), ('PATCHING_SERVICE_NAME', 'PATCHING_SERVICE_TYPE', 'http://{}:15491', 'http://{}:5491', 'http://{}:5491', 'Patching Service'), ('NFV_SERVICE_NAME', 'NFV_SERVICE_TYPE', 'http://{}:4545', 'http://{}:4545', 'http://{}:4545', 'Virtual Infrastructure Manager'), ('FM_SERVICE_NAME', 'FM_SERVICE_TYPE', 'http://{}:18002', 'http://{}:18002', 'http://{}:18002', 'Fault Management Service'), ('BARBICAN_SERVICE_NAME', 'BARBICAN_SERVICE_TYPE', 'http://{}:9311', 'http://{}:9311', 'http://{}:9311', 'OpenStack Key Manager Service'), ] EXPECTED_KEYSTONE_ENDPOINT = ( 'KEYSTONE_SERVICE_NAME', 'KEYSTONE_SERVICE_TYPE', 'http://{}:8081/keystone/main/v2.0', 'http://{}:8081/keystone/main/v2.0', 'http://{}:8081/keystone/admin/v2.0', 'OpenStack Identity') LOG = log.get_logger(__name__) def validate_region_one_keystone_config(region_config, token, api_url, users, services, endpoints, create=False, config_type=REGION_CONFIG, user_config=None): """ Validate that the required region one configuration are in place, if create is True, any missing entries will be set up to be added to keystone later on by puppet. """ region_1_name = region_config.get('SHARED_SERVICES', 'REGION_NAME') region_2_name = region_config.get('REGION_2_SERVICES', 'REGION_NAME') # Determine what keystone entries are expected expected_users = EXPECTED_USERS expected_region_2_endpoints = EXPECTED_REGION2_ENDPOINTS # Keystone is always in region 1 expected_region_1_endpoints = [EXPECTED_KEYSTONE_ENDPOINT] domains = rutils.get_domains(token, api_url) # Verify service project domain, creating if necessary if region_config.has_option('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME'): project_domain = region_config.get('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME') else: project_domain = DEFAULT_DOMAIN_NAME project_domain_id = domains.get_domain_id(project_domain) if not project_domain_id: if create and config_type == REGION_CONFIG: region_config.set('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME', project_domain) else: raise ConfigFail( "Keystone configuration error: service project domain '%s' is " "not configured." % project_domain) # Verify service project, creating if necessary if region_config.has_option('SHARED_SERVICES', 'SERVICE_PROJECT_NAME'): service_project = region_config.get('SHARED_SERVICES', 'SERVICE_PROJECT_NAME') else: service_project = region_config.get('SHARED_SERVICES', 'SERVICE_TENANT_NAME') projects = rutils.get_projects(token, api_url) project_id = projects.get_project_id(service_project) if not project_id: if create and config_type == REGION_CONFIG: region_config.set('SHARED_SERVICES', 'SERVICE_TENANT_NAME', service_project) else: raise ConfigFail( "Keystone configuration error: service project '%s' is not " "configured." 
% service_project) # Verify and retrieve the id of the admin role (only needed when creating) roles = rutils.get_roles(token, api_url) role_id = roles.get_role_id('admin') if not role_id and create: raise ConfigFail("Keystone configuration error: No admin role present") # verify that the service user domain is configured, creating if necessary if region_config.has_option('REGION_2_SERVICES', 'USER_DOMAIN_NAME'): user_domain = region_config.get('REGION_2_SERVICES', 'USER_DOMAIN_NAME') else: user_domain = DEFAULT_DOMAIN_NAME domains = rutils.get_domains(token, api_url) user_domain_id = domains.get_domain_id(user_domain) if not user_domain_id: if create and config_type == REGION_CONFIG: region_config.set('REGION_2_SERVICES', 'USER_DOMAIN_NAME') else: raise ConfigFail( "Unable to obtain id for for %s domain. Please ensure " "keystone configuration is correct." % user_domain) auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL') if config_type == REGION_CONFIG: # Verify that all users are configured and can retrieve a token, # Optionally set up to create missing users + their admin role for user in expected_users: auth_user = region_config.get(user[REGION_NAME], user[USER_KEY] + '_USER_NAME') user_id = users.get_user_id(auth_user) auth_password = None if not user_id and create: if not region_config.has_option( user[REGION_NAME], user[USER_KEY] + '_PASSWORD'): # Generate random password for new user via # /dev/urandom if necessary try: region_config.set( user[REGION_NAME], user[USER_KEY] + '_PASSWORD', uuid.uuid4().hex[:10] + "TiC2*") except Exception as e: raise ConfigFail("Failed to generate random user " "password: %s" % e) elif user_id and user_domain_id and\ project_id and project_domain_id: # If there is a user_id existing then we cannot use # a randomized password as it was either created by # a previous run of regionconfig or was created as # part of Titanium Cloud Primary region config if not region_config.has_option( user[REGION_NAME], user[USER_KEY] + '_PASSWORD'): raise ConfigFail("Failed to find configured password " "for pre-defined user %s" % auth_user) auth_password = region_config.get(user[REGION_NAME], user[USER_KEY] + '_PASSWORD') # Verify that the existing user can seek an auth token user_token = rutils.get_token(auth_url, service_project, auth_user, auth_password, user_domain, project_domain) if not user_token: raise ConfigFail( "Unable to obtain keystone token for %s user. " "Please ensure keystone configuration is correct." % auth_user) else: # For subcloud configs we re-use the users from the system controller # (the primary region). for user in expected_users: auth_user = user[USER_NAME] user_id = users.get_user_id(auth_user) auth_password = <PASSWORD> if user_id: # Add the password to the region config so it will be used when # configuring services. auth_password = user_config.get_password(user[USER_NAME]) region_config.set(user[REGION_NAME], user[USER_KEY] + '_PASSWORD', auth_password) else: raise ConfigFail( "Unable to obtain user (%s). Please ensure " "keystone configuration is correct." % user[USER_NAME]) # Verify that the existing user can seek an auth token user_token = rutils.get_token(auth_url, service_project, auth_user, auth_password, user_domain, project_domain) if not user_token: raise ConfigFail( "Unable to obtain keystone token for %s user. " "Please ensure keystone configuration is correct." 
% auth_user) # Verify that region two endpoints & services for shared services # match our requirements, optionally creating missing entries for endpoint in expected_region_1_endpoints: service_name = region_config.get('SHARED_SERVICES', endpoint[SERVICE_NAME]) service_type = region_config.get('SHARED_SERVICES', endpoint[SERVICE_TYPE]) try: service_id = services.get_service_id(service_name, service_type) except KeystoneFail as ex: # No option to create services for region one, if those are not # present, something is seriously wrong raise ex # Extract region one url information from the existing endpoint entry: try: endpoints.get_service_url( region_1_name, service_id, "public") endpoints.get_service_url( region_1_name, service_id, "internal") endpoints.get_service_url( region_1_name, service_id, "admin") except KeystoneFail as ex: # Fail since shared services endpoints are not found raise ConfigFail("Endpoint for shared service %s " "is not configured" % service_name) # Verify that region two endpoints & services match our requirements, # optionally creating missing entries public_address = cutils.get_optional(region_config, 'CAN_NETWORK', 'CAN_IP_START_ADDRESS') if not public_address: public_address = cutils.get_optional(region_config, 'CAN_NETWORK', 'CAN_IP_FLOATING_ADDRESS') if not public_address: public_address = cutils.get_optional(region_config, 'OAM_NETWORK', 'IP_START_ADDRESS') if not public_address: # AIO-SX configuration public_address = cutils.get_optional(region_config, 'OAM_NETWORK', 'IP_ADDRESS') if not public_address: public_address = region_config.get('OAM_NETWORK', 'IP_FLOATING_ADDRESS') if region_config.has_section('CLM_NETWORK'): internal_address = region_config.get('CLM_NETWORK', 'CLM_IP_START_ADDRESS') else: internal_address = region_config.get('MGMT_NETWORK', 'IP_START_ADDRESS') internal_infra_address = cutils.get_optional( region_config, 'BLS_NETWORK', 'BLS_IP_START_ADDRESS') if not internal_infra_address: internal_infra_address = cutils.get_optional( region_config, 'INFRA_NETWORK', 'IP_START_ADDRESS') for endpoint in expected_region_2_endpoints: service_name = cutils.get_service(region_config, 'REGION_2_SERVICES', endpoint[SERVICE_NAME]) service_type = cutils.get_service(region_config, 'REGION_2_SERVICES', endpoint[SERVICE_TYPE]) service_id = services.get_service_id(service_name, service_type) expected_public_url = endpoint[PUBLIC_URL].format(public_address) if internal_infra_address and service_type == 'image': nfs_address = IPAddress(internal_infra_address) + 3 expected_internal_url = endpoint[INTERNAL_URL].format(nfs_address) expected_admin_url = endpoint[ADMIN_URL].format(nfs_address) else: expected_internal_url = endpoint[INTERNAL_URL].format( internal_address) expected_admin_url = endpoint[ADMIN_URL].format(internal_address) try: public_url = endpoints.get_service_url(region_2_name, service_id, "public") internal_url = endpoints.get_service_url(region_2_name, service_id, "internal") admin_url = endpoints.get_service_url(region_2_name, service_id, "admin") except KeystoneFail as ex: # The endpoint will be created optionally if not create: raise ConfigFail("Keystone configuration error: Unable to " "find endpoints for service %s" % service_name) continue # Validate the existing endpoints for endpointtype, found, expected in [ ('public', public_url, expected_public_url), ('internal', internal_url, expected_internal_url), ('admin', admin_url, expected_admin_url)]: if found != expected: raise ConfigFail( "Keystone configuration error for:\nregion ({}), " "service 
name ({}), service type ({})\n" "expected {}: {}\nconfigured {}: {}".format( region_2_name, service_name, service_type, endpointtype, expected, endpointtype, found)) def validate_region_one_ldap_config(region_config): """Validate ldap on region one by a ldap search""" ldapserver_uri = region_config.get('SHARED_SERVICES', 'LDAP_SERVICE_URL') cmd = ["ldapsearch", "-xH", ldapserver_uri, "-b", "dc=cgcs,dc=local", "(objectclass=*)"] try: with open(os.devnull, "w") as fnull: subprocess.check_call(cmd, stdout=fnull, stderr=fnull) except subprocess.CalledProcessError: raise ConfigFail("LDAP configuration error: not accessible") def set_subcloud_config_defaults(region_config): """Set defaults in region_config for subclouds""" # We always create endpoints for subclouds region_config.set('REGION_2_SERVICES', 'CREATE', 'Y') # We use the default service project region_config.set('SHARED_SERVICES', 'SERVICE_PROJECT_NAME', constants.DEFAULT_SERVICE_PROJECT_NAME) # Add the necessary users to the region config, which will allow the # validation code to run and will later result in services being # configured to use the users from the system controller. expected_users = EXPECTED_USERS for user in expected_users: # Add the user to the region config so to allow validation. region_config.set(user[REGION_NAME], user[USER_KEY] + '_USER_NAME', user[USER_NAME]) def configure_region(config_file, config_type=REGION_CONFIG): """Configure the region""" # Parse the region/subcloud config file print("Parsing configuration file... ", end=' ') region_config = parse_system_config(config_file) print("DONE") if config_type == SUBCLOUD_CONFIG: # Set defaults in region_config for subclouds set_subcloud_config_defaults(region_config) # Validate the region/subcloud config file print("Validating configuration file... ", end=' ') try: create_cgcs_config_file(None, region_config, None, None, None, config_type=config_type, validate_only=True) except configparser.Error as e: raise ConfigFail("Error parsing configuration file %s: %s" % (config_file, e)) print("DONE") # Bring up management interface to allow us to reach Region 1 print("Configuring management interface... ", end=' ') configure_management_interface(region_config, config_type=config_type) print("DONE") # Get token from keystone print("Retrieving keystone token...", end=' ') sys.stdout.flush() auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL') if region_config.has_option('SHARED_SERVICES', 'ADMIN_TENANT_NAME'): auth_project = region_config.get('SHARED_SERVICES', 'ADMIN_TENANT_NAME') else: auth_project = region_config.get('SHARED_SERVICES', 'ADMIN_PROJECT_NAME') auth_user = region_config.get('SHARED_SERVICES', 'ADMIN_USER_NAME') auth_password = region_config.get('SHARED_SERVICES', 'ADMIN_PASSWORD') if region_config.has_option('SHARED_SERVICES', 'ADMIN_USER_DOMAIN'): admin_user_domain = region_config.get('SHARED_SERVICES', 'ADMIN_USER_DOMAIN') else: admin_user_domain = DEFAULT_DOMAIN_NAME if region_config.has_option('SHARED_SERVICES', 'ADMIN_PROJECT_DOMAIN'): admin_project_domain = region_config.get('SHARED_SERVICES', 'ADMIN_PROJECT_DOMAIN') else: admin_project_domain = DEFAULT_DOMAIN_NAME attempts = 0 token = None # Wait for connectivity to region one. It can take some time, especially if # we have LAG on the management network. while not token: token = rutils.get_token(auth_url, auth_project, auth_user, auth_password, admin_user_domain, admin_project_domain) if not token: attempts += 1 if attempts < 10: print("\rRetrieving keystone token...{}".format( '.' 
* attempts), end=' ') sys.stdout.flush() time.sleep(10) else: raise ConfigFail( "Unable to obtain keystone token. Please ensure " "networking and keystone configuration is correct.") print("DONE") # Get services, endpoints, users and domains from keystone print("Retrieving services, endpoints and users from keystone... ", end=' ') region_name = region_config.get('SHARED_SERVICES', 'REGION_NAME') service_name = region_config.get('SHARED_SERVICES', 'KEYSTONE_SERVICE_NAME') service_type = region_config.get('SHARED_SERVICES', 'KEYSTONE_SERVICE_TYPE') api_url = token.get_service_url( region_name, service_name, service_type, "admin").replace( 'v2.0', 'v3') services = rutils.get_services(token, api_url) endpoints = rutils.get_endpoints(token, api_url) users = rutils.get_users(token, api_url) domains = rutils.get_domains(token, api_url) if not services or not endpoints or not users: raise ConfigFail( "Unable to retrieve services, endpoints or users from keystone. " "Please ensure networking and keystone configuration is correct.") print("DONE") user_config = None if config_type == SUBCLOUD_CONFIG: # Retrieve subcloud configuration from dcmanager print("Retrieving configuration from dcmanager... ", end=' ') dcmanager_url = token.get_service_url( 'SystemController', 'dcmanager', 'dcmanager', "admin") subcloud_name = region_config.get('REGION_2_SERVICES', 'REGION_NAME') subcloud_management_subnet = region_config.get('MGMT_NETWORK', 'CIDR') hash_string = subcloud_name + subcloud_management_subnet subcloud_config = rutils.get_subcloud_config(token, dcmanager_url, subcloud_name, hash_string) user_config = subcloud_config['users'] print("DONE") try: # Configure missing region one keystone entries create = True # Prepare region configuration for puppet to create keystone identities if (region_config.has_option('REGION_2_SERVICES', 'CREATE') and region_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'): print("Preparing keystone configuration... ", end=' ') # If keystone configuration for this region already in place, # validate it only else: # Validate region one keystone config create = False print("Validating keystone configuration... ", end=' ') validate_region_one_keystone_config(region_config, token, api_url, users, services, endpoints, create, config_type=config_type, user_config=user_config) print("DONE") # validate ldap if it is shared if region_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL'): print("Validating ldap configuration... ", end=' ') validate_region_one_ldap_config(region_config) print("DONE") # Create cgcs_config file print("Creating config apply file... 
", end=' ') try: create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, region_config, services, endpoints, domains, config_type=config_type) except configparser.Error as e: raise ConfigFail("Error parsing configuration file %s: %s" % (config_file, e)) print("DONE") # Configure controller assistant = ConfigAssistant() assistant.configure(TEMP_CGCS_CONFIG_FILE, display_config=False) except ConfigFail as e: print("A configuration failure has occurred.", end=' ') raise e def show_help_region(): print("Usage: %s [OPTIONS] <CONFIG_FILE>" % sys.argv[0]) print(textwrap.fill( "Perform region configuration using the region " "configuration from CONFIG_FILE.", 80)) print("--allow-ssh Allow configuration to be executed in " "ssh\n") def show_help_subcloud(): print("Usage: %s [OPTIONS] <CONFIG_FILE>" % sys.argv[0]) print(textwrap.fill( "Perform subcloud configuration using the subcloud " "configuration from CONFIG_FILE.", 80)) print("--allow-ssh Allow configuration to be executed in " "ssh\n") def config_main(config_type=REGION_CONFIG): allow_ssh = False if config_type == REGION_CONFIG: config_file = "/home/wrsroot/region_config" elif config_type == SUBCLOUD_CONFIG: config_file = "/home/wrsroot/subcloud_config" else: raise ConfigFail("Invalid config_type: %s" % config_type) arg = 1 while arg < len(sys.argv): if sys.argv[arg] in ['--help', '-h', '-?']: if config_type == REGION_CONFIG: show_help_region() else: show_help_subcloud() exit(1) elif sys.argv[arg] == "--allow-ssh": allow_ssh = True elif arg == len(sys.argv) - 1: config_file = sys.argv[arg] else: print("Invalid option. Use --help for more information.") exit(1) arg += 1 log.configure() # Check if that the command is being run from the console if utils.is_ssh_parent(): if allow_ssh: print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80)) print('') else: print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80)) exit(1) if not os.path.isfile(config_file): print("Config file %s does not exist." % config_file) exit(1) try: configure_region(config_file, config_type=config_type) except KeyboardInterrupt: print("\nAborting configuration") except ConfigFail as e: LOG.exception(e) print("\nConfiguration failed: {}".format(e)) except Exception as e: LOG.exception(e) print("\nConfiguration failed: {}".format(e)) else: print("\nConfiguration finished successfully.") finally: if os.path.isfile(TEMP_CGCS_CONFIG_FILE): os.remove(TEMP_CGCS_CONFIG_FILE) def region_main(): config_main(REGION_CONFIG) def subcloud_main(): config_main(SUBCLOUD_CONFIG)
# WARNING: Do not edit by hand, this file was generated by Crank: # # https://github.com/gocardless/crank # import json import requests import responses from nose.tools import assert_equals, assert_in, assert_raises from gocardless_pro import api_client from gocardless_pro import errors from . import helpers access_token = 'access-token-xyz' client = api_client.ApiClient('http://example.com', access_token) @responses.activate def test_uses_correct_url(): responses.add(responses.GET, 'http://example.com/test', body='{}') client.get('/test') @responses.activate def test_authorization_header_present(): responses.add(responses.GET, 'http://example.com/test', body='{}') client.get('/test') assert_equals(responses.calls[0].request.headers['authorization'], 'Bearer ' + access_token) @responses.activate def test_includes_custom_header(): responses.add(responses.GET, 'http://example.com/test', body='{}') client.get('/test', headers={'Accept-Language': 'fr'}) assert_equals(responses.calls[0].request.headers['accept-language'], 'fr') @responses.activate def test_includes_query_params(): responses.add(responses.GET, 'http://example.com/test', body='{}') client.get('/test', params={'page': '1'}) assert_in('?page=1', responses.calls[0].request.url) @responses.activate def test_post_includes_json_body(): responses.add(responses.POST, 'http://example.com/test', body='{}') client.post('/test', body={'name': '<NAME>'}) assert_equals(responses.calls[0].request.body, '{"name": "<NAME>"}') @responses.activate def test_put_includes_json_body(): responses.add(responses.PUT, 'http://example.com/test', body='{}') client.put('/test', body={'name': '<NAME>'}) assert_equals(responses.calls[0].request.body, '{"name": "<NAME>"}') @responses.activate def test_delete_includes_json_body(): responses.add(responses.DELETE, 'http://example.com/test', body='{}') client.delete('/test', body={'name': '<NAME>'}) assert_equals(responses.calls[0].request.body, '{"name": "<NAME>"}') @responses.activate def test_handles_validation_failed_error(): fixture = helpers.load_fixture('validation_failed_error') responses.add(responses.POST, 'http://example.com/test', body=json.dumps(fixture), status=fixture['error']['code']) with assert_raises(errors.ValidationFailedError) as assertion: client.post('/test', body={'name': '<NAME>'}) assert_equals(assertion.exception.documentation_url, fixture['error']['documentation_url']) assert_equals(assertion.exception.errors, fixture['error']['errors']) @responses.activate def test_handles_invalid_api_usage_error(): fixture = helpers.load_fixture('invalid_api_usage_error') responses.add(responses.POST, 'http://example.com/test', body=json.dumps(fixture), status=fixture['error']['code']) with assert_raises(errors.InvalidApiUsageError) as assertion: client.post('/test', body={'name': '<NAME>'}) assert_equals(assertion.exception.code, fixture['error']['code']) assert_equals(assertion.exception.errors, fixture['error']['errors']) @responses.activate def test_handles_invalid_state_error(): fixture = helpers.load_fixture('invalid_state_error') responses.add(responses.POST, 'http://example.com/test', body=json.dumps(fixture), status=fixture['error']['code']) with assert_raises(errors.InvalidStateError) as assertion: client.post('/test', body={'name': '<NAME>'}) assert_equals(assertion.exception.message, fixture['error']['message']) assert_equals(assertion.exception.errors, fixture['error']['errors']) @responses.activate def test_handles_idempotent_creation_conflict_error(): fixture = 
helpers.idempotent_creation_conflict_body('PM00001078ZJJN') responses.add(responses.POST, 'http://example.com/test', body=json.dumps(fixture), status=fixture['error']['code']) with assert_raises(errors.IdempotentCreationConflictError) as assertion: client.post('/test', body={'name': '<NAME>'}) assert_equals(assertion.exception.errors, fixture['error']['errors']) assert_equals(assertion.exception.conflicting_resource_id, fixture['error']['errors'][0]['links']['conflicting_resource_id']) @responses.activate def test_handles_gocardless_error(): fixture = helpers.load_fixture('gocardless_error') responses.add(responses.POST, 'http://example.com/test', body=json.dumps(fixture), status=fixture['error']['code']) with assert_raises(errors.GoCardlessInternalError) as assertion: client.post('/test', body={'name': '<NAME>'}) assert_equals(assertion.exception.type, fixture['error']['type']) assert_equals(assertion.exception.request_id, fixture['error']['request_id']) @responses.activate def test_handles_malformed_response(): responses.add(responses.POST, 'http://example.com/test', body='not valid json...', status=200) with assert_raises(errors.MalformedResponseError) as assertion: client.post('/test', body={'name': '<NAME>'})
<filename>src/openprocurement/framework/core/views/submission.py from openprocurement.api.utils import ( APIResourceListing, json_view, generate_id, set_ownership, context_unpack, upload_objects_documents, ) from openprocurement.framework.core.design import ( SUBMISSION_FIELDS, submissions_by_dateModified_view, submissions_test_by_dateModified_view, submissions_by_local_seq_view, submissions_test_by_local_seq_view, ) from openprocurement.framework.core.utils import submissionsresource, save_submission from openprocurement.framework.core.validation import ( validate_submission_data, validate_operation_submission_in_not_allowed_period, validate_action_in_not_allowed_framework_status, validate_post_submission_with_active_contract, ) VIEW_MAP = { "": submissions_by_dateModified_view, "test": submissions_test_by_dateModified_view, "_all_": submissions_by_dateModified_view, } CHANGES_VIEW_MAP = { "": submissions_by_local_seq_view, "test": submissions_test_by_local_seq_view, "_all_": submissions_by_local_seq_view, } FEED = {"dateModified": VIEW_MAP, "changes": CHANGES_VIEW_MAP} @submissionsresource( name="Submissions", path="/submissions", description="Create Submission", ) class SubmissionResource(APIResourceListing): def __init__(self, request, context): super(SubmissionResource, self).__init__(request, context) # params for listing self.VIEW_MAP = VIEW_MAP self.CHANGES_VIEW_MAP = CHANGES_VIEW_MAP self.FEED = FEED self.FIELDS = SUBMISSION_FIELDS self.object_name_for_listing = "Submissions" self.log_message_id = "submission_list_custom" self.db = request.registry.databases.submissions @json_view( content_type="application/json", permission="create_submission", validators=( validate_submission_data, validate_operation_submission_in_not_allowed_period, validate_action_in_not_allowed_framework_status("submission"), validate_post_submission_with_active_contract, ) ) def post(self): """ Creating new submission """ submission_id = generate_id() submission = self.request.validated["submission"] submission.id = submission_id framework = self.request.validated["framework"] submission.submissionType = framework["frameworkType"] submission.mode = framework.get("mode") if self.request.json["data"].get("status") == "draft": submission.status = "draft" upload_objects_documents( self.request, submission, route_kwargs={"submission_id": submission.id}, route_prefix=framework["frameworkType"] ) access = set_ownership(submission, self.request) self.request.validated["submission"] = submission self.request.validated["submission_src"] = {} if save_submission(self.request): self.LOGGER.info( "Created submission {}".format(submission_id), extra=context_unpack( self.request, {"MESSAGE_ID": "submission_create"}, {"submission_id": submission_id, "submission_mode": submission.mode}, ), ) self.request.response.status = 201 self.request.response.headers["Location"] = self.request.route_url( "{}:Submissions".format(submission.submissionType), submission_id=submission_id ) return {"data": submission.serialize("view"), "access": access}
from __future__ import annotations from typing import Any, Dict, List, Optional from bson import ObjectId from pymongo import MongoClient from pymongo.collection import Collection from pymongo.database import Database from pymongo.results import DeleteResult, UpdateResult from .codes_options import ask_codec_options from .document_mongo import DOCUMENT_OBJECT_ID, MongoDocument class MongoConnection: def __init__(self, engine: MongoEngine): self.engine = engine self.client = MongoClient(engine.url, document_class=dict, tz_aware=False, connect=False) def begin(self) -> None: pass def commit(self) -> None: pass def rollback(self) -> None: pass def close(self) -> None: self.client.close() def database(self) -> Database: return self.client.get_default_database() def collection(self, name: str) -> Collection: return self.database()[name] def drop_collection(self, name: str) -> None: self.collection(name).drop() def insert_one(self, document: MongoDocument, data: Dict[str, Any]) -> None: self.collection(document.name).insert_one(data) def insert_many(self, document: MongoDocument, data: List[Dict[str, Any]]) -> None: self.collection(document.name).insert_many(data) def update_by_id(self, document: MongoDocument, data: Dict[str, Any], object_id: str) -> UpdateResult: return self.collection(document.name).update_many({DOCUMENT_OBJECT_ID: ObjectId(object_id)}, {'$set': data}) def update_many(self, document: MongoDocument, data: Dict[str, Any], criteria: Dict[str, Any]) -> UpdateResult: return self.collection(document.name).update_many( filter={'$expr': criteria}, update=data, upsert=False ) def delete_by_id(self, document: MongoDocument, object_id: str) -> DeleteResult: return self.collection(document.name).delete_many({DOCUMENT_OBJECT_ID: ObjectId(object_id)}) def delete_many(self, document: MongoDocument, criteria: Dict[str, Any]) -> DeleteResult: return self.collection(document.name).delete_many({'$expr': criteria}) def find_by_id(self, document: MongoDocument, object_id: str) -> Dict[str, Any]: return self.collection(document.name) \ .find_one({DOCUMENT_OBJECT_ID: ObjectId(object_id)}, codec_options=ask_codec_options()) def find(self, document: MongoDocument, criteria: Dict[str, Any], sort: Optional[Dict[str, Any]] = None): if sort is None: return self.collection(document.name).find(filter={'$expr': criteria}, codec_options=ask_codec_options()) else: return self.collection(document.name) \ .find(filter={'$expr': criteria}, sort=sort, codec_options=ask_codec_options()) def find_with_project( self, document: MongoDocument, project: Dict[str, Any], criteria: Dict[str, Any], sort: Optional[Dict[str, Any]] = None): if sort is None: return self.collection(document.name).aggregate(pipeline=[ {'$match': {'$expr': criteria}}, {'$project': project} ], codec_options=ask_codec_options()) else: return self.collection(document.name).aggregate(pipeline=[ {'$match': {'$expr': criteria}}, {'$project': project}, {'$sort': sort} ], codec_options=ask_codec_options()) def find_all(self, document: MongoDocument) -> List[Dict[str, Any]]: return self.collection(document.name).find({}, codec_options=ask_codec_options()) def find_distinct(self, document: MongoDocument, column_name: str, criteria: Dict[str, Any]): results = self.collection(document.name).aggregate(pipeline=[ {'$match': {'$expr': criteria}}, {'$group': {DOCUMENT_OBJECT_ID: f'${column_name}'}} # , 'count': { '$sum': 1 } }} ], codec_options=ask_codec_options()) for item in results: item[column_name] = item[DOCUMENT_OBJECT_ID] del item[DOCUMENT_OBJECT_ID] 
return results def exists(self, document: MongoDocument, criteria: Dict[str, any]) -> bool: return self.count(document, criteria) != 0 def count(self, document: MongoDocument, criteria: Dict[str, any]) -> int: results = self.collection(document.name).aggregate([ {'$match': {'$expr': criteria}}, {'$count': 'count'} ]) return results[0]['count'] def find_on_group( self, document: MongoDocument, project: Dict[str, Any], criteria: Dict[str, Any], group: Dict[str, Any], sort: Optional[Dict[str, Any]] = None): if sort is None: return self.collection(document.name).aggregate([ {'$match': {'$expr': criteria}}, {'$group': group}, {'$project': project} ], codec_options=ask_codec_options()) else: return self.collection(document.name).aggregate([ {'$match': {'$expr': criteria}}, {'$group': group}, {'$project': project}, {'$sort': sort} ], codec_options=ask_codec_options()) def page( self, document: MongoDocument, criteria: Dict[str, Any], offset: int, limit: int, sort: Optional[Dict[str, Any]] = None): if sort is None: return self.collection(document.name).aggregate([ {'$match': {'$expr': criteria}}, {'$skip': offset}, {'$limit': limit} ]) else: return self.collection(document.name).aggregate([ {'$match': {'$expr': criteria}}, {'$sort': sort}, {'$skip': offset}, {'$limit': limit} ]) class MongoEngine: def __init__(self, url: str): self.url = url def connect(self) -> MongoConnection: return MongoConnection(self)
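A minimal round-trip sketch of the connection class above, assuming a MongoDB server is reachable at the (hypothetical) URL below; since these methods only read document.name, a SimpleNamespace stands in for a real MongoDocument here.

from types import SimpleNamespace

if __name__ == '__main__':
    engine = MongoEngine('mongodb://localhost:27017/watchmen')   # hypothetical URL
    conn = engine.connect()

    # Stand-in for a MongoDocument: only the collection name is needed here.
    users = SimpleNamespace(name='users_demo')

    conn.insert_one(users, {'name': 'alice', 'age': 30})
    # Fetch the generated _id directly via pymongo to exercise the *_by_id helpers.
    doc = conn.collection(users.name).find_one({'name': 'alice'})
    conn.update_by_id(users, {'age': 31}, str(doc['_id']))
    conn.delete_by_id(users, str(doc['_id']))

    conn.drop_collection(users.name)
    conn.close()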
from os import listdir, path import csv from datetime import datetime from matcher import Matcher from student import Student from validate import unique_items SIGNUP_DATA_DIR_NAME = "signup_data" # Map of column names in the CSV files. # Names just need to partially match # (i.e. "your gender" in "What's your gender?") column_names = { 'name': "What's your full name", 'email': "What's your email address", 'year': "What year are you in", 'gender': "What is your gender", 'same_gender_pref': "Would you prefer to be matched with someone of the same gender", 'intro': "Introduce yourself", 'stay_enrolled': "Would you like to stay enrolled for " } def _format_year(raw_year): year = 0 # Chop off the "Year" prefix if raw_year.lower().startswith("year "): raw_year = raw_year[len("year "):] # TODO: don't automatically put BCS into upper years. if raw_year == "5+" or raw_year == "BCS": year = 5 else: year = int(raw_year) return year def _read_csv(file, header_index=0, skip_until_index=1): """ Yield CSV rows as students, while skipping header rows Parameters ---------- file: file object CSV file to read header_index: int Row index of the header in the CSV file. skip_until_index: int Row index where data starts in the CSV file. Useful if there are unneeded rows in the CSV you want to ignore. """ reader = csv.reader(file) # Default column values column_indexes = {} def get_csv_value(row, column_name, optional=False): column_index = column_indexes.get(column_name) if column_index is None: if optional: return None else: raise RuntimeError(f"Could not find {column_name} column in {file.name}") return row[column_index] for i, row in enumerate(reader): if i == header_index: # Store headers headers = row # Find column numbers for column_name, known_header in column_names.items(): for column_index, header_value in enumerate(headers): if known_header in header_value: column_indexes[column_name] = column_index break print(column_indexes) # Make sure we don't reuse column indexes if not unique_items(column_indexes.values()): raise RuntimeError(f"Could not automatically figure out column indexes in {file.name}") elif i >= skip_until_index: # Process data row yield Student( name=get_csv_value(row, 'name'), email=get_csv_value(row, 'email'), year=_format_year(get_csv_value(row, 'year')), gender=get_csv_value(row, 'gender'), should_match_with_same_gender=_is_true(get_csv_value(row, 'same_gender_pref')), intro=get_csv_value(row, 'intro', optional=True) or "", stay_enrolled=get_csv_value(row, 'stay_enrolled', optional=True) or False ) def _is_true(cell): return cell.strip().lower() == "yes" def read_signup_data(current_month_file_name): """ Read signup data from the CSV files in the signup_data folder. Parameters ---------- current_month_file_name: string Name of the current month's CSV file. File must be inside "signup_data" folder. Output ------ Set[Student], Set[Student] Tuple of upper years and lower years. Raises ------ FileNotFoundError: If input file does not exist """ # Build students lists and sets of student names lower_years = set() upper_years = set() def handle_student_row(student): target_set = lower_years if student.year < 3 else upper_years target_set.add(student) with open(path.join(SIGNUP_DATA_DIR_NAME, current_month_file_name), "r") as f: for student in _read_csv(f): handle_student_row(student) # Iterate through the files of signup data from previous months. 
    for filename in listdir(SIGNUP_DATA_DIR_NAME):
        if filename != current_month_file_name and filename.endswith(".csv"):
            with open(path.join(SIGNUP_DATA_DIR_NAME, filename)) as f:
                for student in _read_csv(f):
                    # Add the students who want to stay enrolled in CS Coffee Chat.
                    if student.stay_enrolled:
                        handle_student_row(student)

    return (upper_years, lower_years)
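A short, hedged sketch of the helpers above; the month file name is hypothetical and would have to exist inside the signup_data/ folder for the last two lines to succeed.

if __name__ == "__main__":
    # _format_year normalises the raw CSV answers.
    assert _format_year("Year 2") == 2
    assert _format_year("5+") == 5

    # Read the current month's sheet plus opted-in students from earlier
    # months, split into upper and lower years.
    upper_years, lower_years = read_signup_data("2021-09.csv")   # hypothetical file
    print(len(upper_years), "upper years,", len(lower_years), "lower years")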
import torch import numpy as np import math class ClassifierDetector(): def __init__(self,epsilon,class_size): self.epsilon = epsilon self.class_size = class_size self.x = torch.zeros([class_size,1]) self.y = torch.zeros([class_size,1]) def set_label_distribution(self,label_distribution): # Function: set_label_distribution # Inputs: label_distribution (Pytorch tensor) (class_size) # Process: sets the label distribution from the dataset # Output: none self.label_distribution = label_distribution.unsqueeze(1) self.calculate_covariance() def set_label_pred_distribution(self,model,data_loader,device): # Function: set_label_distribution # Inputs: label_distribution (Pytorch tensor) (class_size) # Process: sets the model predicted label distribution from the dataset # Used Functions: calculate_label_pred_distribution # Output: none self.label_distribution = self.calculate_label_pred_distribution(model,data_loader,device).unsqueeze(1) self.calculate_covariance() def calculate_label_pred_distribution(self,model,data_loader,device): # Function: calculate_label_pred_distribution # Inputs: model (Pytorch model) # data_loader (Pytorch dataloader) # device (Pytorch device) # Process: finds the predicted label distribution of the model # Output: pred_label_distribution (Pytorch tensor) (class_size) k = 0 percent_done = 10 label_prediction_list = np.array([]) class_size = self.class_size model.eval() for image_batch, _ in data_loader: output = model(image_batch.to(device)) label_predictions = output.argmax(1) label_prediction_list = np.append(label_prediction_list,label_predictions.cpu().numpy()) k += 1 if k % (len(data_loader)/10) == 0: print(str(percent_done) + '% Percent Done') percent_done += 10 label_prediction_list = torch.Tensor(label_prediction_list).unsqueeze(0) pred_label_distribution = (label_prediction_list==torch.Tensor(list(range(class_size))).unsqueeze(1)).float().sum(1)/float(k*len(image_batch)) return pred_label_distribution def calculate_covariance(self): # Function: calculate_covariance # Inputs: none # Process: finds the error covariance of the filter # Output: none epsilon = self.epsilon label_distribution = self.label_distribution class_size = self.class_size self.P = (((1-epsilon)**2)/(1-(1-epsilon)**2))*(label_distribution-label_distribution**2)*torch.eye(class_size) def shift_filter(self,model_output,batch_size): # Function: calculate_covariance # Inputs: model_output (pytorch Tensor) (batch_size x class_size) # batch_size (int) # Process: calculates the detection signal using filters # Output: r (pytorch Tensor) (class specific detection signal) (class_size) # g (pytorch Tensor) (general detection signal) (1) try: label_distribution = self.label_distribution except AttributeError: raise AttributeError('Error: Missing Label Distribution, either set the distribution with set_label_distribution or calculate with set_label_pred_distribution') class_size = self.class_size model_output = (model_output.argmax(1).unsqueeze(0)==torch.Tensor(list(range(class_size))).unsqueeze(1)).float().sum(1).unsqueeze(1) x = self.x y = self.y P = self.P epsilon = self.epsilon y = y + model_output x = x + label_distribution*batch_size residual = y-x r = torch.matmul(torch.inverse(P),residual)*residual g = torch.matmul(residual.transpose(0,1),torch.matmul(torch.inverse(P),residual)) x = x + epsilon*(y-x) self.x = x self.y = y return r.squeeze(),g.squeeze() class VariationalDetector(): def __init__(self,epsilon,latent_dim): self.epsilon = epsilon self.latent_dim = latent_dim self.x = 0 self.y = 0 def 
set_latent_distribution(self,model,data_loader,device,batch_size): # Function: set_latent_distribution # Inputs: model (Pytorch tensor) (class_size) # data_loader (Pytorch dataloader) # device (Pytorch device) # batch_size (int) # Process: sets the latent distribution from the dataset # Used Functions: calculate_latent_distribution # Output: latent_variable_list (pytorch Tensor) self.latent_distribution,latent_variable_list = self.calculate_latent_distribution(model,data_loader,device,batch_size) return latent_variable_list def calculate_latent_distribution(self,model,data_loader,device,batch_size): # Function: calculate_latent_distribution # Inputs: model (Pytorch tensor) (class_size) # data_loader (Pytorch dataloader) # device (Pytorch device) # batch_size (int) # Process: calculates the latent distribution from the dataset # Output: latent_distribution (pytorch Tensor) # latent_variable_list (pytorch Tensor) k = 0 percent_done = 10 latent_variable_list = np.array([]) latent_dim = self.latent_dim model.eval() for image_batch, _ in data_loader: _, embed_out, _, _ = model(image_batch.to(device)) for bb in range(batch_size): embed_var = torch.dot(embed_out[bb,:,:,:].squeeze(),embed_out[bb,:,:,:].squeeze())/self.latent_dim latent_variable_list = np.append(latent_variable_list,embed_var.detach().cpu().numpy()) k += 1 if k % (len(data_loader)/10) == 0: print(str(percent_done) + '% Percent Done') percent_done += 10 latent_variable_list = torch.Tensor(latent_variable_list) latent_distribution = latent_variable_list.mean() return latent_distribution,latent_variable_list def shift_filter(self,embed_out,batch_size): # Function: calculate_latent_distribution # Inputs: embed_out (Pytorch tensor) (batch_size x latent_dim) # batch_size (int) # Process: calculates the detection signal using filters # Output: r (pytorch Tensor) (detection signal) try: latent_distribution = self.latent_distribution except AttributeError: raise AttributeError('Error: Missing Latent Distribution, set the distribution with calculate_latent_distribution') latent_dim = self.latent_dim embed_var = embed_out.squeeze().pow(2).sum(1)/self.latent_dim embed_var = embed_var.detach().cpu().sum()/batch_size x = self.x y = self.y epsilon = self.epsilon y = y + embed_var x = x + latent_distribution residual = y-x r = residual.pow(2) x = x + epsilon*(y-x) self.x = x self.y = y return r.squeeze()
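A small numeric sketch of the classifier-side filter above, using a uniform label distribution over ten classes and random logits as stand-ins for real model output.

if __name__ == '__main__':
    class_size = 10
    batch_size = 32

    detector = ClassifierDetector(epsilon=0.1, class_size=class_size)
    # Uniform label distribution as a stand-in for real dataset statistics.
    detector.set_label_distribution(torch.full((class_size,), 1.0 / class_size))

    # r is the per-class detection signal, g the scalar statistic.
    for _ in range(3):
        logits = torch.randn(batch_size, class_size)
        r, g = detector.shift_filter(logits, batch_size)
        print(g.item())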
<gh_stars>1-10 import numpy as np import pandas as pd import matplotlib.pyplot as plt def read_table(filename): df = pd.read_csv(filename) return df def result_allfactor_effect(df, result, order=False, factor_filter=None): ''' one result (e.g. f1 or acc) for all experiment factor plot the curve diagram for rank ''' plt.figure(figsize=(16,24), dpi=80) df = df.sort_values(result,ascending=order) if factor_filter: df = df[factor_filter] plt.cla() plt.clf() cols = df.columns plt.suptitle(result) # loop for all column in factor_filter for i, col in enumerate(cols): plt.subplot(len(cols), 1, i+1) params = pd.unique(df[col]) params_curve = { p:[0] for p in params} # loop the ranking for all value in each column for val in df[col]: for k in params_curve: params_curve[k].append(params_curve[k][-1]) params_curve[val][-1] = params_curve[val][-1]+1 plt.title(col) for k in sorted(params_curve.keys()): plt.plot(params_curve[k], label=k) plt.legend() # plt.show() plt.savefig(result+'_racing.png') def results_correlation(df, result1, result2): ''' plot correlation for two result (e.g. f1 and acc) scatter plot ''' plt.figure(figsize=(8,8), dpi=80) plt.cla() plt.clf() x = df[result1] y = df[result2] plt.xlabel(result1) plt.ylabel(result2) plt.scatter(x, y) # plt.show() plt.savefig(result1+'_'+result2+'_correlation.png') def result_single_factor_dist(df, result, factors): ''' one result (e.g. f1 of acc) one experiment factor plot scatter factor value and result ''' for f in factors: plt.figure(figsize=(8,8), dpi=80) plt.cla() plt.clf() # if f == 'label_threshold': # ax = plt.gca() # ax.set_xlim([0,0.002]) x = df[f] y = df[result] plt.xlabel(f) plt.ylabel(result) plt.scatter(x, y) # plt.show() plt.savefig(result+'_'+f+'_single_factor.png') def main(): df = read_table('evaluation.csv') # result_allfactor_effect(df, 'valid_loss_min', order=True, factor_filter=['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer']) # result_allfactor_effect(df, 'valid_acc_max', order=False, factor_filter=['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer']) # result_allfactor_effect(df, 'valid_f1_max', order=False, factor_filter=['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer']) # result_allfactor_effect(df, 'valid_loss_100_epoch', order=True, factor_filter=['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer']) # result_allfactor_effect(df, 'valid_acc_100_epoch', order=False, factor_filter=['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer']) # result_allfactor_effect(df, 'valid_f1_100_epoch', order=False, factor_filter=['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer']) # results_correlation(df, 'valid_f1_max', 'valid_acc_max') # results_correlation(df, 'valid_loss_min', 'valid_acc_max') # results_correlation(df, 'valid_loss_min', 'valid_f1_max') # results_correlation(df, 'valid_f1_500_epoch', 'valid_acc_500_epoch') # results_correlation(df, 'valid_loss_500_epoch', 'valid_acc_500_epoch') # results_correlation(df, 'valid_loss_500_epoch', 'valid_f1_500_epoch') # result_single_factor_dist(df, 'valid_f1_max', ['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer']) # result_single_factor_dist(df, 'valid_loss_min', 
    #                           ['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer'])
    # result_single_factor_dist(df, 'valid_acc_max', ['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer'])

    result_single_factor_dist(df, 'valid_loss_100_epoch', ['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer'])
    result_single_factor_dist(df, 'valid_acc_100_epoch', ['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer'])
    result_single_factor_dist(df, 'valid_f1_100_epoch', ['window_size','k','feature_num','label_threshold','lstm_units','learning_rate','epsilon','regularizer'])


if __name__ == '__main__':
    main()
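A toy run of the plotting helpers, assuming they live in a module importable as evaluation_plots (hypothetical name) and using a miniature hand-made results table with hypothetical factor and metric columns.

import pandas as pd
from evaluation_plots import results_correlation, result_single_factor_dist

toy = pd.DataFrame({
    'window_size':   [10, 20, 10, 20],
    'learning_rate': [1e-3, 1e-3, 1e-4, 1e-4],
    'valid_acc_max': [0.71, 0.74, 0.69, 0.73],
    'valid_f1_max':  [0.65, 0.70, 0.62, 0.68],
})
# Scatter of the two metrics, saved as valid_f1_max_valid_acc_max_correlation.png
results_correlation(toy, 'valid_f1_max', 'valid_acc_max')
# One scatter per factor against the chosen metric
result_single_factor_dist(toy, 'valid_acc_max', ['window_size', 'learning_rate'])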
import os import csv import wave import sys import numpy as np import pandas as pd import glob def split_wav(wav, features, emotions): (nchannels, sampwidth, framerate, nframes, comptype, compname), samples = wav left = samples[0::nchannels] right = samples[1::nchannels] shift = len(left) // np.array(features).shape[0] frames = [] for ie, e in enumerate(emotions): start = e['start'] end = e['end'] e['right'] = right[int(start * framerate):int(end * framerate)] e['left'] = left[int(start * framerate):int(end * framerate)] e['acoustic_features'] = features[int(start * framerate // shift):int(end * framerate // shift)] frames.append({ 'left': e['left'], 'right': e['right'], 'acoustic_features': e['acoustic_features'] }) return frames def get_field(data, key): return np.array([e[key] for e in data]) def pad_sequence_into_array(Xs, maxlen=None, truncating='post', padding='post', value=0.): Nsamples = len(Xs) if maxlen is None: lengths = [ s.shape[0] for s in Xs ] # 'sequences' must be list, 's' must be numpy array, len(s) return the first dimension of s maxlen = np.max(lengths) Xout = np.ones(shape=[Nsamples, maxlen] + list(Xs[0].shape[1:]), dtype=Xs[0].dtype) * np.asarray(value, dtype=Xs[0].dtype) Mask = np.zeros(shape=[Nsamples, maxlen], dtype=Xout.dtype) for i in range(Nsamples): x = Xs[i] if truncating == 'pre': trunc = x[-maxlen:] elif truncating == 'post': trunc = x[:maxlen] else: raise ValueError("Truncating type '%s' not understood" % truncating) if padding == 'post': Xout[i, :len(trunc)] = trunc Mask[i, :len(trunc)] = 1 elif padding == 'pre': Xout[i, -len(trunc):] = trunc Mask[i, -len(trunc):] = 1 else: raise ValueError("Padding type '%s' not understood" % padding) return Xout, Mask def convert_gt_from_array_to_list(gt_batch, gt_batch_mask=None): B, L = gt_batch.shape gt_batch = gt_batch.astype('int') gts = [] for i in range(B): if gt_batch_mask is None: l = L else: l = int(gt_batch_mask[i, :].sum()) gts.append(gt_batch[i, :l].tolist()) return gts def get_audio(path_to_wav, filename): wav = wave.open(path_to_wav + filename, mode="r") (nchannels, sampwidth, framerate, nframes, comptype, compname) = wav.getparams() content = wav.readframes(nframes) samples = np.fromstring(content, dtype=np.int16) return (nchannels, sampwidth, framerate, nframes, comptype, compname), samples def get_transcriptions(path_to_transcriptions, filename): f = open(path_to_transcriptions + filename, 'r').read() f = np.array(f.split('\n')) transcription = {} for i in range(len(f) - 1): g = f[i] i1 = g.find(': ') i0 = g.find(' [') ind_id = g[:i0] ind_ts = g[i1 + 2:] transcription[ind_id] = ind_ts return transcription def get_emotions(path_to_emotions, filename): f = open(path_to_emotions + filename, 'r').read() f = np.array(f.split('\n')) idx = f == '' idx_n = np.arange(len(f))[idx] emotion = [] for i in range(len(idx_n) - 2): g = f[idx_n[i] + 1:idx_n[i + 1]] head = g[0] i0 = head.find(' - ') start_time = float(head[head.find('[') + 1:head.find(' - ')]) end_time = float(head[head.find(' - ') + 3:head.find(']')]) actor_id = head[head.find(filename[:-4]) + len(filename[:-4]) + 1:head.find(filename[:-4]) + len(filename[:-4]) + 5] emo = head[head.find('\t[') - 3:head.find('\t[')] vad = head[head.find('\t[') + 1:] v = float(vad[1:7]) a = float(vad[9:15]) d = float(vad[17:23]) j = 1 emos = [] while g[j][0] == "C": head = g[j] start_idx = head.find("\t") + 1 evoluator_emo = [] idx = head.find(";", start_idx) while idx != -1: evoluator_emo.append(head[start_idx:idx].strip().lower()[:3]) start_idx = idx + 1 idx = 
head.find(";", start_idx) emos.append(evoluator_emo) j += 1 emotion.append({ 'start': start_time, 'end': end_time, 'id': filename[:-4] + '_' + actor_id, 'v': v, 'a': a, 'd': d, 'emotion': emo, 'emo_evo': emos }) return emotion
import functools import pathlib import time from typing import Any, Callable, Optional import peewee from .utils import CACHING_DISABLED, USER_DATA_DIR, abspath SCHEMA_VERSION = 1 DATABASE_PATH = pathlib.Path(USER_DATA_DIR).joinpath("data.db") CACHE_EXPIRY_THRESHOLD = 3600 * 24 * 7 # A week database = peewee.SqliteDatabase(None) _database_initialized = False AnyCallable = Callable[..., Any] class _BaseModel(peewee.Model): class Meta: database = database class URL(_BaseModel): url = peewee.TextField(unique=True) workdir = peewee.TextField() last_access = peewee.FloatField() # POSIX timestamp def initialize_database(path: pathlib.Path = None) -> None: global _database_initialized if _database_initialized: return if CACHING_DISABLED: _database_initialized = True return path = path or DATABASE_PATH path.parent.mkdir(parents=True, exist_ok=True) database.init(str(path)) database.connect() schema_version = database.execute_sql("PRAGMA user_version;").fetchone()[0] if schema_version == 0: # New database database.execute_sql(f"PRAGMA user_version = {SCHEMA_VERSION};") database.create_tables([URL], safe=True) # Expire old entries expiry_time = time.time() - CACHE_EXPIRY_THRESHOLD URL.delete().where(URL.last_access < expiry_time).execute() _database_initialized = True # Decorator to ensure database is initialized before executing a # function. def ensure_database(func: AnyCallable) -> AnyCallable: @functools.wraps(func) def wrapper(*args, **kwargs): initialize_database() return func(*args, **kwargs) return wrapper # Decorator that's basically equivalent to database.atomic(). def atomic(func: AnyCallable) -> AnyCallable: if CACHING_DISABLED: return func else: return ensure_database(database.atomic()(func)) # Returns a decorator that returns the fallback value if caching is # disabled. def requires_cache(fallback: Any = None) -> Callable[[AnyCallable], AnyCallable]: def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): if CACHING_DISABLED: return fallback else: return func(*args, **kwargs) return wrapper return decorator @requires_cache() @ensure_database @database.atomic() def insert(url: str, workdir: pathlib.Path) -> None: workdir_str = str(abspath(workdir)) try: record = URL.get(URL.url == url) record.workdir = workdir_str record.last_access = time.time() record.save() except peewee.DoesNotExist: URL.create(url=url, workdir=workdir_str, last_access=time.time()) @requires_cache() @ensure_database @database.atomic() def touch(url: str) -> None: try: record = URL.get(URL.url == url) record.last_access = time.time() record.save() except peewee.DoesNotExist: pass @requires_cache() @ensure_database @database.atomic() def drop(url: str) -> None: try: record = URL.get(URL.url == url) record.delete_instance() except peewee.DoesNotExist: pass @requires_cache() @ensure_database def get_workdir(url: str) -> Optional[pathlib.Path]: try: record = URL.get(URL.url == url) return pathlib.Path(record.workdir) except peewee.DoesNotExist: return None
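# Usage sketch for the URL -> working-directory cache above, written as if appended
# to this module so the functions are in scope. The URL is a placeholder, and if
# CACHING_DISABLED is set these calls silently become no-ops.
import tempfile

if __name__ == '__main__':
    demo_dir = pathlib.Path(tempfile.mkdtemp())
    initialize_database(demo_dir / 'demo.db')              # explicit path instead of USER_DATA_DIR
    insert('https://example.com/paper.pdf', demo_dir)      # create or refresh the record
    print(get_workdir('https://example.com/paper.pdf'))    # -> the demo directory, or None
    drop('https://example.com/paper.pdf')                  # remove the record again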
"""Stream new Reddit posts and notify for matching posts.""" import datetime import os import sys import time import apprise import praw import prawcore import yaml CONFIG_PATH = os.getenv("RPN_CONFIG", "config.yaml") LOGGING = os.getenv("RPN_LOGGING", "FALSE") YAML_KEY_APPRISE = "apprise" YAML_KEY_REDDIT = "reddit" YAML_KEY_SUBREDDITS = "subreddits" YAML_KEY_CLIENT = "client" YAML_KEY_SECRET = "secret" YAML_KEY_AGENT = "agent" def main(): """Run application.""" print("Starting Reddit Post Notifier") config = get_config() apprise_config = config[YAML_KEY_APPRISE] reddit_config = config[YAML_KEY_REDDIT] subreddits = reddit_config[YAML_KEY_SUBREDDITS] apprise_client = get_apprise_client(apprise_config) reddit_client = get_reddit_client( reddit_config[YAML_KEY_CLIENT], reddit_config[YAML_KEY_SECRET], reddit_config[YAML_KEY_AGENT] ) validate_subreddits(reddit_client, subreddits) stream_submissions(reddit_client, subreddits, apprise_client) def stream_submissions(reddit, subreddits, apprise_client): """Monitor and process new Reddit submissions in given subreddits.""" subs = subreddits.keys() subs_joined = "+".join(subs) subreddit = reddit.subreddit(subs_joined) while True: try: for submission in subreddit.stream.submissions(pause_after=None, skip_existing=True): process_submission(submission, subreddits, apprise_client) except KeyboardInterrupt: sys.exit("\tStopping application, bye bye") except (praw.exceptions.PRAWException, prawcore.exceptions.PrawcoreException) as exception: print("Reddit API Error: ") print(exception) print("Pausing for 30 seconds...") time.sleep(30) def process_submission(submission, subreddits, apprise_client): """Notify if given submission matches search.""" title = submission.title sub = submission.subreddit.display_name search_terms = subreddits[sub.lower()] if any(term in title.lower() for term in search_terms): notify(apprise_client, title, submission.id) if LOGGING != "FALSE": print(datetime.datetime.fromtimestamp(submission.created_utc), " " + "r/" + sub + ": " + title) def notify(apprise_client, title, submission_id): """Send apprise notification.""" apprise_client.notify( title=title, body="https://www.reddit.com/" + submission_id, ) def get_reddit_client(cid, secret, agent): """Return PRAW Reddit instance.""" return praw.Reddit( client_id=cid, client_secret=secret, user_agent=agent ) def get_apprise_client(config): """Return Apprise instance.""" apprise_client = apprise.Apprise() for conf in config: apprise_client.add(conf) return apprise_client def get_config(): """Returns application configuration.""" check_config_file() config = load_config() return validate_config(config) def check_config_file(): """Check if config file exists.""" if not os.path.exists(CONFIG_PATH): sys.exit("Missing config file: " + CONFIG_PATH) print("Using config file: " + CONFIG_PATH) def load_config(): """Load config into memory.""" with open(CONFIG_PATH, "r") as config_yaml: config = None try: config = yaml.safe_load(config_yaml) except yaml.YAMLError as exception: if hasattr(exception, "problem_mark"): mark = exception.problem_mark # pylint: disable=no-member print("Invalid yaml, line %s column %s" % (mark.line + 1, mark.column + 1)) sys.exit("Invalid config: failed to parse yaml") if not config: sys.exit("Invalid config: empty file") return config def validate_config(config): """Validate required config keys.""" if YAML_KEY_REDDIT not in config or not isinstance(config[YAML_KEY_REDDIT], dict): sys.exit("Invalid config: missing reddit config") reddit = config[YAML_KEY_REDDIT] if 
YAML_KEY_CLIENT not in reddit or not isinstance(reddit[YAML_KEY_CLIENT], str): sys.exit("Invalid config: missing reddit -> client config") if YAML_KEY_SECRET not in reddit or not isinstance(reddit[YAML_KEY_SECRET], str): sys.exit("Invalid config: missing reddit -> secret config") if YAML_KEY_AGENT not in reddit or not isinstance(reddit[YAML_KEY_AGENT], str): sys.exit("Invalid config: missing reddit -> agent config") if YAML_KEY_SUBREDDITS not in reddit or not isinstance(reddit[YAML_KEY_SUBREDDITS], dict): sys.exit("Invalid config: missing reddit -> subreddits config") if YAML_KEY_APPRISE not in config or not isinstance(config[YAML_KEY_APPRISE], list): sys.exit("Invalid config: missing apprise config") print("Monitoring Reddit for:") subs = reddit[YAML_KEY_SUBREDDITS] for conf in subs: current = subs[conf] if not isinstance(current, list) or not current: sys.exit("Invalid config: \'" + conf + "\' needs a list of search strings") if not all(isinstance(item, str) for item in current): sys.exit("Invalid config: \'" + conf + "\' needs a list of search strings") subs[conf] = [x.lower() for x in current] print("\tr/" + conf + ": ", current) print("") reddit[YAML_KEY_SUBREDDITS] = {k.lower(): v for k, v in subs.items()} return config def validate_subreddits(reddit, subreddits): """Validate subreddits.""" for sub in subreddits: try: reddit.subreddit(sub).id except prawcore.exceptions.Redirect: sys.exit("Invalid Subreddit: " + sub) except (praw.exceptions.PRAWException, prawcore.exceptions.PrawcoreException) as exception: print("Reddit API Error: ") print(exception) if __name__ == "__main__": main()
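# Example configuration sketch: validate_config() above expects a `reddit` mapping
# (client, secret, agent and a subreddits mapping of subreddit -> list of search
# strings) plus an `apprise` list of notification URLs. Everything below is
# placeholder data; the tgram:// URL is just one example of an Apprise scheme, and
# in a real file this helper would sit above the __main__ guard.
EXAMPLE_CONFIG = {
    "reddit": {
        "client": "my-client-id",
        "secret": "my-client-secret",
        "agent": "reddit-post-notifier by u/example",
        "subreddits": {
            "buildapcsales": ["ssd", "monitor"],
            "mechmarket": ["selling"],
        },
    },
    "apprise": ["tgram://bot-token/chat-id"],
}


def write_example_config(path="config.example.yaml"):
    """Write the placeholder configuration above to a YAML file."""
    with open(path, "w") as config_yaml:
        yaml.safe_dump(EXAMPLE_CONFIG, config_yaml, sort_keys=False)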
<reponame>OceansAus/cosima-cookbook import matplotlib.pyplot as plt import cosima_cookbook as cc from tqdm import tqdm_notebook import IPython.display def wind_stress(expts=[]): """ Plot zonally averaged wind stress. Parameters ---------- expts : str or list of str Experiment name(s). """ if not isinstance(expts, list): expts = [expts] # computing results = [] for expt in tqdm_notebook(expts, leave=False, desc="experiments"): result = {"mean_tau_x": cc.diagnostics.mean_tau_x(expt), "expt": expt} results.append(result) IPython.display.clear_output() plt.figure(figsize=(12, 6)) # plotting for result in results: mean_tau_x = result["mean_tau_x"] expt = result["expt"] plt.plot(mean_tau_x, mean_tau_x.yu_ocean, linewidth=2, label=expt) plt.ylim([-70, 65]) plt.xlim([-0.08, 0.20]) plt.ylabel("Latitude ($^\circ$N)") plt.xlabel("Stress (N m$^{-2}$)") plt.legend(fontsize=10, loc="best") def annual_scalar(expts=[], variables=[]): """ Calculate and plot annual average of variable(s) for experiment(s). Parameters ---------- expts : str or list of str Experiment name(s). variable : str or list of str Variable name(s). """ if not isinstance(expts, list): expts = [expts] if not isinstance(variables, list): variables = [variables] # computing results = [] for expt in tqdm_notebook(expts, leave=False, desc="experiments"): annual_average = cc.diagnostics.annual_scalar(expt, variables) result = {"annual_average": annual_average, "expt": expt} results.append(result) IPython.display.clear_output() # plotting each variable in a separate plot for variable in variables: plt.figure(figsize=(12, 6)) for result in results: annual_average = result["annual_average"] expt = result["expt"] annual_average[variable].plot(label=expt) plt.title(annual_average[variable].long_name) plt.legend(fontsize=10, bbox_to_anchor=(1, 1), loc="best", borderaxespad=0.0) plt.xlabel("Time") def drake_passage(expts=[]): """ Plot Drake Passage transport. Parameters ---------- expts : str or list of str Experiment name(s). """ plt.figure(figsize=(12, 6)) if not isinstance(expts, list): expts = [expts] # computing results = [] for expt in tqdm_notebook(expts, leave=False, desc="experiments"): transport = cc.diagnostics.drake_passage(expt) result = {"transport": transport, "expt": expt} results.append(result) IPython.display.clear_output() # plotting for result in results: transport = result["transport"] expt = result["expt"] transport.plot(label=expt) plt.title("Drake Passage Transport") plt.xlabel("Time") plt.ylabel("Transport (Sv)") plt.legend(fontsize=10, loc="best") def bering_strait(expts=[]): """ Plot Bering Strait transport. Parameters ---------- expts : str or list of str Experiment name(s). """ plt.figure(figsize=(12, 6)) if not isinstance(expts, list): expts = [expts] for expt in tqdm_notebook(expts, leave=False, desc="experiments"): transport = cc.diagnostics.bering_strait(expt) transport.plot(label=expt) IPython.display.clear_output() plt.title("Bering Strait Transport") plt.xlabel("Time") plt.ylabel("Transport (Sv)") plt.legend(fontsize=10, loc="best") def aabw(expts=[]): """ Plot timeseries of AABW transport measured at 55S. Parameters ---------- expts : str or list of str Experiment name(s). 
""" plt.figure(figsize=(12, 6)) if not isinstance(expts, list): expts = [expts] for expt in tqdm_notebook(expts, leave=False, desc="experiments"): psi_aabw = cc.diagnostics.calc_aabw(expt) psi_aabw.plot(label=expt) IPython.display.clear_output() plt.title("AABW Transport at 40S") plt.xlabel("Time") plt.ylabel("Transport (Sv)") plt.legend(fontsize=10, loc="best") def amoc(expts=[]): """ Plot timeseries of AMOC transport measured at 26N. Parameters ---------- expts : str or list of str Experiment name(s). """ plt.figure(figsize=(12, 6)) if not isinstance(expts, list): expts = [expts] for expt in tqdm_notebook(expts, leave=False, desc="experiments"): psi_amoc = cc.diagnostics.calc_amoc(expt) psi_amoc.plot(label=expt) IPython.display.clear_output() plt.title("AMOC Transport at 26N") plt.xlabel("Time") plt.ylabel("Transport (Sv)") plt.legend(fontsize=10, loc="best") def amoc_south(expts=[]): """ Plot timeseries of AMOC transport measured at 35S. Parameters ---------- expts : str or list of str Experiment name(s). """ plt.figure(figsize=(12, 6)) if not isinstance(expts, list): expts = [expts] for expt in tqdm_notebook(expts, leave=False, desc="experiments"): psi_amoc_south = cc.diagnostics.calc_amoc_south(expt) psi_amoc_south.plot(label=expt) IPython.display.clear_output() plt.title("AMOC Transport at 35S") plt.xlabel("Time") plt.ylabel("Transport (Sv)") plt.legend(fontsize=10, loc="best")
#!/usr/bin/env python #coding: utf-8 """ This module simply sends request to the Online Labs API, and returns their response as a dict. """ import requests from json import dumps API_COMPUTE = 'https://api.cloud.online.net' API_ACCOUNT = "https://account.cloud.online.net" class OlError(RuntimeError): pass class OlManager(object): def __init__(self, token=None, debug=None): self.oltoken = token self.apic = API_COMPUTE self.apia = API_ACCOUNT self.debug = debug def organizations(self): json = self.request(self.apia+'/organizations') return json['organizations'] def set_token(self, token): self.oltoken = token def new_token(self, email, password, expires=True): data = { 'email': email, 'password': password, 'expires': expires, } json = self.request(self.apia+'/tokens', data=data, method='POST') return json['token'] def user(self, user_id): json = self.request(self.apia+'/users/%s' % user_id) return json['user'] def tokens(self): json = self.request(self.apia+'/tokens') return json['tokens'] def token(self, token_id): json = self.request(self.apia+'/token/%s' % token_id) return json['token'] def extend_token(self, token_id): json = self.request(self.apia+'/token/%s' % token_id, method='PATCH') return json['server'] def delete_token(self, token_id): self.request(self.apia+'/token/%s' % token_id, method='DELETE') def servers(self): json = self.request(self.apic+'/servers') return json['servers'] def server(self, server_id): json = self.request(self.apic+'/servers/%s' % server_id) return json['server'] def new_server(self, name, organization_id, image_id, volumes, tags=[]): data = { 'name': name, 'organization': organization_id, 'image': image_id, 'volumes': volumes, 'tags': tags, } json = self.request(self.apic+'/servers', data=data, method='POST') return json['server'] def update_server(self, server): json = self.request(self.apic+'/servers/%s' % server['id'], data=server, method='PUT') return json['server'] def delete_server(self, server_id): json = self.request(self.apic+'/servers/%s' % server_id, method='DELETE') return json['server'] def server_actions(self, server_id): json = self.request(self.apic+'/servers/%s/action' % server_id) return json['actions'] def server_action(self, server_id, action): data = { 'action': action, } json = self.request(self.apic+'/servers/%s/action' % server_id, data=data, method='POST') return json['task'] def volumes(self): json = self.request(self.apic+'/volumes') return json['volumes'] def new_volume(self, name, size, organization_id, volume_type): if volume_type not in ["l_ssd", "l_hdd"]: raise OlError("Volume type should be l_ssd or l_hdd") data = { "name": name, "size": size, "organization": organization_id, "volume_type": volume_type, } json = self.request(self.apic+'/volumes', data=data, method='POST') return json['volume'] def volume(self, volume_id): json = self.request(self.apic+'/volumes/%s' % volume_id) return json['volume'] def delete_volume(self, volume_id): self.request(self.apic+'/volumes/%s' % volume_id, method='DELETE') def new_snapshot(self, name, organization_id, volume_id): data = { 'name': name, 'organization': organization_id, 'volume': volume_id, } json = self.request(self.apic+'/snapshots', data=data, method='POST') return json['snapshot'] def update_snapshot(self, snapshot): json = self.request(self.apic+'/snapshot/%s' % snapshot['id'], data=snapshot, method='PUT') return json['snapshot'] def snapshots(self): json = self.request(self.apic+'/snapshots') return json['snapshots'] def snapshot(self, snapshot_id): json = 
self.request(self.apic+'/snapshots/%s' % snapshot_id) return json['snapshot'] def delete_snapshot(self, snapshot_id): self.request(self.apic+'/snapshots/%s' % snapshot_id, method='DELETE') def image(self, image_id): json = self.request(self.apic+'/images/%s' % image_id) return json['image'] def images(self): json = self.request(self.apic+'/images') return json['images'] def new_image(self, name, organization_id, arch, volume_id): data = ~{ 'name': name, 'organization': organization_id, 'arch': arch, 'root_volume': volume_id, } json = self.request(self.apic+'/images', data=data, method='POST') return json['image'] def update_image(self, image): json = self.request(self.apic+'/images/%s' % image['id'], data=image, method='PUT') return json['image'] def delete_image(self, image_id): self.request(self.apic+'/images/%s' % image_id, method='DELETE') def new_ips(self, organization_id): data = ~{ 'organization': organization_id, } json = self.request(self.apic+'/ips', data=data, method='POST') return json['ip'] def ips(self): json = self.request(self.apic+'/ips') return json['ips'] def ip(self, ip_id): json = self.request(self.apic+'/ips/%s' % ip_id) return json['ip'] def remap_ip(self, ip_id, address, server_id, organization_id, ): data = { 'address': address, 'id': ip_id, 'organization': organization_id, 'server': server_id, } json = self.request(self.apic+'/ips/%s' % ip_id, data=data, method='POST') return json['ip'] def delete_ips(self, ip_id): self.request(self.apic+'/ips/%s' % ip_id) def request(self, url, data={}, method='GET'): headers = {"content-type": "application/json"} if self.token is not None: headers["X-Auth-Token"] = self.oltoken if self.debug: print("Headers : %s" % headers) print("Url: %s" % url) print("Method: %s" % method) print("Data: %s\n" % dumps(data)) try: if method == 'POST': resp = requests.post(url, data=dumps(data), headers=headers, timeout=60) json = resp.json() elif method == 'DELETE': resp = requests.delete(url, headers=headers, timeout=60) json = {'status': resp.status_code} elif method == 'PUT': resp = requests.put(url, headers=headers, data=dumps(data), timeout=60) json = resp.json() elif method == 'GET': resp = requests.get(url, headers=headers, data=dumps(data), timeout=60) json = resp.json() elif method == 'PATCH': resp = requests.patch(url, headers=headers, data=dumps(data), timeout=60) json = resp.json() else: raise OlError('Unsupported method %s' % method) except ValueError: # requests.models.json.JSONDecodeError raise ValueError("The API server doesn't respond with a valid json") except requests.RequestException as e: # errors from requests raise RuntimeError(e) if resp.status_code != requests.codes.ok: if json: if 'error_message' in json: raise OlError(json['error_message']) elif 'message' in json: raise OlError(json['message']) # The JSON reponse is bad, so raise an exception with the HTTP status resp.raise_for_status() if json.get('id') == 'not_found': raise OlError(json['message']) return json if __name__ == '__main__': import os import sys api_debug = os.environ.get('OL_API_DEBUG') api_token = os.environ.get('OL_API_TOKEN') api_id = os.environ.get('OL_API_ID') api_pass = os.environ.get('OL_API_PASSWD') if api_token is not None: ol = OlManager(api_token, api_debug) elif api_id is not None and api_pass is not None: ol = OlManager(debug=api_debug) ol.set_token(ol.new_token(api_id, api_pass)['id']) else: sys.exit("Error: OL_API_TOKEN or OL_API_ID, OL_API_PASSWD enviroment variables are not set") fname = sys.argv[1] import pprint pprint.pprint(getattr(ol, 
fname)(*sys.argv[2:]))
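# Usage sketch for the API wrapper above; the token comes from the same OL_API_TOKEN
# variable the CLI uses. Note: request() checks `self.token`, which looks like a typo
# for `self.oltoken`; this sketch assumes that check reads the right attribute.
def _demo() -> None:
    import os
    manager = OlManager(token=os.environ['OL_API_TOKEN'], debug=True)
    for server in manager.servers():
        print(server['id'], server['name'])
    # Actions such as power management go through server_action(), e.g.:
    # manager.server_action(server_id, 'poweron')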
import gym import torch import torch.nn.functional as F from dqn.agents.cartpole.model import DQN from dqn.replay_memory import ReplayMemory, Sample from dqn.agents.cartpole.config import CartPoleConfig from dqn.agents.base_agent import BaseAgent from dqn.agents.cartpole.utils import preprocess_observation, preprocess_sampled_batch class CartPoleAgent(BaseAgent): """The CartPole agent. """ def __init__(self) -> None: self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Initialize the agent configuration. self.cfg = CartPoleConfig() # Initialize the gym environment. self.env = gym.make(self.cfg.env) # Initialize the candidate deep Q-network. self.dqn = DQN(cfg=self.cfg).to(self.device) # Initialize target deep Q-network. self.target_dqn = DQN(cfg=self.cfg).to(self.device) # Create the replay memory. self.memory = ReplayMemory(self.cfg.train.memory_size) # Initialize optimizer used for training the DQN. self.optimizer = torch.optim.Adam(self.dqn.parameters(), lr=self.cfg.train.lr) def train(self) -> None: # Keep track of best evaluation mean return achieved so far. best_mean_return = float("-inf") for episode in range(self.cfg.train.episodes): done = False obs = preprocess_observation(self.env.reset()) steps = 0 while not done: # Get an action from the DQN. action = self.dqn.act(obs) # Act in the true environment. next_obs, reward, done, info = self.env.step(action) # Preprocess the incoming observation. next_obs = preprocess_observation(next_obs) # Add the transition to the replay memory. sample = Sample(obs, action, next_obs, reward, done) self.memory.push(sample) # Optimize the DQN every cfg.train.frequency steps. if steps % self.cfg.train.frequency == 0: self.optimize() # Update the target DQN with the candidate DQN every cfg.train.target_update_frequency steps. if steps % self.cfg.train.target_update_frequency == 0: self._update_target_dqn() steps += 1 obs = next_obs # Evaluate the current agent. if episode % self.cfg.evaluate.frequency == 0: mean_return = self.evaluate() print( f"Episode {episode}/{self.cfg.train.episodes}, Mean Return: {mean_return}" ) # Save current agent if it has the best performance. if mean_return >= best_mean_return: best_mean_return = mean_return print("Best performance so far, Saving model.") torch.save(self.dqn, self.cfg.model_path) # Update the epsilon value. self.dqn.epsilon = self._update_epsilon( self.dqn.epsilon, self.dqn.epsilon_end ) self.env.close() def optimize(self) -> float: # Check if enough transitions are available in the replay buffer before optimizing. if len(self.memory) < self.dqn.batch_size: return float("inf") # Sample a batch from the replay memory. batch = self.memory.sample(self.dqn.batch_size) obs, next_obs, actions, rewards, dones = preprocess_sampled_batch(batch) # Compute the current estimates of the Q-values for each state-action pair (s,a). q_values_expected = self.dqn(obs).gather(1, actions) next_q_values = self.target_dqn(next_obs).detach().max(1)[0].unsqueeze(1) # Compute the Q-value targets ONLY for non-terminal transitions # If it is a terminal transition, (1 - dones) will evaluate to 0. 
q_value_targets = rewards + (self.dqn.gamma * next_q_values * (1 - dones)) loss = F.mse_loss(q_values_expected, q_value_targets) self.optimizer.zero_grad() loss.backward() self.optimizer.step() return loss.item() def evaluate(self, render: bool = False) -> float: total_return = 0 for i in range(self.cfg.evaluate.episodes): obs = preprocess_observation(self.env.reset()).unsqueeze(0) done = False episode_return = 0 while not done: if render: self.env.render() action = self.dqn.act(obs) obs, reward, done, info = self.env.step(action) obs = preprocess_observation(obs).unsqueeze(0) episode_return += reward total_return += episode_return return total_return / self.cfg.evaluate.episodes def simulate(self) -> None: self.dqn = torch.load(self.cfg.model_path, map_location=self.device) self.cfg.evaluate.episodes = 3 mean_return = self.evaluate(render=True) print(f"Simulation Complete. Mean Return: {mean_return}") def _update_target_dqn(self) -> None: """Updates the target DQN weights with the training DQN weights. """ self.target_dqn.load_state_dict(self.dqn.state_dict()) @staticmethod def _update_epsilon(epsilon: float, epsilon_end: float) -> float: """Updates the epsilon value as training progresses to reduce exploration. Args: epsilon (float): The epsilon start value. epsilon_end (float): The epsilon end value. Returns: float: The updated epsilon value. """ return max(epsilon_end, 0.99 * epsilon)
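# Usage sketch: a typical driver for the agent above, training with periodic
# evaluation and checkpointing, then reloading the best checkpoint and watching it
# run. How the project actually wires this up (CLI, scripts) is not shown here.
if __name__ == "__main__":
    agent = CartPoleAgent()
    agent.train()     # checkpoints the best-performing DQN to cfg.model_path
    agent.simulate()  # reloads that checkpoint and renders a few evaluation episodes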
import copy import json import logging import os import sys import tempfile import time import traceback import h5py import numpy as np import tables import tensorflow as tf from opentamp.src.policy_hooks.vae.vae_networks import * ''' Random things to remember: - End with no-op task (since we go obs + task -> next_obs, we want last obs + task -> last obs for code simplicity) - Or cut last timestep? - Policy gets a reward for finding bad encode/decode paths? - Constrain conditional encoding (i.e. latent output) against prior? ''' LATENT_DIM = 16 ENCODER_CONFIG = { 'n_channels': [16, 32, 32], 'filter_sizes': [5, 5, 5], 'strides': [3, 3, 3], 'fc_dims': [LATENT_DIM] # [2 * 3 * 32] # 'out_act': 'tanh', } DECODER_CONFIG = { 'conv_init_shape': [2, 3, 32], 'n_channels': [32, 16, 3], 'filter_sizes': [5, 5, 5], 'strides': [3, 3, 3], 'fc_dims': None, 'out_act': 'sigmoid', } LATENT_DYNAMICS_CONFIG = { 'fc_dims': [LATENT_DIM, LATENT_DIM], } class VAE(object): def __init__(self, hyperparams): self.config = hyperparams tf.reset_default_graph() tf.set_random_seed(self.config.get('random_seed', 1234)) self.tf_iter = 0 self.batch_size = self.config.get('batch_size', 64) self.train_iters = self.config.get('train_iters', 100) self.T = self.config['rollout_len'] - 2 self.rollout_len = self.config['rollout_len'] - 2 self.obs_dims = [80, 107, 3] # list(hyperparams['obs_dims']) self.task_dim = hyperparams['task_dims'] # The following hyperparameters also describe where the weights are saved self.weight_dir = hyperparams['weight_dir'] # if self.load_step < 0: # is_rnn = 'rnn' if self.use_recurrent_dynamics else 'fc' # overshoot = 'overshoot' if self.use_overshooting else 'onestep' # self.ckpt_name = self.weight_dir+'/vae_{0}_{1}_{2}.ckpt'.format(self.train_mode, is_rnn, overshoot) # else: # self.ckpt_name = self.weight_dir+'/vae_{0}_{1}_{2}.ckpt'.format(self.train_mode, is_rnn, overshoot, load_step) if hyperparams.get('load_data', True): f_mode = 'a' self.data_file = self.weight_dir+'/vae_buffer.hdf5' self.data = h5py.File(self.data_file, f_mode) try: self.obs_data = self.data['obs_data'] self.task_data = self.data['task_data'] self.task_data = self.task_data[:, :, :self.task_dim] self.task_dim = self.task_data.shape[-1] except: obs_data = np.zeros([0, self.rollout_len]+list(self.obs_dims)) task_data = np.zeros((0, self.rollout_len, self.task_dim)) self.obs_data = self.data.create_dataset('obs_data', data=obs_data, maxshape=(None, None, None, None, None), dtype='uint8') self.task_data = self.data.create_dataset('task_data', data=task_data, maxshape=(None, None, None), dtype='uint8') # self.data.swmr_mode=True elif hyperparams.get('data_read_only', False): f_mode = 'r' self.data_file = self.weight_dir+'/vae_buffer.hdf5' self.data = h5py.File(self.data_file, f_mode, swmr=True) self.obs_data = self.data['obs_data'] self.task_data = self.data['task_data'] # while not os.path.isfile(self.weight_dir+'/vae_buffer.hdf5'): # time.sleep(1) self.train_mode = hyperparams.get('train_mode', 'online') assert self.train_mode in ['online', 'conditional', 'unconditional'] self.use_recurrent_dynamics = hyperparams.get('use_recurrent_dynamics', False) self.use_overshooting = hyperparams.get('use_overshooting', False) self.use_prior = hyperparams.get('use_prior', True) self.load_step = hyperparams.get('load_step', 0) # self.beta = hyperparams.get('beta', 10) # self.beta_d = hyperparams.get('overshoot_beta', 1./self.T) self.beta = 0.2 # hyperparams.get('beta', 0.5) self.beta_d = hyperparams.get('overshoot_beta', 0.1) self.data_limit = 
hyperparams.get('data_limit', None) self.data_limit = self.data_limit if self.data_limit is not None else len(self.obs_data) self.obs_data = self.obs_data[:self.data_limit] self.task_data = self.task_data[:self.data_limit] self.dist_constraint = hyperparams.get('dist_constraint', False) self.ckpt_name = self.get_weight_file() # self.data_file = self.weight_dir+'/vae_buffer.npz' # try: # data = np.load(self.data_file, mmap_mode='w+') # except: # pass # self.obs_data = np.zeros((0, self.dT, self.dO)) # self.task_data = np.zeros((0, self.dT, self.dU)) self.max_buffer = hyperparams.get('max_buffer', 1e6) self.dist_constraint = hyperparams.get('distance_constraint', False) self.cur_lr = 1e-3 with tf.variable_scope('vae', reuse=False): self.init_network() self.init_solver() self.scope = 'vae' self.gpu_fraction = self.config['gpu_fraction'] if 'gpu_fraction' in self.config else 0.95 if 'allow_growth' in self.config and not self.config['allow_growth']: gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=self.gpu_fraction) else: gpu_options = tf.GPUOptions(allow_growth=True) self.sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) init_op = tf.initialize_all_variables() variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope) self.saver = tf.train.Saver(variables) if self.use_recurrent_dynamics: zero_state = self.latent_dynamics.lstm_cell.zero_state(batch_size=1, dtype=tf.float32) self.zero_state = tuple(self.sess.run(zero_state)) try: self.saver.restore(self.sess, self.ckpt_name) except Exception as e: self.sess.run(init_op) print(('\n\nCould not load previous weights for {0} from {1}\n\n'.format(self.scope, self.weight_dir))) self.update_count = 0 self.n_updates = 0 self.update_size = self.config.get('update_size', 1) def get_weight_file(self, addendum=None): is_rnn = 'rnn' if self.use_recurrent_dynamics else 'fc' overshoot = 'overshoot' if self.use_overshooting else 'onestep' step = self.load_step mode = self.train_mode prior = 'prior' if self.use_prior else 'noprior' beta = 'beta'+str(self.beta) overshoot_beta = 'beta_d'+str(self.beta_d) limit = self.data_limit if self.data_limit is not None else len(self.obs_data) limit = str(limit)+'nsamples' dist = 'distconstr' if self.dist_constraint else 'nodistconstr' if addendum is None: ext = "vae_{0}_{1}_{2}_{3}_{4}_{5}_{6}.ckpt".format(mode, is_rnn, overshoot, prior, beta, dist, limit) else: ext = "vae_{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}.ckpt".format(mode, is_rnn, overshoot, prior, beta, dist, limit, addendum) file_name = self.weight_dir + ext return file_name def serialize_weights(self): print('Serializing vae weights') var_to_val = {} variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vae') for v in variables: var_to_val[v.name] = self.sess.run(v).tolist() return json.dumps(var_to_val) def deserialize_weights(self, json_wts, save=True): var_to_val = json.loads(json_wts) # print 'Deserializing', scopes variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vae') for var in variables: var.load(var_to_val[var.name], session=self.sess) if save: self.store_scope_weights() # print 'Weights for {0} successfully deserialized and stored.'.format(scopes) # def update_weights(self, weight_dir=None): # if weight_dir is None: # weight_dir = self.weight_dir # self.saver.restore(self.sess, weight_dir+'/vae_{0}.ckpt'.format(self.train_mode)) def store_scope_weights(self, weight_dir=None, addendum=None): if weight_dir is 
None: weight_dir = self.weight_dir try: variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vae') saver = tf.train.Saver(variables) saver.save(self.sess, self.get_weight_file(addendum)) print(('Saved vae weights for', self.train_mode, 'in', self.weight_dir)) except: print('Saving variables encountered an issue but it will not crash:') traceback.print_exception(*sys.exc_info()) def store_weights(self, weight_dir=None): self.store_scope_weights(weight_dir) def store(self, obs, task_list): print(('Storing data for', self.scope)) assert len(obs) == len(task_list) # self.T = len(obs) # self.obs_data = np.r_[self.obs_data, obs] # self.task_data = np.r_[self.task_data, task_list] # obs = obs[:self.T] # task_list = task_list[:self.T] obs = obs.reshape((1,)+obs.shape) task_list = task_list.reshape((1,)+task_list.shape) self.obs_data.resize((len(self.obs_data)+1,) + obs.shape[1:]) self.obs_data[-1] = obs.astype(np.uint8) self.task_data.resize((len(self.task_data)+1,) + task_list.shape[1:]) self.task_data[-1] = task_list.astype(np.uint8) # if len(self.obs_data) > self.max_buffer: # self.obs_data = self.obs_data[-self.max_buffer:] # self.task_data = self.task_data[-self.max_buffer:] self.update_count += 1 if self.update_count > self.update_size and len(self.obs_data) > 10: print('Updating vae') # self.update() self.n_updates += 1 self.update_count = 0 if not self.n_updates % 5: self.save_buffers() if self.n_updates > 10: self.store_scope_weights() self.n_updates = 0 return True return False def save_buffers(self): # np.savez(self.data_file, task_data=self.task_data, obs_data=self.obs_data) self.data.flush() def init_network(self): import tensorflow as tf self.x_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+list(self.obs_dims)) self.latent_in = tf.compat.v1.placeholder(tf.float32, shape=[1, 1, LATENT_DIM]) self.task_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+[self.task_dim]) self.latent_task_in = tf.compat.v1.placeholder(tf.float32, shape=[1, 1, self.task_dim]) self.offset_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+list(self.obs_dims)) self.before_offset_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+list(self.obs_dims)) self.training = tf.compat.v1.placeholder(tf.bool) if len(self.obs_dims) == 1: pass else: pass self.fc_in = None # tf.compat.v1.placeholder(tf.float32, shape=[None, self.task_dim]) self.offset_fc_in = None #tf.compat.v1.placeholder(tf.float32, shape=[None, self.task_dim]) self.far_offset_fc_in = None # tf.compat.v1.placeholder(tf.float32, shape=[None, self.task_dim]) # mask = tf.ones((self.batch_size, self.T)) # mask[:,-1] = 0 # self.far_offset_loss_mask = tf.constant(mask.reshape([self.batch_size*self.T])) self.encoder = Encoder() self.encode_mu, self.encode_logvar = self.encoder.get_net(self.x_in / 255., self.training, fc_in=self.fc_in, config=ENCODER_CONFIG) self.encode_posterior = tf.distributions.Normal(self.encode_mu, tf.sqrt(tf.exp(self.encode_logvar))) # self.offset_encode_mu, self.offset_encode_logvar = self.encoder.get_net(self.offset_in, self.training, fc_in=self.offset_fc_in, reuse=True, config=ENCODER_CONFIG) # self.far_offset_encode_mu, self.far_offset_encode_logvar = self.encoder.get_net(self.far_offset_in, self.training, fc_in=self.far_offset_fc_in, reuse=True, config=ENCODER_CONFIG) self.decoder_in = self.encode_mu + tf.sqrt(tf.exp(self.encode_logvar)) * tf.random_normal(tf.shape(self.encode_mu), 0, 1) self.decoder = Decoder() 
self.decode_mu, self.decode_logvar = self.decoder.get_net(self.decoder_in, self.training, config=DECODER_CONFIG) self.decode_posterior = tf.distributions.Normal(self.decode_mu, tf.sqrt(tf.exp(self.decode_logvar))) # self.sample_decode_mu, self.sample_decode_logvar = self.decoder.get_net(self.decoder_in, self.training, config=DECODER_CONFIG, reuse=reuse) # self.sample_decode_posterior = tf.distributions.Normal(self.sample_decode_mu, tf.sqrt(tf.exp(self.sample_decode_logvar))) if 'unconditional' not in self.train_mode: if self.use_recurrent_dynamics: self.latent_dynamics = RecurrentLatentDynamics() in_shape = tf.shape(self.decoder_in) z_in = tf.reshape(self.decoder_in, (self.batch_size, self.T, LATENT_DIM)) task_in = tf.reshape(self.task_in, (self.batch_size, self.T, self.task_dim)) mu, logvar, self.rnn_initial_state, self.rnn_final_state = self.latent_dynamics.get_net(z_in, task_in, self.T, self.training, config=LATENT_DYNAMICS_CONFIG) self.conditional_encode_mu = tf.reshape(mu, in_shape) self.conditional_encode_logvar = tf.reshape(logvar, in_shape) self.conditional_encode_posterior = tf.distributions.Normal(self.conditional_encode_mu, tf.sqrt(tf.exp(self.conditional_encode_logvar))) trans_mu, trans_logvar, self.trans_rnn_initial_state, self.trans_rnn_final_state = self.latent_dynamics.get_net(self.latent_in, self.latent_task_in, 1, self.training, config=LATENT_DYNAMICS_CONFIG, reuse=True) self.latent_trans_mu = tf.reshape(trans_mu, [1, 1, LATENT_DIM]) self.latent_trans_logvar = tf.reshape(trans_logvar, [1, 1, LATENT_DIM]) self.latent_trans_posterior = tf.distributions.Normal(self.latent_trans_mu, tf.sqrt(tf.exp(self.latent_trans_logvar))) else: self.latent_dynamics = LatentDynamics() self.conditional_encode_mu, self.conditional_encode_logvar = self.latent_dynamics.get_net(self.decoder_in, self.task_in, self.training, config=LATENT_DYNAMICS_CONFIG) self.conditional_encode_posterior = tf.distributions.Normal(self.conditional_encode_mu, tf.sqrt(tf.exp(self.conditional_encode_logvar))) self.latent_trans_mu, self.latent_trans_logvar = self.latent_dynamics.get_net(tf.reshape(self.latent_in, (1, LATENT_DIM)), tf.reshape(self.latent_task_in, (1, self.task_dim)), self.training, config=LATENT_DYNAMICS_CONFIG, reuse=True) self.latent_trans_posterior = tf.distributions.Normal(self.latent_trans_mu, tf.sqrt(tf.exp(self.latent_trans_logvar))) self.conditional_decoder_in = self.conditional_encode_mu + tf.sqrt(tf.exp(self.conditional_encode_logvar)) * tf.random_normal(tf.shape(self.conditional_encode_mu), 0, 1) self.conditional_decode_mu, self.conditional_decode_logvar = self.decoder.get_net(self.conditional_decoder_in, self.training, config=DECODER_CONFIG, reuse=True) self.conditional_decode_posterior = tf.distributions.Normal(self.conditional_decode_mu, tf.sqrt(tf.exp(self.conditional_decode_logvar))) self.offset_encode_mu, self.offset_encode_logvar = self.encoder.get_net(self.offset_in / 255., self.training, fc_in=self.offset_fc_in, config=ENCODER_CONFIG, reuse=True) self.offset_encode_posterior = tf.distributions.Normal(self.offset_encode_mu, tf.sqrt(tf.exp(self.offset_encode_logvar))) if self.dist_constraint: self.before_offset_encode_mu, self.before_offset_encode_logvar = self.Encoder.get_net(self.before_offset_in/255., self.training, fc_in=self.fc_in, config=ENCODER_CONFIG, reuse=True) self.before_offset_encode_posterior = tf.distributions.Normal(self.before_offset_encode_mu, tf.sqrt(tf.exp(self.before_offset_encode_logvar))) self.latent_prior = 
tf.distributions.Normal(tf.zeros_initializer()(tf.shape(self.encode_mu)), 1.) self.fitted_prior = tf.distributions.Normal(tf.zeros_initializer()(LATENT_DIM), 1.) def overshoot_latents(self, d=-1): if d < 0: d = self.T if self.use_recurrent_dynamics: latent_in = tf.reshape(self.decoder_in, [self.batch_size, self.T, LATENT_DIM]) task_in = tf.reshape(self.task_in, [self.batch_size, self.T, self.task_dim]) z_in = tf.concat([latent_in, task_in], axis=-1) latent_mu = tf.reshape(self.conditional_encode_mu, [self.batch_size, self.T, LATENT_DIM]) latent_logvar= tf.reshape(self.conditional_encode_logvar, [self.batch_size, self.T, LATENT_DIM]) cell = self.latent_dynamics.lstm_cell w = self.latent_dynamics.weights b = self.latent_dynamics.bias init_state = self.latent_dynamics.initial_state last_state = self.latent_dynamics.last_state zero_state = cell.zero_state(batch_size=self.batch_size, dtype=tf.float32) outs = {i: [] for i in range(self.T)} cur_state = zero_state for i in range(self.T): cur_out = z_in[:, i, :] for j in range(i+1, np.minimum(self.T, i+d+1)): cur_out, cur_state = cell(cur_out, cur_state) if j == i+1: next_state = cur_state cur_out = tf.nn.bias_add(tf.matmul(cur_out, w), b) outs[j].append(cur_out) cur_out = tf.split(cur_out, 2, -1)[0] cur_out = tf.concat([cur_out, task_in[:, j, :]], axis=-1) cur_state = next_state else: latent_in = tf.reshape(self.decoder_in, [self.batch_size, self.T, LATENT_DIM]) task_in = tf.reshape(self.task_in, [self.batch_size, self.T, self.task_dim]) z_in = tf.concat([latent_in, task_in], axis=-1) latent_mu = tf.reshape(self.conditional_encode_mu, [self.batch_size, self.T, LATENT_DIM]) latent_logvar= tf.reshape(self.conditional_encode_logvar, [self.batch_size, self.T, LATENT_DIM]) outs = {i: [] for i in range(self.T)} for i in range(self.T): cur_out = z_in[:, i, :] for j in range(i+1, self.T): cur_out = self.latent_dynamics.apply(cur_out) outs[j].append(cur_out) cur_out = tf.split(cur_out, 2, -1)[0] cur_out = tf.concat([cur_out, task_in[:, j, :]], axis=-1) return outs def init_solver(self): import tensorflow as tf beta = self.beta beta_d = self.beta_d # self.decoder_loss = -tf.reduce_sum(tf.log(ecode_posterior.prob(self.x_in)+1e-6), axis=tuple(range(1, len(self.decode_mu.shape)))) self.decoder_loss = tf.reduce_sum(((self.x_in / 255.) - self.decode_mu)**2)#, axis=tuple(range(1, len(self.decode_mu.shape)))) self.loss = self.decoder_loss if self.use_prior: self.kl_loss = beta*tf.reduce_sum(tf.distributions.kl_divergence(self.encode_posterior, self.latent_prior))#, axis=tuple(range(1, len(self.encode_mu.shape)))) self.loss += self.kl_loss # self.elbo = self.decoder_loss + beta * self.kl_loss # self.loss = self.elbo if 'unconditional' not in self.train_mode: # self.conditional_decoder_loss = -tf.reduce_sum(tf.log(conditional_decode_posterior.prob(self.offset_in)+1e-6))#, axis=tuple(range(1, len(self.conditional_decode_mu.shape)))) self.conditional_decoder_loss = tf.reduce_sum((self.offset_in / 255. 
- self.conditional_decode_mu)**2)#, axis=tuple(range(1, len(self.conditional_decode_mu.shape)))) self.loss += self.conditional_decoder_loss if self.use_prior: self.conditional_kl_loss = beta*tf.reduce_sum(tf.distributions.kl_divergence(self.conditional_encode_posterior, self.latent_prior))#, axis=tuple(range(1, len(self.conditional_encode_mu.shape)))) self.loss += self.conditional_kl_loss # self.conditional_elbo = self.conditional_decoder_loss + beta * self.conditional_kl_loss self.conditional_prediction_loss = tf.reduce_sum(tf.distributions.kl_divergence(self.conditional_encode_posterior, self.offset_encode_posterior))#, axis=tuple(range(1, len(self.conditional_encode_mu.shape)))) self.loss += self.conditional_prediction_loss if self.dist_constraint: self.near_loss = 0.1*tf.reduce_sum(tf.distributions.kl_divergence(self.encode_posterior, self.offset_encode_posterior))#, axis=tuple(range(1, len(self.far_encode_mu.shape)))) self.dist_loss = -0.1*tf.reduce_sum(tf.distributions.kl_divergence(self.offset_encode_posterior, self.before_offset_encode_posterior))#, axis=tuple(range(1, len(self.far_encode_mu.shape)))) self.loss += self.dist_loss + self.near_loss if self.use_overshooting: outs = self.overshoot_latents(5) for t in range(1, self.T): true_mu, true_logvar = self.offset_encode_mu[t*self.batch_size:(t+1)*self.batch_size], self.offset_encode_logvar[t*self.batch_size:(t+1)*self.batch_size] true_mu = tf.stop_gradient(true_mu) true_logvar = tf.stop_gradient(true_logvar) prior = tf.distributions.Normal(true_mu, tf.sqrt(tf.exp(true_logvar))) for out in outs[t]: mu, logvar = tf.split(out, 2, axis=-1) posterior = tf.distributions.Normal(mu, tf.sqrt(tf.exp(logvar))) self.loss += 1./(self.T) * beta_d * tf.reduce_sum(tf.distributions.kl_divergence(posterior, prior))#, axis=tuple(range(1, len(self.conditional_encode_mu.shape)))) # self.loss += 1./(self.T) * beta_d * tf.reduce_sum(tf.distributions.kl_divergence(posterior, self.latent_prior))#, axis=tuple(range(1, len(self.conditional_encode_mu.shape)))) self.loss = self.loss / (self.batch_size * self.T) # if self.dist_constraint: # offset_loss = tf.reduce_sum((self.encode_mu-self.offset_encode_mu)**2 axis=tuple(range(1, len(self.encode_mu.shape)))) # self.loss += offset_loss # far_offset_loss = -tf.reduce_sum((self.encode_mu-self.far_offset_encode_mu)**2 axis=tuple(range(1, len(self.encode_mu.shape)))) # self.loss += self.far_offset_loss_mask * far_offset_loss self.lr = tf.compat.v1.placeholder(tf.float32) self.opt = tf.train.AdamOptimizer(self.lr) # sess.run(tf.variables_initializer(self.opt.variables())) train_op = self.opt.minimize(self.loss) # opt_grad_vars = self.opt.compute_gradients(self.loss) # clip_grad = [(tf.clip_by_norm(grad, 1), var) for grad, var in opt_grad_vars if grad is not None] # train_op = self.opt.apply_gradients(clip_grad) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) self.train_op = tf.group([train_op, update_ops]) def update(self): for step in range(self.train_iters): # start_t = time.time() ind = np.random.choice(list(range(len(self.obs_data)-self.batch_size)), 1)[0] # print 'ind:', time.time() - start_t obs_batch = self.obs_data[ind:ind+self.batch_size] task_batch = self.task_data[ind:ind+self.batch_size] # print 'data:', time.time() - start_t obs = obs_batch[:, 1:self.T+1] next_obs = obs_batch[:, 2:self.T+2] before_obs = obs_batch[:, :self.T] task_path = task_batch[:, :self.T] # obs = np.concatenate([obs_batch[:, :self.T], np.zeros([self.batch_size, 1]+list(self.obs_dims))], axis=1) # next_obs = np.concatenate([ 
obs_batch[:, 1:self.T+1], np.zeros([self.batch_size, 1]+list(self.obs_dims))], axis=1) # far_obs = np.concatenate([obs_batch[:, 2:self.T+2], np.zeros([self.batch_size, 2]+list(self.obs_dims))], axis=1) # task_path = np.concatenate([task_batch[:, :self.T], -1*np.ones([self.batch_size, 1, self.task_dim])], axis=1) obs = obs.reshape([self.batch_size*self.T]+self.obs_dims) next_obs = next_obs.reshape([self.batch_size*self.T]+self.obs_dims) before_obs = before_obs.reshape([self.batch_size*self.T]+self.obs_dims) task_path = task_path.reshape([self.batch_size*self.T, self.task_dim]) # inds = np.random.choice(range(len(self.obs_data)), self.batch_size) # obs = [] # next_obs = [] # task_path = [] # for i in inds: # print i # next_obs_batch = np.array([self.obs_data[i] for i in inds])[0] # next_task_batch = np.array([self.task_data[i] for i in inds])[0] # obs1 = next_obs_batch[:self.T-1].reshape([self.T-1]+list(self.obs_dims)) # obs.append(np.concatenate([obs1, np.zeros([1]+list(self.obs_dims))], 0)) # obs2 = next_obs_batch[1:self.T].reshape([self.T-1]+list(self.obs_dims)) # next_obs.append(np.concatenate([np.zeros([1]+list(self.obs_dims)), obs2], 0)) # task = next_task_batch[:self.T-1].reshape([self.T-1, self.task_dim]) # task_path.append(np.concatenate([task, -1*np.ones([1, self.task_dim])], 0)) # print 'start:', time.time() - start_t self.sess.run(self.train_op, feed_dict={self.x_in: obs, self.offset_in: next_obs, self.before_offset_in: before_obs, self.task_in: task_path, self.training: True, self.lr: self.cur_lr,}) # print 'train:', time.time() - start_t # print step # inds = np.random.choice(range(len(self.task_data)), 1)#self.batch_size) # next_obs_batch = np.array([self.obs_data[i] for i in inds])[0] # next_task_batch = np.array([self.task_data[i] for i in inds])[0] # obs1 = next_obs_batch[:self.T-1].reshape([-1]+list(self.obs_dims)) # obs2 = next_obs_batch[1:self.T].reshape([-1]+list(self.obs_dims)) # task = next_task_batch[:self.T-1].reshape([-1, self.task_dim]) # self.sess.run(self.train_op, feed_dict={self.x_in: obs1, # self.offset_in: obs2, # self.task_in: task, # self.lr: self.cur_lr, # self.training: True}) self.cur_lr *= 0.99999 self.load_step += self.train_iters print(('Updated VAE', self.load_step)) def fit_prior(self): latents = [] inds = np.random.choice(list(range(len(self.obs_data))), np.minimum(1000, len(self.obs_data))) for i in range(len(inds)): print(i) batch = self.obs_data[inds[i]] latents.extend(self.get_latents(batch)) self.prior_mean = np.mean(latents, axis=0) self.prior_std = np.std(latents, axis=0) self.fitted_prior = tf.distributions.Normal(self.prior_mean, self.prior_std) def sample_prior(self): return self.sess.run(self.fitted_prior.sample()) def check_loss(self): ind = np.random.choice(list(range(len(self.obs_data)-self.batch_size)), 1)[0] obs_batch = self.obs_data[ind:ind+self.batch_size] task_batch = self.task_data[ind:ind+self.batch_size] before_obs = obs_batch[:, :self.T] obs = obs_batch[:, 1:self.T+1] next_obs = obs_batch[:, 2:self.T+2] task_path = task_batch[:, :self.T] # obs = np.concatenate([obs_batch[:, :self.T-1], np.zeros([self.batch_size, 1]+list(self.obs_dims))], axis=1) # next_obs = np.concatenate([np.zeros([self.batch_size, 1]+list(self.obs_dims)), obs_batch[:, 1:self.T]], axis=1) # task_path = np.concatenate([task_batch[:, :self.T-1], -1*np.ones([self.batch_size, 1, self.task_dim])], axis=1) before_obs = obs.reshape([self.batch_size*self.T]+self.obs_dims) obs = next_obs.reshape([self.batch_size*self.T]+self.obs_dims) next_obs = 
far_obs.reshape([self.batch_size*self.T]+self.obs_dims) task_path = task_path.reshape([self.batch_size*self.T, self.task_dim]) # inds = np.random.choice(range(len(self.obs_data)), self.batch_size) # obs = [] # next_obs = [] # task_path = [] # for i in inds: # print i # next_obs_batch = np.array([self.obs_data[i] for i in inds])[0] # next_task_batch = np.array([self.task_data[i] for i in inds])[0] # obs1 = next_obs_batch[:self.T-1].reshape([self.T-1]+list(self.obs_dims)) # obs.append(np.concatenate([obs1, np.zeros([1]+list(self.obs_dims))], 0)) # obs2 = next_obs_batch[1:self.T].reshape([self.T-1]+list(self.obs_dims)) # next_obs.append(np.concatenate([np.zeros([1]+list(self.obs_dims)), obs2], 0)) # task = next_task_batch[:self.T-1].reshape([1, self.task_dim]) # task_path.append(np.concatenate([task, -1*np.ones([1, self.task_dim])], 0)) return self.sess.run(self.loss, feed_dict={self.x_in: obs, self.offset_in: next_obs, self.before_offset_in: before_obs, self.task_in: task_path, self.training: True, self.lr: self.cur_lr,}) def get_latents(self, obs): if len(obs) < self.batch_size*self.T: s = obs.shape obs = np.r_[obs, np.zeros((self.batch_size*self.T-s[0], s[1], s[2], s[3]))] return self.sess.run(self.encode_mu, feed_dict={self.x_in: obs, self.training: True}) def get_next_latents(self, z, task, h=None): z = np.array(z) task = np.array(task) if self.use_recurrent_dynamics: z = z.reshape((1, 1, LATENT_DIM)) task = task.reshape((1, 1, self.task_dim)) z, h = self.sess.run([self.latent_trans_mu, self.trans_rnn_final_state], feed_dict={self.latent_in: z, self.latent_task_in: task, self.trans_rnn_initial_state: h, self.training: True}) else: z = self.sess.run(self.latent_trans_mu, feed_dict={self.latent_in: z, self.latent_task_in: task, self.training: True}) h = None return z.reshape(LATENT_DIM), h def next_latents_kl_pentalty(self, obs, task): return self.sess.run(self.conditional_kl_loss, feed_dict={self.x_in: obs, self.task_in: task, self.training: True}) def decode_latent(self, latents): if len(latents) < self.batch_size*self.T: s = latents.shape latents = np.r_[latents, np.zeros((self.batch_size*self.T-s[0], s[1]))] return self.sess.run(self.decode_mu, feed_dict={self.decoder_in: latents, self.training: True}) def test_decode(self, i=10000, t=3): o = self.obs_data[i, t].copy() z = self.get_latents(np.array([o])) d = self.decode_latent(np.array([z[0]])) d[d < 0] = 0 d[d > 1] = 1 d = (255*d).astype(np.uint8) if len(o) < self.batch_size*self.T: s = o.shape o = np.r_[[o], np.zeros((self.batch_size*self.T-1, s[0], s[1], s[2]))] d2 = self.sess.run(self.decode_mu, feed_dict={self.x_in: o, self.training: True}) d2[d2 < 0] = 0 d2[d2 > 1] = 1 d2 = (255.*d2).astype(np.uint8) return o, d, d2
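# Configuration sketch: the keys below are the ones read in VAE.__init__ above; the
# values are placeholders only. 'weight_dir' must exist and is also where the hdf5
# replay buffer (vae_buffer.hdf5) is created when load_data is True.
EXAMPLE_HYPERPARAMS = {
    'rollout_len': 22,                  # internally reduced by 2 to form self.T
    'task_dims': 6,                     # width of the task vector (placeholder value)
    'weight_dir': '/tmp/vae_weights',
    'batch_size': 64,
    'train_iters': 100,
    'train_mode': 'conditional',        # 'online', 'conditional' or 'unconditional'
    'use_recurrent_dynamics': False,
    'use_overshooting': False,
    'use_prior': True,
    'load_data': True,
}

# vae = VAE(EXAMPLE_HYPERPARAMS)
# vae.update()   # runs train_iters optimisation steps over the stored rollouts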
# # Electrum - lightweight Bitcoin client # Copyright (C) 2011 <NAME> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # copied from code https://github.com/spesmilo/electrum # Used to convert address to scripthash import binascii import hashlib from typing import Tuple, Optional, Union from .constants import ADDRTYPE_P2PKH, ADDRTYPE_P2SH, ADDRTYPE_P2SH_ALT, SEGWIT_HRP from . import segwit_addr bfh = bytes.fromhex hfu = binascii.hexlify __b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' assert len(__b58chars) == 58 __b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:' assert len(__b43chars) == 43 def bh2u(x: bytes) -> str: """ str with hex representation of a bytes-like object >>> x = bytes((1, 2, 10)) >>> bh2u(x) '01020A' """ return hfu(x).decode('ascii') def sha256(x: Union[bytes, str]) -> bytes: x = to_bytes(x, 'utf8') return bytes(hashlib.sha256(x).digest()) def rev_hex(s: str) -> str: return bh2u(bfh(s)[::-1]) def int_to_hex(i: int, length: int=1) -> str: """Converts int to little-endian hex string. `length` is the number of bytes available """ if not isinstance(i, int): raise TypeError('{} instead of int'.format(i)) range_size = pow(256, length) if i < -(range_size//2) or i >= range_size: raise OverflowError('cannot convert int {} to hex ({} bytes)'.format(i, length)) if i < 0: # two's complement i = range_size + i s = hex(i)[2:].rstrip('L') s = "0"*(2*length - len(s)) + s return rev_hex(s) def op_push(i: int) -> str: if i<0x4c: # OP_PUSHDATA1 return int_to_hex(i) elif i<=0xff: return '4c' + int_to_hex(i) elif i<=0xffff: return '4d' + int_to_hex(i,2) else: return '4e' + int_to_hex(i,4) def push_script(data: str) -> str: """Returns pushed data to the script, automatically choosing canonical opcodes depending on the length of the data. 
hex -> hex ported from https://github.com/btcsuite/btcd/blob/fdc2bc867bda6b351191b5872d2da8270df00d13/txscript/scriptbuilder.go#L128 """ data = bfh(data) from .opcode import opcodes data_len = len(data) # "small integer" opcodes if data_len == 0 or data_len == 1 and data[0] == 0: return bh2u(bytes([opcodes.OP_0])) elif data_len == 1 and data[0] <= 16: return bh2u(bytes([opcodes.OP_1 - 1 + data[0]])) elif data_len == 1 and data[0] == 0x81: return bh2u(bytes([opcodes.OP_1NEGATE])) return op_push(data_len) + bh2u(data) def address_to_script(addr: str) -> str: witver, witprog = segwit_addr.decode(SEGWIT_HRP, addr) if witprog is not None: if not (0 <= witver <= 16): raise logger.exception('impossible witness version: {witver}') OP_n = witver + 0x50 if witver > 0 else 0 script = bh2u(bytes([OP_n])) script += push_script(bh2u(bytes(witprog))) return script addrtype, hash_160_ = b58_address_to_hash160(addr) if addrtype == ADDRTYPE_P2PKH: script = '76a9' # op_dup, op_hash_160 script += push_script(bh2u(hash_160_)) script += '88ac' # op_equalverify, op_checksig elif addrtype in [ADDRTYPE_P2SH, ADDRTYPE_P2SH_ALT]: script = 'a9' # op_hash_160 script += push_script(bh2u(hash_160_)) script += '87' # op_equal else: raise logger.exception('unknown address type: {addrtype}') return script def address_to_scripthash(addr: str) -> str: script = address_to_script(addr) return script_to_scripthash(script) def script_to_scripthash(script: str) -> str: h = sha256(bfh(script))[0:32] return bh2u(bytes(reversed(h))) def base_decode(v: Union[bytes, str], length: Optional[int], base: int) -> Optional[bytes]: """ decode v into a string of len bytes.""" # assert_bytes(v) v = to_bytes(v, 'ascii') if base not in (58, 43): raise ValueError('not supported base: {}'.format(base)) chars = __b58chars if base == 43: chars = __b43chars long_value = 0 for (i, c) in enumerate(v[::-1]): digit = chars.find(bytes([c])) if digit == -1: raise ValueError('Forbidden character {} for base {}'.format(c, base)) long_value += digit * (base**i) result = bytearray() while long_value >= 256: div, mod = divmod(long_value, 256) result.append(mod) long_value = div result.append(long_value) nPad = 0 for c in v: if c == chars[0]: nPad += 1 else: break result.extend(b'\x00' * nPad) if length is not None and len(result) != length: return None result.reverse() return bytes(result) def b58_address_to_hash160(addr: str) -> Tuple[int, bytes]: addr = to_bytes(addr, 'ascii') _bytes = base_decode(addr, 25, base=58) return _bytes[0], _bytes[1:21] def to_bytes(something, encoding='utf8') -> bytes: """ cast string to bytes() like object, but for python2 support it's bytearray copy """ if isinstance(something, bytes): return something if isinstance(something, str): return something.encode(encoding) elif isinstance(something, bytearray): return bytes(something) else: raise TypeError("Not a string or bytes like object")
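# Usage sketch: Electrum-style servers index transactions by "scripthash", the
# SHA-256 of an output script, byte-reversed and hex-encoded (see
# script_to_scripthash above). The address passed in must match whatever
# ADDRTYPE_* / SEGWIT_HRP constants this package is configured with; no concrete
# address is assumed here.
def _demo(address: str) -> str:
    scripthash = address_to_scripthash(address)
    # A typical next step is to query an Electrum-protocol server with this value,
    # e.g. blockchain.scripthash.get_history(scripthash).
    return scripthash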
# -*- coding: utf-8 -*-
"""
Container for computing a moving (sliding) sum.
Elements can be pushed into it continuously and the sum retrieved quickly.

Example of working with the container:
m = MovingSum(window=4)
# sum equals 0
m.push(8)   # sum equals 8
m.push(41)  # sum equals 49
m.push(9)   # sum equals 58
m.push(6)   # sum equals 64
m.push(32)  # sum equals 88
m.push(58)  # sum equals 105
m.push(8)   # sum equals 104
m.push(78)  # sum equals 176
m.push(3)   # sum equals 147
m.push(2)   # sum equals 91
"""
# built-in modules
from typing import Union, Optional, Sequence, Any

# project modules
from trivial_tools.containers.class_carousel import Carousel


class MovingSum(Carousel):
    """
    Container for computing a moving (sliding) sum.
    Elements can be pushed into it continuously and the sum retrieved quickly.
    """
    __slots__ = ('_sum',)

    def __init__(self, source: Optional[Sequence] = None,
                 window: int = 0, sentinel: Any = object()):
        """
        Create an instance.

        :param source: source collection of elements used to build the instance
        :param window: maximum number of elements (width of the computation window)
        :param sentinel: element used to fill empty cells (a custom one may be supplied)
        """
        self._sum: Union[int, float] = 0
        super().__init__(source, window, sentinel)

    def push(self, value: Union[int, float]) -> None:
        """
        Add an element to the container.

        We evict the previous element and adjust the sum accordingly. The point
        of doing it this way is to avoid iterating over the whole collection
        every time the sum is requested.

        :param value: new number corresponding to shifting the window by one element
        """
        old_value = super().push(value)

        if old_value is not self._sentinel:
            self._sum -= old_value

        self._sum += value

    def restore(self) -> None:
        """
        Reset the parameters.
        """
        self._sum = 0
        super().restore()

    @property
    def sum(self) -> float:
        """
        Sum of all elements.
        """
        return self._sum

    def __setitem__(self, key: Union[int, slice], value: Union[int, float, Sequence]) -> None:
        """
        Write an element by index.

        Provides ordinary access to the internal storage, just with the index offset applied.

        :param key: indexing key
        :param value: data to write (any type)
        """
        if isinstance(key, int):
            self._sum -= self._data[self.get_real_index(key)]
            self._sum += value
        super().__setitem__(key, value)
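# A self-contained sketch of the same O(1)-per-push idea using only the
# standard library (no Carousel base class); assumes window >= 1. This is an
# illustration, not a replacement for the class above.
from collections import deque

class SimpleMovingSum:
    def __init__(self, window: int):
        self._data = deque(maxlen=window)
        self._sum = 0.0

    def push(self, value: float) -> None:
        if len(self._data) == self._data.maxlen:
            self._sum -= self._data[0]  # the oldest element is about to be evicted
        self._data.append(value)
        self._sum += value

    @property
    def sum(self) -> float:
        return self._sum

m = SimpleMovingSum(window=4)
for x in (8, 41, 9, 6, 32):
    m.push(x)
print(m.sum)  # 88.0, matching the example in the docstring above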
import tkinter as tk from tkinter import messagebox from client import Client import pyperclip class GUI: def __init__(self): self.root = tk.Tk() self.root.title('Sesame') self.root.pack_propagate(True) self.root.resizable(False, False) self.client = Client() def reset_root(self): for widget in self.root.pack_slaves(): widget.destroy() def auth_view(self): self.reset_root() container = tk.Frame(master=self.root, height=100, width=300) container.pack_propagate(False) container.pack() button_login = tk.Button(master=container, text='Zaloguj', command=self.login_view) button_register = tk.Button(master=container, text='Zarejestruj', command=self.register_view) button_login.pack(side=tk.LEFT, expand=True) button_register.pack(side=tk.LEFT, expand=True) def login_view(self): self.reset_root() container = tk.Frame(master=self.root, height=200, width=300) container.pack_propagate(False) container.pack() frame_login = tk.Frame(master=container) frame_password = tk.Frame(master=container) frame_button = tk.Frame(master=container) frame_login.pack(expand=True) frame_password.pack(expand=True) frame_button.pack(expand=True) label_login = tk.Label(master=frame_login, text='Nazwa użytkownika') label_login.pack(side=tk.LEFT) entry_login = tk.Entry(master=frame_login) entry_login.pack(side=tk.LEFT) label_password = tk.Label(master=frame_password, text='<PASSWORD>') label_password.pack(side=tk.LEFT) entry_password = tk.Entry(master=frame_password, show='*') entry_password.pack(side=tk.LEFT) button_confirm = tk.Button(master=frame_button, text='Zaloguj', command=lambda: self.login(entry_login.get(), entry_password.get())) button_confirm.pack() def login(self, login, password): #TODO informacja o pustym polu try: self.client.login(login, password) messagebox.showinfo(title='Sukces', message='Poprawnie zalogowano') self.main_view() #TODO mainview except ValueError: messagebox.showwarning(title='Błąd', message='Użytkownik o podanej nazwie użytkownika i haśle nie istnieje') def register_view(self): self.reset_root() container = tk.Frame(master=self.root, height=300, width=300) container.pack_propagate(False) container.pack() frame_login = tk.Frame(master=container) frame_password = tk.Frame(master=container) frame_confirm = tk.Frame(master=container) frame_email = tk.Frame(master=container) frame_button = tk.Frame(master=container) frame_login.pack(expand=True) frame_password.pack(expand=True) frame_confirm.pack(expand=True) frame_email.pack(expand=True) frame_button.pack(expand=True) label_login = tk.Label(master=frame_login, text='Nazwa użytkownika') label_login.pack(side=tk.LEFT) entry_login = tk.Entry(master=frame_login) entry_login.pack(side=tk.LEFT) label_password = tk.Label(master=frame_password, text='Hasło') label_password.pack(side=tk.LEFT) entry_password = tk.Entry(master=frame_password, show='*') entry_password.pack(side=tk.LEFT) label_confirm = tk.Label(master=frame_confirm, text='Powtórz hasło') label_confirm.pack(side=tk.LEFT) entry_confirm = tk.Entry(master=frame_confirm, show='*') entry_confirm.pack(side=tk.LEFT) label_email = tk.Label(master=frame_email, text='Adres email') label_email.pack(side=tk.LEFT) entry_email = tk.Entry(master=frame_email) entry_email.pack(side=tk.LEFT) button_confirm = tk.Button(master=frame_button, text='Zarejestruj', command=lambda: self.register(entry_login.get(), entry_password.get(), entry_confirm.get(), entry_email.get())) button_confirm.pack() def register(self, login, password, confirmation, email): #TODO password = <PASSWORD>, <PASSWORD>ść hasła <PASSWORD> 
try: self.client.register(login, password, email) messagebox.showinfo(title='Sukces', message=f'Poprawnie zarejestrowano użytkownika {login}') self.auth_view() except ValueError: messagebox.showwarning(title='Błąd', message='Użytkownik o podanej nazwie użytkownika lub adresie email już istnieje') def main_view(self): self.reset_root() container = tk.Frame(master=self.root, height=250, width=300) container.pack_propagate(False) container.pack() frame_options = tk.Frame(master=container) frame_passwords = tk.Frame(master=container) frame_options.pack() frame_passwords.pack() scrollbar = tk.Scrollbar(master=frame_passwords) passwords = tk.Listbox(master=frame_passwords, yscrollcommand=scrollbar.set) button_add_password = tk.Button(master=frame_options, text='Dodaj hasło', command=self.add_password_view) button_add_password.pack(side=tk.RIGHT) button_copy_selected_pass = tk.Button(master=frame_options, text='Kopiuj hasło', command=lambda: pyperclip.copy(self.get_password(self.get_label(passwords)))) button_copy_selected_pass.pack(side=tk.RIGHT) button_copy_selected_login = tk.Button(master=frame_options, text='Kopiuj login', command=lambda: pyperclip.copy(self.get_login(self.get_label(passwords)))) button_copy_selected_login.pack(side=tk.RIGHT) scrollbar.config(command=passwords.yview) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) passwords.pack(side=tk.LEFT, fill=tk.BOTH, expand=1) for password in self.client.get_password_labels(): passwords.insert(tk.END, password) def get_label(self, list): try: return list.get(list.curselection()[0]) except IndexError: messagebox.showwarning(title='Błąd', message='Zaznacz etykietę z listy aby skopiować') def get_password(self, label): if label is None: return '' return self.client.get_password(label)[0] def get_login(self, label): if label is None: return '' return self.client.get_password(label)[1] def add_password_view(self): self.reset_root() container = tk.Frame(master=self.root, height=300, width=300) container.pack_propagate(False) container.pack() frame_label = tk.Frame(master=container) frame_password = tk.Frame(master=container) frame_username = tk.Frame(master=container) frame_button = tk.Frame(master=container) frame_label.pack(expand=True) frame_password.pack(expand=True) frame_username.pack(expand=True) frame_button.pack(expand=True) label_label = tk.Label(master=frame_label, text='Etykieta') label_label.pack(side=tk.LEFT) entry_label = tk.Entry(master=frame_label) entry_label.pack(side=tk.LEFT) label_password = tk.Label(master=frame_password, text='Hasło') label_password.pack(side=tk.LEFT) entry_password = tk.Entry(master=frame_password, show='*') entry_password.pack(side=tk.LEFT) label_username = tk.Label(master=frame_username, text='Nazwa użytkownika') label_username.pack(side=tk.LEFT) entry_username = tk.Entry(master=frame_username) entry_username.pack(side=tk.LEFT) button_confirm = tk.Button(master=frame_button, text='Dodaj', command=lambda: self.add_password(entry_label.get(), entry_password.get(), entry_username.get())) button_confirm.pack() def add_password(self, label, password, username): try: print(label, password, username) self.client.add_password(password, label, username) messagebox.showinfo(title='Sukces', message='Poprawnie dodano hasło') self.main_view() except ValueError as e: messagebox.showwarning(title='Błąd', message='Ups! Coś poszło nie tak') print(e) def run(self): self.auth_view() self.root.mainloop() GUI().run()
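# Hypothetical in-memory stand-in for the Client class imported above, so the
# GUI can be exercised without a real backend. Method names and signatures are
# inferred from the calls the GUI makes; the real client module may differ.
class FakeClient:
    def __init__(self):
        self._users = {}   # login -> (password, email)
        self._vault = {}   # label -> (password, username)

    def register(self, login, password, email):
        if login in self._users:
            raise ValueError('user already exists')
        self._users[login] = (password, email)

    def login(self, login, password):
        if login not in self._users or self._users[login][0] != password:
            raise ValueError('bad credentials')

    def add_password(self, password, label, username):
        self._vault[label] = (password, username)

    def get_password_labels(self):
        return list(self._vault)

    def get_password(self, label):
        # Returned as (password, login), matching how the GUI indexes the tuple.
        return self._vault[label]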
''' MIT License Copyright (c) 2018 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' import argparse import csv import importlib import models import numpy as np import os import tensorflow as tf import time from io_util import read_pcd, save_pcd from tf_util import chamfer, earth_mover from visu_util import plot_pcd_three_views def test(args): inputs = tf.placeholder(tf.float32, (1, None, 3)) npts = tf.placeholder(tf.int32, (1,)) gt = tf.placeholder(tf.float32, (1, args.num_gt_points, 3)) model_module = importlib.import_module('.%s' % args.model_type, 'models') model = model_module.Model(inputs, npts, gt, tf.constant(1.0)) output = tf.placeholder(tf.float32, (1, args.num_gt_points, 3)) cd_op = chamfer(output, gt) emd_op = earth_mover(output, gt) config = tf.ConfigProto() config.gpu_options.allow_growth = True config.allow_soft_placement = True sess = tf.Session(config=config) saver = tf.train.Saver() saver.restore(sess, args.checkpoint) os.makedirs(args.results_dir, exist_ok=True) csv_file = open(os.path.join(args.results_dir, 'results.csv'), 'w') writer = csv.writer(csv_file) writer.writerow(['id', 'cd', 'emd']) with open(args.list_path) as file: model_list = file.read().splitlines() total_time = 0 total_cd = 0 total_emd = 0 cd_per_cat = {} emd_per_cat = {} for i, model_id in enumerate(model_list): partial = read_pcd(os.path.join(args.data_dir, 'partial', '%s.pcd' % model_id)) complete = read_pcd(os.path.join(args.data_dir, 'complete', '%s.pcd' % model_id)) start = time.time() completion = sess.run(model.outputs, feed_dict={inputs: [partial], npts: [partial.shape[0]]}) total_time += time.time() - start cd, emd = sess.run([cd_op, emd_op], feed_dict={output: completion, gt: [complete]}) total_cd += cd total_emd += emd writer.writerow([model_id, cd, emd]) synset_id, model_id = model_id.split('/') if not cd_per_cat.get(synset_id): cd_per_cat[synset_id] = [] if not emd_per_cat.get(synset_id): emd_per_cat[synset_id] = [] cd_per_cat[synset_id].append(cd) emd_per_cat[synset_id].append(emd) if i % args.plot_freq == 0: os.makedirs(os.path.join(args.results_dir, 'plots', synset_id), exist_ok=True) plot_path = os.path.join(args.results_dir, 'plots', synset_id, '%s.png' % model_id) plot_pcd_three_views(plot_path, [partial, completion[0], complete], ['input', 'output', 'ground truth'], 'CD %.4f EMD %.4f' % (cd, emd), [5, 0.5, 0.5]) if args.save_pcd: os.makedirs(os.path.join(args.results_dir, 'pcds', synset_id), exist_ok=True) save_pcd(os.path.join(args.results_dir, 'pcds', '%s.pcd' % model_id), completion[0]) csv_file.close() sess.close() 
    print('Average time: %f' % (total_time / len(model_list)))
    print('Average Chamfer distance: %f' % (total_cd / len(model_list)))
    print('Average Earth mover distance: %f' % (total_emd / len(model_list)))

    print('Chamfer distance per category')
    for synset_id in cd_per_cat.keys():
        print(synset_id, '%f' % np.mean(cd_per_cat[synset_id]))
    print('Earth mover distance per category')
    for synset_id in emd_per_cat.keys():
        print(synset_id, '%f' % np.mean(emd_per_cat[synset_id]))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--list_path', default='data/shapenet/test.list')
    parser.add_argument('--data_dir', default='data/shapenet/test')
    parser.add_argument('--model_type', default='pcn_emd')
    parser.add_argument('--checkpoint', default='data/trained_models/pcn_emd')
    parser.add_argument('--results_dir', default='results/shapenet_pcn_emd')
    parser.add_argument('--num_gt_points', type=int, default=16384)
    parser.add_argument('--plot_freq', type=int, default=100)
    parser.add_argument('--save_pcd', action='store_true')
    args = parser.parse_args()
    test(args)
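# A small NumPy sketch of the symmetric Chamfer distance reported above, using
# one common definition (mean nearest-neighbour distance in both directions).
# The actual tf_util.chamfer op may differ in detail (e.g. squared distances),
# so treat this as an approximation for sanity checks only.
import numpy as np

def chamfer_np(a: np.ndarray, b: np.ndarray) -> float:
    # a: (N, 3), b: (M, 3) point clouds
    d = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1)  # (N, M) pairwise distances
    return d.min(axis=1).mean() + d.min(axis=0).mean()

a = np.random.rand(128, 3)
b = np.random.rand(256, 3)
print(chamfer_np(a, b))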
<reponame>ambasta/grpc #!/usr/bin/env python # Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import print_function import errno import filecmp import glob import os import os.path import shutil import subprocess import sys import traceback import uuid DEPS_FILE_CONTENT=""" # Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # AUTO-GENERATED BY make_grpcio_tools.py! CC_FILES={cc_files} PROTO_FILES={proto_files} CC_INCLUDE={cc_include} PROTO_INCLUDE={proto_include} """ # Bazel query result prefix for expected source files in protobuf. 
PROTOBUF_CC_PREFIX = '//:src/' PROTOBUF_PROTO_PREFIX = '//:src/' GRPC_ROOT = os.path.abspath( os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..')) GRPC_PYTHON_ROOT = os.path.join(GRPC_ROOT, 'tools/distrib/python/grpcio_tools') GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT = 'third_party/protobuf/src' GRPC_PROTOBUF = os.path.join(GRPC_ROOT, GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT) GRPC_PROTOC_PLUGINS = os.path.join(GRPC_ROOT, 'src/compiler') GRPC_PYTHON_PROTOBUF = os.path.join(GRPC_PYTHON_ROOT, 'third_party/protobuf/src') GRPC_PYTHON_PROTOC_PLUGINS = os.path.join(GRPC_PYTHON_ROOT, 'grpc_root/src/compiler') GRPC_PYTHON_PROTOC_LIB_DEPS = os.path.join(GRPC_PYTHON_ROOT, 'protoc_lib_deps.py') GRPC_INCLUDE = os.path.join(GRPC_ROOT, 'include') GRPC_PYTHON_INCLUDE = os.path.join(GRPC_PYTHON_ROOT, 'grpc_root/include') BAZEL_DEPS = os.path.join(GRPC_ROOT, 'tools/distrib/python/bazel_deps.sh') BAZEL_DEPS_PROTOC_LIB_QUERY = '//:protoc_lib' BAZEL_DEPS_COMMON_PROTOS_QUERY = '//:well_known_protos' def bazel_query(query): output = subprocess.check_output([BAZEL_DEPS, query]) return output.splitlines() def get_deps(): """Write the result of the bazel query `query` against protobuf to `out_file`.""" cc_files_output = bazel_query(BAZEL_DEPS_PROTOC_LIB_QUERY) cc_files = [ name[len(PROTOBUF_CC_PREFIX):] for name in cc_files_output if name.endswith('.cc') and name.startswith(PROTOBUF_CC_PREFIX)] proto_files_output = bazel_query(BAZEL_DEPS_COMMON_PROTOS_QUERY) proto_files = [ name[len(PROTOBUF_PROTO_PREFIX):] for name in proto_files_output if name.endswith('.proto') and name.startswith(PROTOBUF_PROTO_PREFIX)] deps_file_content = DEPS_FILE_CONTENT.format( cc_files=cc_files, proto_files=proto_files, cc_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT), proto_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT)) return deps_file_content def long_path(path): if os.name == 'nt': return '\\\\?\\' + path else: return path def atomic_file_copy(src, dst): """Based on the lock-free-whack-a-mole algorithm, depending on filesystem renaming being atomic. Described at http://stackoverflow.com/a/28090883. 
""" try: if filecmp.cmp(src, dst): return except: pass dst_dir = os.path.abspath(os.path.dirname(dst)) dst_base = os.path.basename(dst) this_id = str(uuid.uuid4()).replace('.', '-') temporary_file = os.path.join(dst_dir, '{}.{}.tmp'.format(dst_base, this_id)) mole_file = os.path.join(dst_dir, '{}.{}.mole.tmp'.format(dst_base, this_id)) mole_pattern = os.path.join(dst_dir, '{}.*.mole.tmp'.format(dst_base)) src = long_path(src) dst = long_path(dst) temporary_file = long_path(temporary_file) mole_file = long_path(mole_file) mole_pattern = long_path(mole_pattern) shutil.copy2(src, temporary_file) try: os.rename(temporary_file, mole_file) except: print('Error moving temporary file {} to {}'.format(temporary_file, mole_file), file=sys.stderr) print('while trying to copy file {} to {}'.format(src, dst), file=sys.stderr) raise for other_file in glob.glob(mole_pattern): other_id = other_file.split('.')[-3] if this_id == other_id: pass elif this_id < other_id: try: os.remove(other_file) except: pass else: try: os.remove(mole_file) except: pass this_id = other_id mole_file = other_file try: if filecmp.cmp(src, dst): try: os.remove(mole_file) except: pass return except: pass try: os.rename(mole_file, dst) except: pass def main(): os.chdir(GRPC_ROOT) for source, target in [ (GRPC_PROTOBUF, GRPC_PYTHON_PROTOBUF), (GRPC_PROTOC_PLUGINS, GRPC_PYTHON_PROTOC_PLUGINS), (GRPC_INCLUDE, GRPC_PYTHON_INCLUDE)]: for source_dir, _, files in os.walk(source): target_dir = os.path.abspath(os.path.join(target, os.path.relpath(source_dir, source))) try: os.makedirs(target_dir) except OSError as error: if error.errno != errno.EEXIST: raise for relative_file in files: source_file = os.path.abspath(os.path.join(source_dir, relative_file)) target_file = os.path.abspath(os.path.join(target_dir, relative_file)) atomic_file_copy(source_file, target_file) try: protoc_lib_deps_content = get_deps() except Exception as error: # We allow this script to succeed even if we couldn't get the dependencies, # as then we can assume that even without a successful bazel run the # dependencies currently in source control are 'good enough'. sys.stderr.write("Got non-fatal error:\n") traceback.print_exc(file=sys.stderr) return # If we successfully got the dependencies, truncate and rewrite the deps file. with open(GRPC_PYTHON_PROTOC_LIB_DEPS, 'w') as deps_file: deps_file.write(protoc_lib_deps_content) if __name__ == '__main__': main()
<reponame>thehyve/python_fhir2transmart<gh_stars>1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for the fhir2transmart module. """ import pytest from fhir2transmart.fhir_reader import FhirReader from fhir2transmart.mapper import Mapper from transmart_loader.transmart import DataCollection @pytest.fixture def empty_fhir_bundle() -> dict: return { 'resourceType': 'Bundle', 'type': 'collection', 'entry': [ ] } @pytest.fixture def simple_fhir_bundle() -> dict: return { 'resourceType': 'Bundle', 'type': 'collection', 'entry': [ { 'resource': { 'resourceType': 'Patient', 'id': 'SUBJ0', 'birthDate': '2000-01-01', 'gender': 'female' } }, { 'resource': { 'resourceType': 'Encounter', 'id': 'VISIT0', 'subject': { 'reference': 'urn:uuid:SUBJ0' }, 'status': 'finished', 'period': { 'start': '2010-12-10T09:30', 'end': '2010-12-10T17:00' } } }, { 'resource': { 'resourceType': 'Condition', 'subject': { 'reference': 'urn:uuid:SUBJ0' }, 'code': { 'text': 'Decease X', 'coding': [{ 'system': 'ICD-10', 'code': 'Code-X' }] }, 'bodySite': [{ 'text': 'Head', 'coding': [{ 'system': 'SNOMEDCTBodyStructures', 'code': 'Example body site' }] }], 'encounter': { 'reference': 'urn:uuid:VISIT0' }, 'onsetDateTime': '2010-10-01T13:15' } }, { 'resource': { 'resourceType': 'Patient', 'id': 'SUBJ1', 'birthDate': '2002-12-10', 'gender': 'male' } }, { 'resource': { 'resourceType': 'Encounter', 'id': 'VISIT1', 'subject': { 'reference': 'urn:uuid:SUBJ1' }, 'status': 'finished', 'class': { 'code': 'in' } } }, { 'resource': { 'resourceType': 'Condition', 'subject': { 'reference': 'urn:uuid:SUBJ1' }, 'code': { 'text': 'Decease Y', 'coding': [{ 'system': 'ICD-10', 'code': 'Code-Y' }] }, 'encounter': { 'reference': 'urn:uuid:VISIT1' }, 'onsetDateTime': '2010-04-01T13:15', 'abatementDateTime': '2010-08-01T13:15' } }, ] } def test_read_empty_bundle(empty_fhir_bundle): collection = FhirReader.read(empty_fhir_bundle) result = Mapper.map(collection) assert len(result.studies) == 1 assert len(result.patients) == 0 def test_read_simple_bundle(simple_fhir_bundle): collection = FhirReader.read(simple_fhir_bundle) result: DataCollection = Mapper.map(collection) assert len(result.studies) == 1 assert len(result.patients) == 2 assert len(result.visits) == 2 assert len(result.observations) == 6
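# A tiny illustrative helper (not part of fhir2transmart) showing how bundles
# like the fixtures above can be grouped by resource type before mapping.
from collections import defaultdict

def group_resources(bundle: dict) -> dict:
    grouped = defaultdict(list)
    for entry in bundle.get('entry', []):
        resource = entry.get('resource', {})
        grouped[resource.get('resourceType')].append(resource)
    return dict(grouped)

# e.g. grouping the simple bundle above yields two 'Patient', two 'Encounter'
# and two 'Condition' resources.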
<gh_stars>0 """ Django settings for shark project. Generated by 'django-admin startproject' using Django 3.2.7. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from datetime import timedelta from decimal import Decimal from pathlib import Path from typing import List, Tuple from django.utils.translation import ugettext_lazy as _ # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ # Shark applications 'shark', 'shark.base', 'shark.banking', 'shark.billing', 'shark.customer', 'shark.documents', 'shark.issue', 'shark.project', 'shark.sepa', 'shark.vendor', # Admin tools 'admin_tools', 'admin_tools.theming', 'admin_tools.menu', 'admin_tools.dashboard', # Django contrib 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Misc 'dinbrief', 'taggit', 'localflavor', 'rest_framework', 'django_minio_backend', # https://github.com/theriverman/django-minio-backend ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'shark.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], 'loaders': [ 'django.template.loaders.app_directories.Loader', 'admin_tools.template_loaders.Loader', ] }, }, ] WSGI_APPLICATION = 'shark.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = {} CACHES = {} # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = BASE_DIR / 'htdocs' / 'static' # Media files MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR / 'htdocs' / 'media' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 
'django.db.models.BigAutoField' FORMAT_MODULE_PATH = 'shark.base.formats' MINIO_ENDPOINT = 'minio.your-company.co.uk' MINIO_EXTERNAL_ENDPOINT = "external-minio.your-company.co.uk" # Default is same as MINIO_ENDPOINT MINIO_EXTERNAL_ENDPOINT_USE_HTTPS = True # Default is same as MINIO_USE_HTTPS MINIO_ACCESS_KEY = 'yourMinioAccessKey' MINIO_SECRET_KEY = 'yourVeryS3cr3tP4ssw0rd' MINIO_USE_HTTPS = True MINIO_URL_EXPIRY_HOURS = timedelta(days=1) # Default is 7 days (longest) if not defined MINIO_CONSISTENCY_CHECK_ON_START = False MINIO_PRIVATE_BUCKETS = [ 'django-backend-dev-private', ] MINIO_PUBLIC_BUCKETS = [ 'documents', ] MINIO_POLICY_HOOKS: List[Tuple[str, dict]] = [] # MINIO_MEDIA_FILES_BUCKET = 'my-media-files-bucket' # replacement for MEDIA_ROOT # MINIO_STATIC_FILES_BUCKET = 'my-static-files-bucket' # replacement for STATIC_ROOT MINIO_BUCKET_CHECK_ON_SAVE = True # Default: True // Creates bucket if missing, then save ADMIN_TOOLS_INDEX_DASHBOARD = 'shark.dashboard.CustomIndexDashboard' ADMIN_TOOLS_APP_INDEX_DASHBOARD = 'shark.dashboard.CustomAppIndexDashboard' REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.BasicAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAdminUser', ) } SHARK = { 'VAT_RATE_CHOICES': ( (Decimal('0.19'), '19%'), (Decimal('0.07'), '7%'), (Decimal('0.00'), '0%'), ), 'CUSTOMER': { 'NUMBER_GENERATOR': 'shark.utils.id_generators.InitialAsNumber', 'TYPE_CHOICES': [('default', _('Default'))], 'TYPE_DEFAULT': 'default', }, 'INVOICE': { 'BACKGROUND': { # 'FIRST_PAGE': ... # 'LATER_PAGE': ... }, 'SENDER': { "name": "settings.SHARK['INVOICE']['SENDER']['name']", "street": "settings.SHARK['INVOICE']['SENDER']['street']", "postal_code": "settings.SHARK['INVOICE']['SENDER']['postal_code']", "city": "settings.SHARK['INVOICE']['SENDER']['city']", }, 'TERMS': [ "settings.SHARK['INVOICE']['TERMS']", ], 'NUMBER_GENERATOR': 'shark.utils.id_generators.YearCustomerN', 'UNIT_CHOICES': [ ('s', _('second [s]')), ('min', _('minute [min]')), ('h', _('hour [h]')), ('d', _('day [d]')), ('w', _('week [w]')), ('m', _('month [m]')), ('a', _('year [a]')), ], 'PAYMENT_TIMEFRAME': timedelta(days=14), }, 'SEPA': { 'CREDITOR_ID': '', 'CREDITOR_NAME': '', 'CREDITOR_COUNTRY': 'DE', 'CREDITOR_IBAN': '', 'CREDITOR_BIC': '', 'DEFAULT_MANDATE_TYPE': 'CORE', 'TRANSACTION_REFERENCE_PREFIX': '', 'PRE_NOTIFICATION_EMAIL_FROM': '', 'PRE_NOTIFICATION_EMAIL_BCC': [], }, }
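# A small sketch of how nested SHARK settings like the ones above are commonly
# read from application code. get_shark_setting is a hypothetical helper, not
# part of the project; it only works inside a configured Django environment.
from django.conf import settings

def get_shark_setting(*path, default=None):
    node = getattr(settings, 'SHARK', {})
    for key in path:
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node

# e.g. get_shark_setting('INVOICE', 'PAYMENT_TIMEFRAME') -> timedelta(days=14)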
#! /usr/bin/env python # -*- coding: utf-8 -*- import random import itertools from collections import defaultdict, OrderedDict def pct(x, total): return '%04.1f%%' % (100*float(x)/total) x=[8,4,6,2]; a = [1]*x[0] + [2]*x[1] + [3]*x[2] + [4]*x[3] x=[5,6,5,4]; b = [1]*x[0] + [2]*x[1] + [3]*x[2] + [4]*x[3] x=[4,5,6,5]; c = [1]*x[0] + [2]*x[1] + [3]*x[2] + [4]*x[3] x=[2,6,4,8]; d = [1]*x[0] + [2]*x[1] + [3]*x[2] + [4]*x[3] cards = [ {'Pro': False, 'Stamina': False, 'a': 1, 'b': 1, 'c': 1, 'd': 1, 'crit_fail': True}, {'Pro': False, 'Stamina': False, 'a': 1, 'b': 2, 'c': 3, 'd': 2}, {'Pro': False, 'Stamina': False, 'a': 1, 'b': 4, 'c': 3, 'd': 4}, {'Pro': False, 'Stamina': False, 'a': 2, 'b': 2, 'c': 3, 'd': 4}, {'Pro': False, 'Stamina': False, 'a': 2, 'b': 3, 'c': 2, 'd': 3}, {'Pro': False, 'Stamina': False, 'a': 1, 'b': 2, 'c': 2, 'd': 3}, {'Pro': False, 'Stamina': False, 'a': 2, 'b': 1, 'c': 2, 'd': 3}, {'Pro': True, 'Stamina': False, 'a': 1, 'b': 1, 'c': 4, 'd': 1}, {'Pro': True, 'Stamina': False, 'a': 2, 'b': 2, 'c': 1, 'd': 2}, {'Pro': False, 'Stamina': False, 'a': 3, 'b': 1, 'c': 2, 'd': 2}, {'Pro': False, 'Stamina': True, 'a': 4, 'b': 4, 'c': 4, 'd': 4, 'crit_win': True}, {'Pro': False, 'Stamina': True, 'a': 1, 'b': 3, 'c': 3, 'd': 4}, {'Pro': False, 'Stamina': True, 'a': 3, 'b': 3, 'c': 4, 'd': 4}, {'Pro': False, 'Stamina': True, 'a': 3, 'b': 4, 'c': 3, 'd': 4}, {'Pro': True , 'Stamina': True, 'a': 1, 'b': 2, 'c': 2, 'd': 4}, {'Pro': True, 'Stamina': True, 'a': 3, 'b': 2, 'c': 1, 'd': 2}, {'Pro': True, 'Stamina': True, 'a': 4, 'b': 1, 'c': 1, 'd': 2}, {'Pro': False, 'Stamina': True, 'a': 3, 'b': 4, 'c': 3, 'd': 4}, {'Pro': False, 'Stamina': True, 'a': 1, 'b': 3, 'c': 4, 'd': 2}, {'Pro': False, 'Stamina': True, 'a': 3, 'b': 3, 'c': 4, 'd': 3}, ] def count_checks_in_suit(cards, suit): return sum(card[suit]-2 for card in cards if card[suit] >= 3) def count_exes_in_suit(cards, suit): return sum(3-card[suit] for card in cards if card[suit] < 3) blessing_cards = [ {'Pro': False, 'Stamina': False, 'a': 4, 'b': 3, 'c': 4, 'd': 4, 'blessing': 'copper'}, {'Pro': False, 'Stamina': False, 'a': 3, 'b': 4, 'c': 4, 'd': 4, 'blessing': 'copper'}, {'Pro': False, 'Stamina': False, 'a': 4, 'b': 3, 'c': 4, 'd': 4, 'blessing': 'copper'}, {'Pro': False, 'Stamina': False, 'a': 3, 'b': 4, 'c': 4, 'd': 4, 'blessing': 'copper'}, {'Pro': False, 'Stamina': False, 'a': 4, 'b': 4, 'c': 4, 'd': 4, 'blessing': 'gold'}, {'Pro': False, 'Stamina': False, 'a': 4, 'b': 4, 'c': 4, 'd': 4, 'blessing': 'gold'}, ] wound_cards = [ {'Pro': False, 'Stamina': False, 'a': 1, 'b': 1, 'c': 1, 'd': 2, 'blessing': 'wound'}, {'Pro': False, 'Stamina': False, 'a': 1, 'b': 1, 'c': 2, 'd': 1, 'blessing': 'wound'}, ] class Card(object): SIDE = 'a' def __init__(self, data): self._data = data def __str__(self): return '<{a} | {b} | {c} | {d} ||| P {Pro:d} | {Stamina:d}>'.format(**self._data) def __repr__(self): return '<{a} | {b} | {c} | {d} ||| P {Pro:d} | {Stamina:d}>'.format(**self._data) def __lt__(self, other): return ( self._data[self.SIDE] < other._data[self.SIDE] ) or ( self._data[self.SIDE] == other._data[self.SIDE] and self._data['Pro'] < other._data['Pro'] ) def __eq__(self, other): return ( self._data[self.SIDE] == other._data[self.SIDE] and self._data['Pro'] == other._data['Pro'] ) def __getitem__(self, key): return self._data[key] def read_result(self): return self._data[self.SIDE] def count_checks(self, suit): if self._data[suit] == 4: return 2 if self._data[suit] == 3: return 1 return 0 def count_exes(self, suit): if 
self._data[suit] == 1: return 2 if self._data[suit] == 2: return 1 return 0 Deckahedron = [Card(x) for x in cards] def flip(deck): # Takes a card off the deck and returns that new deck new_deck = deck[:] random.shuffle(new_deck) res = new_deck.pop() return res, new_deck def flip_cards(deck, num=1): """ Takes a card off the deck and returns that new deck Throws IndexError if there weren't eneough cards to flip """ remaining = deck[:] random.shuffle(remaining) flipped = [] for i in range(num): flipped.append(remaining.pop()) return flipped, remaining def one_flip(deck): return flip(deck)[0] def two_flip(deck): a, deck = flip(deck) b, deck = flip(deck) return a, b def three_flip(deck): a, deck = flip(deck) b, deck = flip(deck) c, deck = flip(deck) return a, b, c def four_flip(deck): a, deck = flip(deck) b, deck = flip(deck) c, deck = flip(deck) d, deck = flip(deck) return a, b, c, d def contest_results(deck_a, deck_b, flip_fn_a, flip_fn_b): return flip_fn_a(deck_a), flip_fn_b(deck_b) def resolve_contest(deck_a, deck_b, mod_a=0, mod_b=0): fns = { -3: lambda deck: min(four_flip(deck)), -2: lambda deck: min(three_flip(deck)), -1: lambda deck: min(two_flip(deck)), 0: lambda deck: one_flip(deck), 1: lambda deck: max(two_flip(deck)), 2: lambda deck: max(three_flip(deck)), } a = fns[mod_a](deck_a) b = fns[mod_b](deck_b) return cmp(a, b) class TieReflip(object): """ No ties allowed Just re-draw whenever there's a tie. This can be really shitty, because (d,d,2,2) or (a,a,-2,-2) can result in 1.7x the needed draws to get to a resolution. (see analyze_contest_tiereflip) This should be reserved only for epic struggles where a prolonged stalemate makes a lot of narrative sense. """ ties = 0 num_contests = 0 tie_distribution = defaultdict(int) @classmethod def resolve_contest(cls, deck_a, deck_b, mod_a=0, mod_b=0): contest_ties = 0 cls.num_contests += 1 result = resolve_contest(deck_a, deck_b, mod_a, mod_b) while result == 0: contest_ties += 1 result = resolve_contest(deck_a, deck_b, mod_a, mod_b) cls.tie_distribution[contest_ties] += 1 cls.ties += contest_ties return result @classmethod def clear(cls): cls.ties = 0 cls.num_contests = 0 cls.tie_distribution = defaultdict(int) @classmethod def analysis(cls): return 'Ties %s (%2.1fx), %s' % ( cls.ties, cls.multiple(), {k:pct(v, cls.num_contests) for (k,v) in cls.tie_distribution.items()} ) @classmethod def multiple(cls): return float(cls.num_contests + cls.ties)/cls.num_contests class TieCountChecks(object): """ No ties allowed When a tie happens, try these: * most # of ✔s on the flipped suit on all flipped cards * most # of ✔s on all suits on all flipped cards * least # of ✗s on the flipped suit on all flipped cards * least # of ✗s on all suits on all flipped cards * flip again if none of that worked """ ties = 0 num_contests = 0 tie_distribution = defaultdict(int) kind_distribution = defaultdict(int) @classmethod def resolve_contest_cards(cls, cards_a, suit_a, cards_b, suit_b, mod_a=0, mod_b=0, contest_ties=0 ): if contest_ties == 0: cls.num_contests += 1 howmany = { -3: 4, -2: 3, -1: 2, 0: 1, 1: 2, 2: 3, } def tallys(flipped, mod, suit): if mod < 0: score_fn = min else: score_fn = max raw_score = score_fn(card[suit] for card in flipped) checks_in_suit = count_checks_in_suit(flipped, suit) checks_total = sum(count_checks_in_suit(flipped, s) for s in ['a','b','c','d']) exes_in_suit = count_exes_in_suit(flipped, suit) exes_total = sum(count_exes_in_suit(flipped, s) for s in ['a','b','c','d']) return [ raw_score, checks_in_suit, checks_total, exes_in_suit, 
exes_total ] flipped_a, remaining_a = flip_cards(cards_a, howmany[mod_a]) tally_a = tallys(flipped_a, mod_a, suit_a) flipped_b, remaining_b = flip_cards(cards_b, howmany[mod_b]) tally_b = tallys(flipped_b, mod_b, suit_b) for i in range(len(tally_a)): result = cmp(tally_a[i], tally_b[i]) if result != 0: if i > 0: #print tally_a, tally_b cls.kind_distribution[i] += 1 cls.tie_distribution[contest_ties] += 1 cls.ties += contest_ties return result return cls.resolve_contest_cards( remaining_a, suit_a, remaining_b, suit_b, mod_a, mod_b, contest_ties = contest_ties + 1 ) @classmethod def clear(cls): cls.ties = 0 cls.num_contests = 0 cls.tie_distribution = defaultdict(int) cls.kind_distribution = defaultdict(int) @classmethod def analysis(cls): mapp = { 0: 'raw_score', 1: 'chk in_suit', 2: 'chk total', 3: 'x in_suit', 4: 'x total', } return 'Ties %s (%2.1fx) \n %s \n %s' % ( cls.ties, cls.multiple(), {k:pct(v, cls.num_contests) for (k,v) in cls.tie_distribution.items()}, {mapp[k]:v for (k,v) in cls.kind_distribution.items()}, ) @classmethod def multiple(cls): return float(cls.num_contests + cls.ties)/cls.num_contests def resolve_check(deck, mod=0): fns = { -3: lambda deck: min(four_flip(deck)), -2: lambda deck: min(three_flip(deck)), -1: lambda deck: min(two_flip(deck)), 0: lambda deck: one_flip(deck), 1: lambda deck: max(two_flip(deck)), 2: lambda deck: max(three_flip(deck)), } result = fns[mod](deck) return result > 2 def analyze(func, possible_results, *args): tries = 10000 results = { x:0 for x in possible_results } for i in range(tries): results[func(*args)] += 1 percents = [ pct(results[x], tries) for x in possible_results ] return (results, ' / '.join(percents)) def analyze_check(*args): return analyze(resolve_check, [True, False], *args) def analyze_contest(*args): return analyze(resolve_contest, [-1, 0, 1], *args) def analyze_contest_tiereflip(*args): TieReflip.clear() analysis = analyze(TieReflip.resolve_contest, [-1, 1], *args) return analysis[0], analysis[1], TieReflip.analysis() def analyze_contest_tiecountchecks(*args): TieCountChecks.clear() analysis = analyze(TieCountChecks.resolve_contest_cards, [-1, 1], *args) return analysis[0], analysis[1], TieCountChecks.analysis() def proficiency_check(mod): c = cards[:] flips = abs(mod) + 1 result = 0 for i in range(flips): random.shuffle(c) card = c.pop() if card['Pro']: result += 1 return result def analyze_proficiency_check(mod): results = { x:0 for x in range(5) } for i in range(10000): results[proficiency_check(mod)] += 1 return results def lose_stamina(deck, stamina_loss): new_deck = [] discard_pile = [] lost_stamina = [] popped = 0 for card in deck: if popped == stamina_loss: new_deck.append(card) continue if card.get('Stamina'): popped += 1 else: new_deck.append(card) return new_deck, discard_pile, lost_stamina def take_worst(rank, card_list): taken = card_list[0] for card in card_list[1:]: if card.get(rank) == taken.get(rank): if card.get('Pro'): taken = card elif card.get(rank) < taken.get(rank): taken = card return taken def take_best(rank, card_list): taken = card_list[0] for card in card_list[1:]: if card.get(rank) == taken.get(rank): if card.get('Pro'): taken = card elif card.get(rank) > taken.get(rank): taken = card return taken def green_token_check(mod, rank, stamina_loss): deck = cards[:] random.shuffle(deck) new_deck, discard_pile, _ = lose_stamina(deck, stamina_loss) deck = discard_pile + new_deck random.shuffle(deck) results, deck = flip_cards(deck, num=1+abs(mod)) if mod < 0: resolving_card = take_worst(rank, results) 
else: resolving_card = take_best(rank, results) return resolving_card.get('Pro') def analyze_green_token_check(): ranks = 'abcd' mods = [-2, -1, 0, 1] stamina_losses = [0, 8] combos = itertools.product(ranks, mods, stamina_losses) for rank, mod, stamina_loss in combos: success = 0 sample_size = 200000 #sample_size = 2000 for i in range(sample_size): success += int(green_token_check(mod, rank, stamina_loss)) print( '%s/%s (%s) Rank %s, Mod %s, Stamina loss: %s' % ( success, sample_size, pct(success,sample_size), rank, mod, stamina_loss )) def p2_check(suit, mod): """ Only take the 'Pro' if it's on the card that got used """ if mod > 0: raise ValueError('die') c = cards[:] flips = abs(mod) + 1 results = [] for i in range(flips): random.shuffle(c) results.append(c.pop()) score = 100 used = None for card in results: if card[suit] < score: score = card[suit] used = card if used['Pro']: return 1 return 0 def analyze_p2_check(*args): results = { x:0 for x in range(2) } for i in range(10000): results[p2_check(*args)] += 1 return results good_six_four_distributions = [ #00 # # #01 # # ## #02 # # ## ## # # # #03 ### # ## ## ## ## ## #04 ##### ### ## ## ##### ### #### #05 ### #### ## ## #### ######## ##### #06 ### #### ## ### ### ## #### #07 ## ## #### ### ### ## ## #08 ## # ## ## ## # ## #09 # # ## ## # #10 # ([4, 0, 1, 1, 2, 4, 3, 2, 2, 0, 3, 4, 0, 0, 4, 1, 2, 3, 1, 3], [1, 4, 3, 6, 0, 2, 6, 1, 2, 5, 0, 2, 5, 6, 4, 3, 1, 5, 3, 4]), ([2, 1, 3, 4, 2, 3, 0, 1, 4, 1, 4, 3, 1, 0, 2, 4, 0, 0, 3, 2], [1, 5, 3, 4, 2, 6, 1, 6, 1, 3, 6, 3, 4, 4, 5, 2, 5, 0, 2, 0]), ([2, 1, 4, 1, 3, 0, 4, 0, 1, 4, 0, 2, 2, 0, 3, 3, 2, 1, 3, 4], [6, 4, 3, 6, 2, 2, 5, 1, 3, 2, 3, 4, 5, 0, 4, 0, 6, 1, 1, 5]), ([2, 2, 4, 1, 3, 0, 4, 0, 1, 4, 0, 4, 1, 3, 0, 2, 3, 1, 3, 2], [6, 4, 3, 6, 2, 2, 5, 5, 0, 2, 3, 4, 0, 3, 4, 5, 6, 1, 1, 1]), ([4, 0, 1, 1, 2, 4, 3, 2, 4, 0, 3, 2, 0, 0, 4, 1, 3, 2, 1, 3], [1, 4, 3, 6, 0, 4, 2, 1, 2, 5, 0, 6, 5, 6, 2, 3, 1, 5, 3, 4]), ([4, 0, 1, 1, 2, 2, 3, 2, 4, 0, 3, 2, 0, 0, 3, 1, 4, 4, 1, 3], [1, 4, 4, 6, 0, 3, 2, 1, 1, 5, 0, 6, 5, 6, 2, 3, 2, 5, 3, 4]), ([4, 2, 1, 1, 2, 4, 3, 2, 4, 0, 3, 2, 0, 0, 4, 1, 3, 0, 1, 3], [1, 4, 3, 6, 0, 4, 2, 1, 2, 5, 0, 6, 5, 6, 2, 3, 1, 5, 3, 4]), #00 # # #01 ## # ## # #02 ### ## # ## ## # #03 ### ### ## ## ## ### ## #04 ### #### ## #### ### ## ### #05 ### ##### ## ###### ## ## ##### #06 ## #### ## ### ## ## ### #07 ## ### # ## ## ## ## #08 ## # ## # ## ## ## #09 # ## # ## ## # #10 # ## # # ([2, 2, 2, 1, 3, 0, 4, 0, 1, 4, 0, 1, 4, 3, 0, 3, 2, 1, 3, 4], [6, 4, 3, 6, 2, 2, 5, 3, 1, 2, 3, 4, 0, 0, 4, 5, 5, 1, 1, 6]), # this one is nice ([4, 1, 1, 1, 2, 4, 3, 2, 4, 0, 3, 2, 0, 0, 4, 1, 3, 2, 0, 3], [1, 4, 3, 6, 4, 0, 2, 1, 2, 5, 0, 6, 5, 6, 2, 3, 1, 5, 3, 4]), ([2, 4, 3, 4, 2, 3, 0, 1, 1, 4, 4, 2, 2, 0, 3, 1, 0, 0, 3, 1], [1, 5, 2, 4, 5, 6, 1, 2, 1, 6, 6, 3, 4, 4, 5, 0, 2, 0, 3, 3]), ([4, 1, 1, 1, 2, 4, 3, 2, 3, 0, 3, 2, 0, 0, 4, 1, 3, 2, 0, 4], [1, 6, 3, 4, 4, 0, 2, 1, 2, 5, 6, 0, 5, 6, 2, 3, 1, 5, 3, 4]), # this one is nice ([2, 4, 3, 4, 2, 0, 3, 1, 1, 4, 1, 2, 2, 0, 3, 1, 0, 0, 3, 4], [1, 5, 2, 4, 5, 6, 1, 3, 1, 6, 2, 3, 4, 4, 5, 0, 2, 0, 6, 3]), ([2, 2, 4, 1, 3, 0, 4, 0, 1, 3, 0, 4, 1, 3, 0, 2, 4, 1, 3, 2], [6, 1, 5, 6, 2, 2, 5, 3, 0, 2, 3, 4, 0, 3, 4, 5, 6, 1, 1, 4]), ([4, 2, 1, 1, 1, 4, 3, 2, 4, 0, 3, 2, 0, 0, 4, 2, 3, 0, 1, 3], [1, 4, 3, 6, 0, 4, 2, 1, 2, 5, 0, 6, 5, 6, 5, 3, 1, 2, 3, 4]) ] selected_six_four_distribution = 7 # I've distributed these such that the d4s are evenly split up # among the exaustion / non-exaustion sets dice_print_rules = [ # 0 - d4:0 ( (0, 2), 
'six_sw_1 six_ne_1' ), # 1 - d4:1 ( (1, 1), 'four_ne_1 six_sw_1' ), # 2 - d4:2 ( (2, 6), 'four_sw_1 four_ne_1 six_ne_2 six_nw_2 six_se_2' ), # 3 - d4:3 ( (3, 5), 'four_sw_1 four_se_1 four_nw_1 six_ne_2 six_nw_1 six_sw_1 six_se_1' ), # 4 - d4:4 ( (4, 0), 'four_nw_1 four_ne_1 four_se_1 four_sw_1' ), # 5 - d4:0 ( (0, 3), 'six_sw_2 six_ne_1' ), # 6 - d4:1 ( (1, 6), 'four_sw_1 six_ne_2 six_nw_2 six_se_2' ), # 7 - d4:2 ( (2, 4), 'four_sw_1 four_ne_1 six_nw_2 six_se_2' ), # 8 - d4:3 ( (3, 2), 'four_sw_1 four_nw_1 four_ne_1 six_nw_1 six_se_1' ), # 9 - d4:4 ( (4, 5), 'four_sw_1 four_se_1 four_nw_1 four_ne_1 six_ne_2 six_nw_1 six_sw_1 six_se_1' ), # 10 ( (4, 6), 'four_nw_1 four_ne_1 four_se_1 four_sw_1 six_sw_2 six_sw_1 six_ne_1 six_ne_2' ), # 11 ( (3, 1), 'four_sw_1 four_nw_1 four_ne_1 six_ne_1' ), # 12 ( (2, 5), 'four_sw_1 four_ne_1 six_ne_1 six_nw_2 six_se_2' ), # 13 ( (1, 4), 'four_sw_1 six_ne_2 six_nw_1 six_se_1' ), # 14 ( (0, 3), 'six_sw_2 six_ne_1' ), # 15 ( (4, 2), 'four_nw_1 four_ne_1 four_se_1 four_sw_1 six_sw_1 six_ne_1' ), # 16 ( (3, 0), 'four_sw_1 four_nw_1 four_ne_1' ), # 17 ( (2, 3), 'four_nw_1 four_se_1 six_sw_2 six_ne_1' ), # 18 ( (1, 1), 'four_ne_1 six_sw_1 ' ), # 19 ( (0, 4), 'six_nw_1 six_ne_1 six_se_1 six_sw_1' ), ] spot_it_rules = [ (0, 1, 2, 3), (1, 9, 6, 4), (3, 5, 7, 6), (4, 0, 5, 8), (2, 7, 8, 9) ] spot_it_map = { 0: 'cow', 1: 'horse', 2: 'rabbit', 3: 'rat', 5: 'monkey', 4: 'dog', 6: 'tiger', 7: 'snake', 8: 'cock', 9: 'pig', } def calc_zodiac(i): symbols = spot_it_rules[i%5] # mix it up a bit symbols = [x for x in itertools.permutations(symbols)][i%4] # get the english name #symbols = [spot_it_map[x] for x in symbols] # every 5, rotate them offset = int(i/5) symbols = symbols[offset:] + symbols[:offset] return tuple(symbols) def zjoin_2(card_a, card_b): match_a_index = None match_b_index = None for a_index, a_val in enumerate(card_a): for b_index, b_val in enumerate(card_b): if a_val == b_val: match_a_index = a_index match_b_index = b_index print ' ' * (4 - match_a_index), card_a print ' ' * (4 - match_b_index), card_b return match_a_index, match_b_index def join_3_in_a_line(card_a, card_b, card_c): print '---' def line_2(card_a, card_b): match_a_index, match_b_index = zjoin_2(card_a, card_b) line_a_index = (match_a_index + 2)%len(card_a) line_b_index = (match_b_index + 2)%len(card_b) #print 'avail', card_a, 'index', line_a_index, 'value', card_a[line_a_index] #print 'avail', card_b, 'index', line_b_index, 'value', card_b[line_b_index] return card_a[line_a_index], card_b[line_b_index] possible_a, possible_b = line_2(card_a, card_b) win_a_b_c = possible_a in card_c or possible_b in card_c print card_a, card_b, card_c, (win_a_b_c and 'WIN A B C') possible_a, possible_c = line_2(card_a, card_c) win_a_c_b = possible_a in card_b or possible_c in card_b print card_a, card_c, card_b, (win_a_c_b and 'WIN A C B') possible_b, possible_c = line_2(card_b, card_c) win_b_c_a = possible_b in card_a or possible_c in card_a print card_b, card_c, card_a, (win_b_c_a and 'WIN B C A') return win_a_b_c, win_a_c_b, win_b_c_a def join_3_in_a_triangle(card_a, card_b, card_c): print '---' def tri_2(a, b): match_a_index, match_b_index = zjoin_2(a, b) tri_index_pair1 = ( a[(match_a_index + 1)%len(a)], b[(match_b_index + 1)%len(b)], ) tri_index_pair2 = ( a[(match_a_index - 1)%len(a)], b[(match_b_index - 1)%len(b)], ) return tri_index_pair1, tri_index_pair2 def edges(card): return card[:2], card[1:3], card[2:4], (card[3], card[0]) pair1, pair2 = tri_2(card_a, card_b) if pair1 in edges(card_c) or pair2 
in edges(card_c): print edges(card_c), 'needles', pair1, pair2 print card_a, card_b, card_c, 'WIN A B C' return True pair1, pair2 = tri_2(card_a, card_c) if pair1 in edges(card_b) or pair2 in edges(card_b): print edges(card_b), 'needles', pair1, pair2 print card_a, card_b, card_c, 'WIN A C B' return True pair1, pair2 = tri_2(card_b, card_c) if pair1 in edges(card_a) or pair2 in edges(card_a): print edges(card_a), 'needles', pair1, pair2 print card_a, card_b, card_c, 'WIN B C A' return True def zjoin_all(fn = join_3_in_a_line): zodiac_deck = [calc_zodiac(x) for x in range(20)] win_results = [] for i in range(20): for j in range(20): if i == j: continue for k in range(20): if k in [i,j]: continue wins = fn(zodiac_deck[i], zodiac_deck[j], zodiac_deck[k]) win_results.append(wins) return win_results def analyze_exes_and_checkmarks(svg=False, lost_stamina=0, flashback_percent=0.0): a_deck = [x['a'] for x in cards] b_deck = [x['b'] for x in cards] c_deck = [x['c'] for x in cards] d_deck = [x['d'] for x in cards] all_decks = { 'A': a_deck, 'B': b_deck, 'C': c_deck, 'D': d_deck, } fns = { -3: lambda deck: min(four_flip(deck)), -2: lambda deck: min(three_flip(deck)), -1: lambda deck: min(two_flip(deck)), 0: lambda deck: one_flip(deck), 1: lambda deck: max(two_flip(deck)), 2: lambda deck: max(three_flip(deck)), } results = { 1: 0, 2: 0, 3: 0, 4: 0, } possible_results = { 1: u'✗✗', 2: u'✗', 3: u'✔', 4: u'✔✔', } tries = 20000 green_tokens = 0 all_percents = [] for side in ['a', 'b', 'c', 'd']: print '' print 'Side ', side, ' FB %', flashback_percent print '' #for mod in [-2, -1, 0, 1, 2]: for mod in [ 0]: mod_results = results.copy() for i in range(tries): deck_copy = Deckahedron[:] for x in range(lost_stamina): # Remove one Stamina deck_copy.pop( random.randint(10,len(deck_copy)-1) ) fn = fns[mod] card = fn(deck_copy) card.SIDE = side # But maybe the player does a flashback... if i%100 == 0: green_tokens = 0 if ( flashback_percent and random.random() <= flashback_percent and card.read_result() in [1] and green_tokens >= 2 ): green_tokens -= 2 new_mod = min(2, mod + 2) new_fn = fns[mod] card = fn(deck_copy) card.SIDE = side if card._data['Pro']: green_tokens += 1 mod_results[card.read_result()] += 1 if svg: group_id = '%s-mod%s' % (side, mod) print '<g id="%s">' % group_id r = '''<rect style="fill:#{color};stroke:none;" id="{rect_id}" width="{width}" height="4" x="{x}" y="{y}" /> ''' t = '''<text xml:space="preserve" id="{text_id}" x="{x}" y="{y}" style="font-size:2px; font-family:'Bebas Neue'; -inkscape-font-specification:'Bebas Neue'; fill:#000000 " > <tspan sodipodi:role="line" x="{x}" y="{y}" >{value}</tspan> </text> ''' x = 0 for j in [1,2,3,4]: y = { 'a': 21, 'b': 14, 'c': 7, 'd': 0 }[side] color = { 1: 'd40000', 2: 'ff0000', 3: '55d400', 4: '44aa00', }[j] int_val = int(round((100*mod_results[j])/float(tries))) print r.format( rect_id=(group_id + '-' + str(j)), width=int_val, x=x, y=y, color=color, ) if int_val > 0: print t.format( text_id=('text' + group_id + '-' + str(j)), x=x*0.4, y=y+5, value=int_val ) x += int_val print '</g>' else: print 'mod: ', mod print '✗✗', pct(mod_results[1], tries) print '✗ ', pct(mod_results[2], tries) print '✔ ', pct(mod_results[3], tries) print '✔✔', pct(mod_results[4], tries) percents = { x:pct(mod_results[x], tries) for x in mod_results.keys() } all_percents.append(percents) return all_percents
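# The analysis script above is written for Python 2 (print statements and the
# built-in cmp()). If it is ever run under Python 3, cmp() no longer exists;
# a drop-in shim with the same semantics is:
def cmp(a, b):
    return (a > b) - (a < b)

# cmp(1, 2) == -1, cmp(2, 2) == 0, cmp(3, 2) == 1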
<filename>sensha_uncompiled_version05-06-2019/main.py # main.py """ Importe le code du fichier <<display.py>> """ from display import * """ Programme - But : la base des operations du programme - Fonctionnement : contient le jeu, les variables, etc... / permet de demarrer et de fermer le jeu correctement (avec sauvegarde base de donnee) / partage ses variables a toutes les fonctions liees - Utilisation : / """ class Prgm: """ Fonction d'initialisation: - Fonctionnement : Initialise toutes les valeurs/listes/bibliotheques """ def __init__(self): # One ruler self.Data_Check() self.data = {} self.Data_Assign() # ex print(self.data["sound_lvl"]) pg.init() pg.mixer.init() pg.mixer.set_num_channels(50) pg.font.init() if self.data["full_screen"] == 0: self.window = pg.display.set_mode((self.data["screen_width"],self.data["screen_height"]),pg.HWSURFACE|pg.DOUBLEBUF) if self.data["full_screen"] == 1: self.window = pg.display.set_mode((self.data["screen_width"],self.data["screen_height"]),pg.HWSURFACE|pg.DOUBLEBUF|pg.FULLSCREEN) icon = pg.image.load("files/img/sprites/icone.png").convert_alpha() icon = pg.transform.scale(icon, (32, 32)) pg.display.set_icon(icon) pg.display.set_caption(TITLE) self.running = True self.clock = pg.time.Clock() # FPS stabilizer factor self.fps_stab = 1 # Get Own virtual class here self.all_virtuals = Virtuals(self, 10) # def sprite library to be filled ["..."] self.sprite = {} self.sounds = [] self.build_phase = [] self.sound_repertoire = [] MusicPlayer(self) self.map_instance = 0 self.mouse_select_holder = "None" self.key_event = [] self.game_lvl = self.data["game_lvl"] self.cheat = False self.Resol_Check = Resolution_Check(self) Load_Page_Display(self) """ Fonction boucle principale: - Fonctionnement : s'execute toute les 1/60s (en boucle) """ def main_loop(self): self.clock.tick(FPS) if self.clock.get_fps() != 0: self.fps_stab = FPS/self.clock.get_fps() if self.fps_stab < 1 or self.clock.get_fps() < 20: self.fps_stab = 1 else: self.fps_stab = 1 self.window.fill(BLACK) self.event() self.Parametrics() self.all_virtuals.update() #print(self.glob.all_sprites,"\n" ,self.glob.all_virtuals) self.run_check() pg.display.flip() # User functions """ Fonction Parametrics: - Fonctionnement : sauvegarde la position vectorielle du curseur dans la variable self.mouse_pos """ def Parametrics(self): mouse_pos = pg.mouse.get_pos() self.mouse_pos = vec(mouse_pos[0], mouse_pos[1]) """ Fonction event: - Fonctionnement : attends un evenement donné (touche du clavier ou croix pour quitter) / ajoute le nom de la touche pressee a la liste key_event (qui est videe a chaque boucle), cette liste pourra etre lu par d'autres fonctions de classes virtuelles """ def event(self): if len(self.key_event) > 0: self.key_event.clear() for event in pg.event.get(): if event.type == pg.QUIT: self.running = False if event.type == pg.KEYUP: if event.key == pg.K_ESCAPE: self.key_event.append("k_escape") if event.type == pg.MOUSEBUTTONUP: self.key_event.append("mouse_press") # Technical functions """ Fonction Data_Assign: - Fonctionnement : transfert les donnees de la base de donnees <<session.db>> vers la bibliotheque self.data """ def Data_Assign(self): DataBase().db_dict_get(self.data) """ Fonction Data_Save: - Fonctionnement : transfert une donnee precise de la bibliotheque self.data vers la base de donnees <<session.db>> """ def Data_Save(self, name): DataBase().db_update(name,self.data[name]) """ Fonction Data_Save_All: - Fonctionnement : transfert toutes les donnees de la bibliotheque self.data vers la 
base de donnees <<session.db>> """ def Data_Save_All(self): DataBase().db_dict_update(self.data) """ Fonction Data_Check: - Fonctionnement : verifie si la base de donnees <<session.db>> existe, si non alors elle la cree """ def Data_Check(self): if DataBase().db_check() is False: DataBase().db_spawn() # DataBase().db_update("full_screen", 0) """ Fonction run_check: - Fonctionnement : verifie si la variable self.running est toujours vrai, si non elle appelle la fonction Data_Save_All() """ def run_check(self): if self.running is False: print("QUIT") self.Data_Save_All() # apply data lift/save p = Prgm() """ Boucle principale: - Fonctionnement : tant que p.running vraie, lit moi la fonction p.main_loop(), si non quitte le programme """ while p.running: p.main_loop() pg.quit()
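# A minimal, self-contained pygame loop with the same shape as Prgm.main_loop
# above (tick the clock, poll events, draw, flip). It is a reference skeleton
# only and does not use the project's Virtuals/DataBase/display classes.
import pygame as pg

def tiny_loop():
    pg.init()
    window = pg.display.set_mode((320, 240))
    clock = pg.time.Clock()
    running = True
    while running:
        clock.tick(60)                     # cap the frame rate, like FPS above
        for event in pg.event.get():
            if event.type == pg.QUIT:
                running = False
        window.fill((0, 0, 0))
        pg.display.flip()
    pg.quit()

if __name__ == '__main__':
    tiny_loop()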
import discord from discord.ext import commands from dataIO import dataIO import logging from tabulate import tabulate import Checker import os log = logging.getLogger('blagotron.buyrole') class Buyrole: """Allows the user to buy a role with economy balance""" # --- Format # { # Server : { # Toggle: True/False # Roles : { # Price : # Name : # } # } # } # --- def __init__(self, bot): self.bot = bot self.json = {} self.location = 'data/buyrole/settings.json' self.json = dataIO.load_json(self.location) @commands.command(pass_context=True, no_pm=True) async def buyrole(self, ctx, role: discord.Role=None): """Buy a role of your choice with your hard earned balance To buy a role with a space in it, use quotes""" economy = self.bot.get_cog('Economy').bank server = ctx.message.server.id author = ctx.message.author if server not in self.json: await self.bot.say(':warning: Buyrole isn\'t setup yet. Please ask your admin to set it up.') elif self.json[server]['toggle'] is False: await self.bot.say(':warning: Buyrole is disabled on this server.') else: if role is None: table = [] for key, role in self.json[server].items(): try: temp = [] temp.append(self.json[server][key]['name']) temp.append(self.json[server][key]['price']) table.append(temp) # Past the list into a new list, thats a collection of lists. except: pass header = ['Role', 'Price'] if not table: await self.bot.say(':warning: No roles are setup yet.') else: await self.bot.say('```\n{}```'.format(tabulate(table, headers=header, tablefmt='simple'))) else: if role.id in self.json[server]: if role in author.roles: await self.bot.say(':warning: {}, you already own this role!'.format(author.display_name)) elif economy.can_spend(author, int(self.json[server][role.id]['price'])): msg = 'This role costs {}.\nAre you sure you want to buy this role?\nType *"Yes"* to confirm.' log.debug('Starting check on UserID({})'.format(author.id)) await self.bot.say(msg.format(self.json[server][role.id]['price'])) answer = await self.bot.wait_for_message(timeout=15, author=author) if answer is None: await self.bot.say(':warning: {}, you didn\'t respond in time.'.format(author.display_name)) log.debug('Killing check on UserID({}) (Timeout)'.format(author.id)) elif 'yes' in answer.content.lower() and role.id in self.json[server]: try: economy.withdraw_credits(author, int(self.json[server][role.id]['price'])) await self.bot.add_roles(author, role) await self.bot.say(':white_check_mark: Done! You\'re now the proud owner of {}'.format(self.json[server][role.id]['name'])) log.debug('Killing check on UserID({}) (Complete)'.format(author.id)) except discord.Forbidden: await self.bot.say(":warning: I cannot manage server roles, or the role/user is higher then my role.\nPlease check the server roles to solve this.") else: await self.bot.say(':warning: {}, ok you can try again later.'.format(author.display_name)) else: await self.bot.say(':warning: Sorry {}, you don\'t have enough credits to buy {}'.format(author.display_name, self.json[server][role.id]['name'])) else: await self.bot.say(':warning: {}, you cannot buy this role!'.format(role.name)) @commands.group(pass_context=True, no_pm=True) @Checker.admin_or_permissions(administrator=True) async def buyroleset(self, ctx): """Manage the settings for buyrole""" server = ctx.message.server.id if server not in self.json: # Setup the server in the dict, failur rate 0%. 
For now self.json[server] = {'toggle': True} dataIO.save_json(self.location, self.json) log.debug('Wrote server ID({})'.format(server)) @buyroleset.command(pass_context=True, no_pm=True) @Checker.admin_or_permissions(administrator=True) async def add(self, ctx, role: discord.Role, price): """Adds a role for users to buy To edit a role, use this command again, To add a role with a space in it put it in quotes,\"Role name\"""" server = ctx.message.server.id self.json[server][role.id] = {'price': price, 'name': role.name} dataIO.save_json(self.location, self.json) log.debug('Wrote role ID({}) in server ID({})'.format(role.id, server)) await self.bot.say(':white_check_mark: Added {} to the buy list for {}'.format(role.name, price)) @buyroleset.command(pass_context=True, no_pm=True) @Checker.admin_or_permissions(administrator=True) async def remove(self, ctx, role: discord.Role): """Removes a role for users to buy""" server = ctx.message.server.id try: del self.json[server][role.id] dataIO.save_json(self.location, self.json) log.debug('deleted role ID({}) in server ID({})'.format(role.id, server)) await self.bot.say(':white_check_mark: Done! Removed the role') except: await self.bot.say(':warning: {} isn\'t in the list.'.format(role.name)) @buyroleset.command(pass_context=True, no_pm=True) @Checker.admin_or_permissions(administrator=True) async def toggle(self, ctx): """Enables or disables buying roles in the server""" server = ctx.message.server.id if self.json[server]['toggle'] is True: self.json[server]['toggle'] = False await self.bot.say(':white_check_mark: Toggle disabled! You can no longer buy roles on this server') else: self.json[server]['toggle'] = True await self.bot.say(':white_check_mark: Toggle enabled! You can buy roles on this server now!') log.debug('Wrote toggle to {} in server ID({})'.format(self.json[server]['toggle'], server)) dataIO.save_json(self.location, self.json) @buyroleset.command(hidden=True, pass_context=True) @Checker.admin_or_permissions(administrator=True) async def dicts(self, ctx): """Dicks""" await self.bot.say('All the dicks!') async def _update_name(self, old, new): # Change the 'name' variable in the role ID. Since we don't pull names dynamicly in the table if new.server.id in self.json: if old.name != new.name: if new.id in self.json[new.server.id]: self.json[new.server.id][new.id]['name'] = new.name log.debug('Written new name to {}'.format(new.id)) dataIO.save_json(self.location, self.json) def check_folder(): if not os.path.exists('data/buyrole'): log.debug('Creating folder: data/buyrole') os.makedirs('data/buyrole') def check_file(): f = 'data/buyrole/settings.json' if dataIO.is_valid_json(f) is False: log.debug('Creating json: settings.json') dataIO.save_json(f, {}) def setup(bot): check_folder() check_file() n = Buyrole(bot) bot.add_listener(n._update_name, 'on_server_role_update') bot.add_cog(n)
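# Illustrative sketch (assumption, not taken from the original cog): the
# buyroleset commands above persist data/buyrole/settings.json with one entry
# per server id -- a 'toggle' flag plus one sub-dict per purchasable role id.
# The ids and price below are invented purely to show the shape that
# dataIO.save_json ends up writing; note the price is stored as the raw string
# passed to the add command and only cast with int() when spending.
EXAMPLE_BUYROLE_SETTINGS = {
    "133249411303211008": {            # server id (hypothetical)
        "toggle": True,
        "240042520124751873": {        # role id (hypothetical)
            "price": "1500",
            "name": "VIP",
        },
    },
}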
<reponame>rajancolab/blogsite """Easily update version numbers across your project. """ import argparse from functools import reduce import re import sys import toml from . import deltas __version__ = "0.2" class ConfigError(ValueError): pass def read_config(): with open('reversion.toml') as f: conf = toml.load(f) if 'currentversion' not in conf: raise ConfigError("No field named currentversion") if not isinstance(conf['currentversion'], str): raise ConfigError("currentversion should be a string, not {}".format( type(conf['currentversion']))) places = conf.get('place', []) if not places: raise ConfigError("Need at least one replacement site ([[place]] section)") if not isinstance(places, list): raise ConfigError("place must be an array") if not isinstance(places[0], dict): raise ConfigError("place must be an array of tables") for place in places: if 'file' not in place: raise ConfigError("Missing file= field for place") if not isinstance(place['file'], str): raise ConfigError("file must be string") if ('line-regex' in place) and not isinstance(place['line-regex'], str): raise ConfigError("linematch must be string") return conf def check(): conf = read_config() current = conf['currentversion'] current_pattern = re.compile(r'\b' + re.escape(current) + r'\b') problems = [] for place in conf['place']: linematch = place.get('line-regex', 'version') line_pattern = re.compile(linematch) match_lines = [] try: with open(place['file']) as f: for lineno, line in enumerate(f, start=1): if line_pattern.search(line): m = current_pattern.search(line) if m is not None: match_lines.append(lineno) except FileNotFoundError: problems.append("No such file: " + place['file']) if not match_lines: problems.append("No match found in {} with pattern {!r}".format( place['file'], linematch )) elif len(match_lines) > 1: problems.append("{} matches found in {} with pattern {!r}: lines {}" .format(len(match_lines), place['file'], linematch, ', '.join(str(n) for n in match_lines))) return problems class CheckAction(argparse.Action): def __init__(self, option_strings, dest, **kwargs): super().__init__(option_strings, dest, nargs=0, **kwargs) def __call__(self, parser, namespace, values, option_string=None): try: problems = check() except ConfigError as e: sys.exit(str(e)) if problems: for p in problems: print(p) sys.exit(1) else: print('OK') sys.exit(0) class VersionMatchingError(ValueError): pass def update_version(changes): conf = read_config() current_pattern = re.compile(r'\b' + re.escape(conf['currentversion']) + r'\b') new_version = reduce(deltas.apply, changes, conf['currentversion']) reversion_conf_place = {'file': 'reversion.toml', 'linematch': 'currentversion'} files_changed = [] for place in (conf['place'] + [reversion_conf_place]): match_lines = [] line_pattern = re.compile(place.get('line-regex', 'version')) file = place['file'] contents = [] files_changed.append((file, contents)) with open(file) as f: for lineno, line in enumerate(f, start=1): if line_pattern.search(line): line, nsubs = current_pattern.subn(new_version, line) if nsubs > 1: raise VersionMatchingError('Multiple matches in {} at line {}' .format(file, lineno)) elif nsubs: match_lines.append(lineno) contents.append(line) if len(match_lines) == 0: raise VersionMatchingError('No matches found in {}' .format(file)) elif len(match_lines) > 1: raise VersionMatchingError('Multiple matches found in {} at lines {}' .format(file, ', '.join(str(n) for n in match_lines))) for filename, contents in files_changed: with open(filename, 'w') as f: 
f.writelines(contents)

    return len(files_changed)


def main(argv=None):
    ap = argparse.ArgumentParser(prog='reversion')
    ap.add_argument('--version', action='version', version=__version__)
    ap.add_argument('--check', action=CheckAction,
                    help="Check version numbers in project files without changing anything")
    ap.add_argument('change', nargs='+')
    options = ap.parse_args(argv)
    try:
        n = update_version(options.change)
    except (ConfigError, deltas.BadDelta, VersionMatchingError) as e:
        print('Error:', e)
    else:
        print('Updated version number in %d files' % n)
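# Illustrative sketch (assumption, not shipped with the tool): a minimal
# reversion.toml that read_config() above accepts -- a 'currentversion' string
# plus at least one [[place]] table with a 'file' field and an optional
# 'line-regex'. The file names are examples only.
EXAMPLE_REVERSION_TOML = """\
currentversion = "0.2"

[[place]]
file = "mypkg/__init__.py"
line-regex = "__version__"

[[place]]
file = "setup.py"
"""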
#!/usr/bin/python3 # coding: utf-8 # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # class LoeschLeser forked from the FINC Project # README # needs an config file 'deletions_conf.json' #{ #"getstrings": { #"http://127.0.0.1/finc-main/mrc/_search": { #"body": null, #"method": "POST" #}, #"http://127.0.0.1/events/schemaorg/": { #"method": "GET" #}, #"http://127.0.0.1/geo/schemaorg/": { #"method": "GET" #}, #"http://127.0.0.1/orga/schemaorg/": { #"method": "GET" #}, #"http://127.0.0.1/persons/schemaorg/": { #"method": "GET" #}, #"http://127.0.0.1/swb-aut/mrc/": { #"method": "GET" #}, #"http://127.0.0.1/tags/schemaorg/": { #"method": "GET" #}, #"http://127.0.0.1/works/schemaorg/": { #"method": "GET" #} #}, #"host": "http://127.0.0.1", #"password": "<PASSWORD>", #"url": "ftp://vftp.bsz-bw.de/sekkor/LOEPPN-*", #"username": "da" #} import luigi import luigi.contrib.esindex from gluish.task import BaseTask,ClosestDateParameter from gluish.utils import shellout from es2json import eprint, put_dict try: import simplejson as json except ImportError: import json import datetime from datetime import date import time, os import sys import traceback from requests import get, delete class DeleteTask(BaseTask): """ Just a base class for DeleteStuff """ date = str(date.today().strftime("%y%m%d")) with open('deletions_conf.json') as data_file: config = json.load(data_file) def closest(self): return daily(date=self.date) class getDelList(DeleteTask): def run(self): cmdstring="wget -P {date}-delPPN -rnd --user {username} --password {password} {url}".format(**self.config,date=self.date) output=shellout(cmdstring) return 0 def output(self): return luigi.LocalTarget("{date}-delPPN".format(date=self.date)) def complete(self): return True if os.path.exists("{date}-delPPN".format(date=self.date)) else False class getDelPPNs(DeleteTask): def requires(self): return getDelList() def run(self): outputset=set() for f in os.listdir(self.date+"-delPPN/"): with open(self.date+"-delPPN/"+f) as handle: for line in handle: # dissect line __date = line[0:5] # YYDDD, WTF __time = line[5:11] # HHMMSS d_type = line[11:12] # xpn, since this could be ppn or epn; it is an epn, if d_type == 9; it is a ppn if d_type == A # 2018-05-17: #13108 longer EPNs ## __xpn = line[12:21] __xpn = line[12:22] ## __iln = line[21:25] # only in epns __iln = line[22:26] # only in epns if d_type == 'A': for url,conf in self.config.get("getstrings").items(): if conf.get("method")=="GET": print(__xpn.strip()) r=get(url+__xpn.strip()) if r.ok and r.json()["found"]: outputset.add(url[:25]+"/"+r.json()["_index"]+"/"+r.json()["_type"]+"/"+r.json()["_id"]) elif conf.get("method")=="POST": # TODO pass with open("{date}-toDelete.txt".format(date=self.date),"w") as outp: for ppn in outputset: outp.write(ppn) return 0 def output(self): return luigi.LocalTarget("{date}-toDelete.txt".format(date=self.date)) def complete(self): return True if 
os.path.isfile("{date}-toDelete.txt".format(date=self.date)) else False


class DeletePPNsByFile(DeleteTask):
    def requires(self):
        return getDelPPNs()

    def run(self):
        with open("{date}-toDelete.txt".format(date=self.date), "r") as inp:
            for url in inp:
                print(url.strip())
                #delete(url.strip())

    def complete(self):
        try:
            with open("{date}-toDelete.txt".format(date=self.date), "r") as inp:
                for url in inp:
                    r = get(url.strip())
                    if not r.status_code == 404:
                        return False
                return True
        except FileNotFoundError:
            return False
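# Illustrative sketch (assumption, not part of the original module): the three
# tasks above are chained through requires(), so scheduling the last one drives
# the whole pipeline. This presumes a valid deletions_conf.json (see the README
# block at the top of this file) is present, since DeleteTask loads it at class
# definition time.
if __name__ == '__main__':
    luigi.build([DeletePPNsByFile()], local_scheduler=True)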
<reponame>auto-bwcx-me/scenario_runner #!/usr/bin/env python # # This work is licensed under the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. """ Basic CARLA Autonomous Driving training scenario """ import py_trees from srunner.scenarioconfigs.route_scenario_configuration import RouteConfiguration from srunner.scenariomanager.scenarioatomics.atomic_behaviors import Idle from srunner.scenariomanager.scenarioatomics.atomic_criteria import (CollisionTest, InRouteTest, OnSidewalkTest, RouteCompletionTest, RunningRedLightTest, RunningStopTest, WrongLaneTest) from srunner.scenarios.basic_scenario import BasicScenario class MasterScenario(BasicScenario): """ Implementation of a Master scenario that controls the route. This is a single ego vehicle scenario """ radius = 10.0 # meters def __init__(self, world, ego_vehicles, config, randomize=False, debug_mode=False, criteria_enable=True, timeout=300): """ Setup all relevant parameters and create scenario """ self.config = config self.target = None self.route = None # Timeout of scenario in seconds self.timeout = timeout if hasattr(self.config, 'target'): self.target = self.config.target else: raise ValueError("Master scenario must have a target") if hasattr(self.config, 'route'): self.route = self.config.route else: raise ValueError("Master scenario must have a route") super(MasterScenario, self).__init__("MasterScenario", ego_vehicles=ego_vehicles, config=config, world=world, debug_mode=debug_mode, terminate_on_failure=True, criteria_enable=criteria_enable) def _create_behavior(self): """ Basic behavior do nothing, i.e. Idle """ # Build behavior tree sequence = py_trees.composites.Sequence("MasterScenario") idle_behavior = Idle() sequence.add_child(idle_behavior) return sequence def _create_test_criteria(self): """ A list of all test criteria will be created that is later used in parallel behavior tree. """ if isinstance(self.route, RouteConfiguration): route = self.route.data else: route = self.route collision_criterion = CollisionTest(self.ego_vehicles[0], terminate_on_failure=False) route_criterion = InRouteTest(self.ego_vehicles[0], radius=30.0, route=route, offroad_max=20, terminate_on_failure=True) completion_criterion = RouteCompletionTest(self.ego_vehicles[0], route=route) wrong_way_criterion = WrongLaneTest(self.ego_vehicles[0]) onsidewalk_criterion = OnSidewalkTest(self.ego_vehicles[0]) red_light_criterion = RunningRedLightTest(self.ego_vehicles[0]) stop_criterion = RunningStopTest(self.ego_vehicles[0]) parallel_criteria = py_trees.composites.Parallel("group_criteria", policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE) parallel_criteria.add_child(completion_criterion) parallel_criteria.add_child(collision_criterion) parallel_criteria.add_child(route_criterion) parallel_criteria.add_child(wrong_way_criterion) parallel_criteria.add_child(onsidewalk_criterion) parallel_criteria.add_child(red_light_criterion) parallel_criteria.add_child(stop_criterion) return parallel_criteria def __del__(self): """ Remove all actors upon deletion """ self.remove_all_actors()
from time import strftime import time import threading import sys from PySide2.QtWidgets import QWidget, QVBoxLayout, QApplication from PySide2.QtCore import Qt, QRectF from PySide2.QtGui import QColor, QFont, QImage, QPainter, QPen, QPainterPath, QConicalGradient, QGradient, QColor, \ QPalette, QGuiApplication from PySide2.QtWidgets import QDialog from log import Logger import util import config class QRoundProgressBar(QWidget): StyleDonut = 1 StylePie = 2 StyleLine = 3 PositionLeft = 180 PositionTop = 90 PositionRight = 0 PositionBottom = -90 UF_VALUE = 1 UF_PERCENT = 2 UF_MAX = 4 def __init__(self, console): super().__init__() self.min = 0 self.max = 100 self.value = 25 self.nullPosition = self.PositionTop self.barStyle = self.StyleDonut self.outlinePenWidth = 1 self.dataPenWidth = 1 self.rebuildBrush = False self.format = "%p%" self.decimals = 1 self.updateFlags = self.UF_PERCENT self.gradientData = [] self.donutThicknessRatio = 0.75 self.console = console def setRange(self, min, max): self.min = min self.max = max if self.max < self.min: self.max, self.min = self.min, self.max if self.value < self.min: self.value = self.min elif self.value > self.max: self.value = self.max if not self.gradientData: self.rebuildBrush = True self.update() def setMinimun(self, min): self.setRange(min, self.max) def setMaximun(self, max): self.setRange(self.min, max) def setValue(self, val): if self.value != val: if val < self.min: self.value = self.min elif val > self.max: self.value = self.max else: self.value = val self.update() def setNullPosition(self, position): if position != self.nullPosition: self.nullPosition = position if not self.gradientData: self.rebuildBrush = True self.update() def setBarStyle(self, style): if style != self.barStyle: self.barStyle = style self.update() def setOutlinePenWidth(self, penWidth): if penWidth != self.outlinePenWidth: self.outlinePenWidth = penWidth self.update() def setDataPenWidth(self, penWidth): if penWidth != self.dataPenWidth: self.dataPenWidth = penWidth self.update() def setDataColors(self, stopPoints): if stopPoints != self.gradientData: self.gradientData = stopPoints self.rebuildBrush = True self.update() def setFormat(self, format): if format != self.format: self.format = format self.valueFormatChanged() def resetFormat(self): self.format = '' self.valueFormatChanged() def setDecimals(self, count): if count >= 0 and count != self.decimals: self.decimals = count self.valueFormatChanged() def setDonutThicknessRatio(self, val): self.donutThicknessRatio = max(0., min(val, 1.)) self.update() def paintEvent(self, event): outerRadius = min(self.width(), self.height()) baseRect = QRectF(1, 1, outerRadius - 2, outerRadius - 2) buffer = QImage(outerRadius, outerRadius, QImage.Format_ARGB32) buffer.fill(0) p = QPainter(buffer) p.setRenderHint(QPainter.Antialiasing) # data brush self.rebuildDataBrushIfNeeded() # background self.drawBackground(p, buffer.rect()) # base circle self.drawBase(p, baseRect) # data circle arcStep = 360.0 / (self.max - self.min) * self.value self.drawValue(p, baseRect, self.value, arcStep) # center circle innerRect, innerRadius = self.calculateInnerRect(baseRect, outerRadius) self.drawInnerBackground(p, innerRect) # text self.drawText(p, innerRect, innerRadius, self.value) # finally draw the bar p.end() painter = QPainter(self) painter.drawImage(0, 0, buffer) def drawBackground(self, p, baseRect): p.fillRect(baseRect, self.palette().window()) def drawBase(self, p, baseRect): bs = self.barStyle if bs == self.StyleDonut: # 
p.setPen(QtGui.QPen(self.palette().shadow().color(), self.outlinePenWidth)) p.setPen(QPen(self.palette().shadow().color(), -1)) # p.setBrush(self.palette().base()) p.setBrush(QColor(7, 93, 145)) p.drawEllipse(baseRect) elif bs == self.StylePie: p.setPen(QPen(self.palette().base().color(), self.outlinePenWidth)) p.setBrush(self.palette().base()) p.drawEllipse(baseRect) elif bs == self.StyleLine: p.setPen(QPen(self.palette().base().color(), self.outlinePenWidth)) p.setBrush(Qt.NoBrush) p.drawEllipse( baseRect.adjusted(self.outlinePenWidth / 2, self.outlinePenWidth / 2, -self.outlinePenWidth / 2, -self.outlinePenWidth / 2)) def drawValue(self, p, baseRect, value, arcLength): # nothing to draw if value == self.min: return # for Line style if self.barStyle == self.StyleLine: p.setPen(QPen(self.palette().highlight().color(), self.dataPenWidth)) p.setBrush(Qt.NoBrush) p.drawArc(baseRect.adjusted(self.outlinePenWidth / 2, self.outlinePenWidth / 2, -self.outlinePenWidth / 2, -self.outlinePenWidth / 2), self.nullPosition * 16, -arcLength * 16) return # for Pie and Donut styles dataPath = QPainterPath() dataPath.setFillRule(Qt.WindingFill) # pie segment outer dataPath.moveTo(baseRect.center()) dataPath.arcTo(baseRect, self.nullPosition, -arcLength) dataPath.lineTo(baseRect.center()) p.setBrush(self.palette().highlight()) p.setBrush(QColor(255, 255, 255, 255 * 0.3)) # pen = QtGui.QPen(self.palette().shadow().color(), self.dataPenWidth) pen = QPen(self.palette().shadow().color(), -1) p.setPen(pen) p.drawPath(dataPath) def calculateInnerRect(self, baseRect, outerRadius): # for Line style if self.barStyle == self.StyleLine: innerRadius = outerRadius - self.outlinePenWidth else: # for Pie and Donut styles innerRadius = outerRadius * self.donutThicknessRatio delta = (outerRadius - innerRadius) / 2. innerRect = QRectF(delta, delta, innerRadius, innerRadius) return innerRect, innerRadius def drawInnerBackground(self, p, innerRect): if self.barStyle == self.StyleDonut: p.setBrush(self.palette().alternateBase()) cmod = p.compositionMode() p.setCompositionMode(QPainter.CompositionMode_Source) p.drawEllipse(innerRect) p.setCompositionMode(cmod) def drawText(self, p, innerRect, innerRadius, value): if not self.format: return text = self.valueToText(value) # !!! 
to revise # f = self.font() f = QFont() f.setFamily("微軟正黑體") # f.setPixelSize(innerRadius * max(0.05, (0.35 - self.decimals * 0.08))) f.setPixelSize(60) p.setFont(f) textRect = innerRect p.setPen(self.palette().text().color()) p.drawText(textRect, Qt.AlignCenter, text) def valueToText(self, value): textToDraw = self.format format_string = '{' + ':.{}f'.format(self.decimals) + '}' if self.updateFlags & self.UF_VALUE: textToDraw = textToDraw.replace("%v", format_string.format(value)) if self.updateFlags & self.UF_PERCENT: percent = (value - self.min) / (self.max - self.min) * 100.0 textToDraw = textToDraw.replace("%p", format_string.format(percent)) if self.updateFlags & self.UF_MAX: m = self.max - self.min + 1 textToDraw = textToDraw.replace("%m", format_string.format(m)) return textToDraw def valueFormatChanged(self): self.updateFlags = 0; if "%v" in self.format: self.updateFlags |= self.UF_VALUE if "%p" in self.format: self.updateFlags |= self.UF_PERCENT if "%m" in self.format: self.updateFlags |= self.UF_MAX self.update() def rebuildDataBrushIfNeeded(self): if self.rebuildBrush: self.rebuildBrush = False dataBrush = QConicalGradient() dataBrush.setCenter(0.5, 0.5) dataBrush.setCoordinateMode(QGradient.StretchToDeviceMode) for pos, color in self.gradientData: dataBrush.setColorAt(1.0 - pos, color) # angle dataBrush.setAngle(self.nullPosition) p = self.palette() p.setBrush(QPalette.Highlight, dataBrush) self.setPalette(p) def mouseDoubleClickEvent(self, event): clipboard = QGuiApplication.clipboard() clipboard.setText(self.format.strip()) self.console.system_alert('驗證碼已經複製到剪貼簿') class Form(QDialog): def __init__(self, console): super(type(self), self).__init__() self.bar = QRoundProgressBar(console) self.bar.setFixedSize(300, 300) self.bar.setDataPenWidth(0) self.bar.setOutlinePenWidth(0) self.bar.setDonutThicknessRatio(0.92) self.bar.setDecimals(0) self.bar.setNullPosition(90) self.bar.setBarStyle(QRoundProgressBar.StyleDonut) self.bar.setDataColors([(0., QColor.fromRgb(65, 105, 225))]) self.bar.setRange(0, 29) self.setWindowTitle(f'{console.ptt_id} 驗證碼') lay = QVBoxLayout() lay.addWidget(self.bar) self.setLayout(lay) self.console = console self.timer_thread = None self.call_close = False self.logger = Logger('Progress', Logger.INFO) self.setWindowIcon(util.load_icon(config.icon_small)) self.update_otp() def update_otp(self): data = self.console.current_otp self.logger.show_value(Logger.INFO, 'update_otp', data) current_data = f'{data}' self.bar.setFormat(current_data) if self.timer_thread is None: self.timer_thread = threading.Thread(target=self.timer) # self.timer_thread.daemon = True self.timer_thread.start() def timer(self): self.logger.show(Logger.INFO, '啟動計時器') while not self.call_close: current_sec = int(strftime("%S")) % 30 self.logger.show_value(Logger.INFO, 'current_sec', current_sec) for value in range(current_sec, 30): self.bar.setValue(value) self.logger.show_value(Logger.TRACE, 'value', value) temp_sec = value if self.call_close: break while temp_sec == value: if self.call_close: break time.sleep(0.05) temp_sec = int(strftime("%S")) % 30 # self.logger.show_value(Logger.INFO, 'temp_sec', temp_sec) self.logger.show(Logger.INFO, 'timer finish') def close_form(self): self.logger.show(Logger.INFO, '1') self.call_close = True self.logger.show(Logger.INFO, '2') time.sleep(0.5) self.logger.show(Logger.INFO, '3') self.bar.close() self.logger.show(Logger.INFO, '4') self.close() self.logger.show(Logger.INFO, '5') def closeEvent(self, event): self.logger.show(Logger.INFO, '直接關閉') 
self.console.system_alert('背景執行中')


def update_thread():
    for i in range(10):
        print('===================================')
        dlg.update_otp('12345' + str(i))
        sleep_time = 30 - (int(strftime("%S")) % 30)
        print(f'sleep {sleep_time}')
        time.sleep(sleep_time)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    dlg = Form()
    dlg.show()
    thread = threading.Thread(target=update_thread)
    thread.daemon = True
    thread.start()
    sys.exit(app.exec_())
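# Illustrative sketch (assumption, not part of the original script): a minimal
# standalone use of the QRoundProgressBar widget defined above, without the OTP
# Form wrapper. console=None is only safe as long as mouseDoubleClickEvent
# (which calls console.system_alert) is never triggered.
def demo_round_progress_bar():
    app = QApplication.instance() or QApplication(sys.argv)
    bar = QRoundProgressBar(console=None)
    bar.setFixedSize(300, 300)
    bar.setBarStyle(QRoundProgressBar.StyleDonut)
    bar.setDataColors([(0.0, QColor.fromRgb(65, 105, 225))])
    bar.setRange(0, 30)
    bar.setFormat('%v / %m')   # show current value and maximum in the centre
    bar.setValue(12)
    bar.show()
    return app.exec_()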
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2014-2018 GEM Foundation # # OpenQuake is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # OpenQuake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with OpenQuake. If not, see <http://www.gnu.org/licenses/>. """ Module :mod:`openquake.hazardlib.geo.surface.gridded` defines :class:`GriddedSurface`. """ import numpy as np from openquake.baselib.node import Node from openquake.hazardlib.geo import utils from openquake.hazardlib.geo.point import Point from openquake.hazardlib.geo.surface.base import BaseSurface from openquake.hazardlib.geo.mesh import Mesh class GriddedSurface(BaseSurface): """ Gridded surface defined by an unstructured cloud of points. This surface type is required for a proper implementation of some subduction interface surfaces included int the Japan 2012 model. Note that currently we support only one rupture-site typology i.e. since this the only one that can be unambiguosly computed. :param mesh: An unstructured mesh of points ideally representing a rupture surface. Must be an instance of :class:`~openquake.hazardlib.geo.mesh.Mesh` """ @property def surface_nodes(self): """ :param points: a list of Point objects :returns: a Node of kind 'griddedSurface' """ line = [] for point in self.mesh: line.append(point.longitude) line.append(point.latitude) line.append(point.depth) return [Node('griddedSurface', nodes=[Node('gml:posList', {}, line)])] @classmethod def from_points_list(cls, points): """ Create a gridded surface from a list of points. :parameter points: A list of :class:`~openquake.hazardlib.geo.Point` :returns: An instance of :class:`~openquake.hazardlib.geo.surface.gridded.GriddedSurface` """ return cls(Mesh.from_points_list(points)) def get_bounding_box(self): """ Compute surface geographical bounding box. :return: A tuple of four items. These items represent western, eastern, northern and southern borders of the bounding box respectively. Values are floats in decimal degrees. """ return utils.get_spherical_bounding_box(self.mesh.lons, self.mesh.lats) def get_surface_boundaries(self): """ :returns: (min_max lons, min_max lats) """ min_lon, min_lat, max_lon, max_lat = self.get_bounding_box() return [[min_lon, max_lon]], [[min_lat, max_lat]] def get_rx_distance(self, mesh): """ Compute distance between each point of mesh and surface's great circle arc. Distance is measured perpendicular to the rupture strike, from the surface projection of the updip edge of the rupture, with the down dip direction being positive (this distance is usually called ``Rx``). In other words, is the horizontal distance to top edge of rupture measured perpendicular to the strike. Values on the hanging wall are positive, values on the footwall are negative. :param mesh: :class:`~openquake.hazardlib.geo.mesh.Mesh` of points to calculate Rx-distance to. :returns: Numpy array of distances in km. """ raise NotImplementedError def get_top_edge_depth(self): """ Compute minimum depth of surface's top edge. 
:returns: Float value, the vertical distance between the earth surface and the shallowest point in surface's top edge in km. """ raise NotImplementedError def get_strike(self): """ Compute surface's strike as decimal degrees in a range ``[0, 360)``. The actual definition of the strike might depend on surface geometry. :returns: numpy.nan, not available for this kind of surface (yet) """ return np.nan def get_dip(self): """ Compute surface's dip as decimal degrees in a range ``(0, 90]``. The actual definition of the dip might depend on surface geometry. :returns: numpy.nan, not available for this kind of surface (yet) """ return np.nan def get_width(self): """ Compute surface's width (that is surface extension along the dip direction) in km. The actual definition depends on the type of surface geometry. :returns: Float value, the surface width """ raise NotImplementedError def get_area(self): """ Compute surface's area in squared km. :returns: Float value, the surface area """ raise NotImplementedError def get_middle_point(self): """ Compute coordinates of surface middle point. The actual definition of ``middle point`` depends on the type of surface geometry. :return: instance of :class:`openquake.hazardlib.geo.point.Point` representing surface middle point. """ lons = self.mesh.lons.squeeze() lats = self.mesh.lats.squeeze() depths = self.mesh.depths.squeeze() lon_bar = lons.mean() lat_bar = lats.mean() idx = np.argmin((lons - lon_bar)**2 + (lats - lat_bar)**2) return Point(lons[idx], lats[idx], depths[idx]) def get_ry0_distance(self, mesh): """ :param mesh: :class:`~openquake.hazardlib.geo.mesh.Mesh` of points """ raise NotImplementedError
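# Illustrative sketch (assumption, not part of openquake): building a
# GriddedSurface from a small, invented cloud of points and querying the
# accessors that are actually implemented above (bounding box, middle point);
# the distance/width/area methods deliberately raise NotImplementedError for
# this surface type.
def _gridded_surface_example():
    points = [Point(0.0, 0.0, 5.0), Point(0.1, 0.0, 5.0),
              Point(0.0, 0.1, 6.0), Point(0.1, 0.1, 6.0)]
    surface = GriddedSurface.from_points_list(points)
    west, east, north, south = surface.get_bounding_box()
    middle = surface.get_middle_point()   # mesh point closest to the centroid
    return (west, east, north, south), middle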
# Copyright 2020, Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Methods for operating the geo classes. We store the prefix tree using pygtrie objects. Initially we consider user's coordinate as an (x,y) tuple. We then compute a binary version of this tuple, e.g. (x=12, y=5) => (1100, 0101) creates a prefix: ‘10/11/00/01’. We keep the counts using vectors with positions corresponding to the ids of the leafs in the tree. For each leaf we implement a conversion process into either the coordinate on some level or a region on the lowest level. """ import dataclasses import random from typing import List, Any from tqdm import tqdm import numpy as np import pygtrie from sketches import CountMinSketch, hash_function depth = 20 width = 2000 hash_functions = [hash_function(i) for i in range(depth)] sum_sketch = CountMinSketch(depth, width, hash_functions) # count_min = False DEFAULT_CHILDREN = ['00', '01', '10', '11'] def get_default_children(positivity, split=None): if positivity: if split == 'pos': return ['001', '011', '101', '111'] elif split == 'neg': return ['000', '010', '100', '110'] else: return ['000', '001', '010', '011', '100', '101', '110', '111'] else: return ['00', '01', '10', '11'] @dataclasses.dataclass class AlgResult: """Main result object. Attributes: image: resulting reassembled image sum_vector: a vector of reports on the tree leaves. tree: a prefix trie used to convert the sum_vector into image. tree_prefix_list: a reverse prefix matching vector coordinates to the trie. threshold: threshold parameter used to obtain the current tree. grid_contour: image showing the tree leafs locations on the map. eps: current value of the epsilon in SecAgg round. """ image: np.ndarray sum_vector: np.ndarray tree: pygtrie.StringTrie tree_prefix_list: List[str] threshold: float grid_contour: np.ndarray eps: float pos_image: np.ndarray = None neg_image: np.ndarray = None metric: Any = None sampled_metric: Any = None def coordinates_to_binary_path(xy_tuple, depth=10): """Transform a coordinate tuple into a binary vector. We compute a binary version of the provided coordinate tuple, e.g. (x=12, y=5) => (1100, 0101) creates a prefix: ‘10/11/00/01’. Args: xy_tuple: a tuple of (x,y) coordinates of the user location. depth: desired length of the binary vector, e.g. max depth of the tree. Returns: binary version of the coordinate. """ if len(xy_tuple) == 2: x_coord, y_coord = xy_tuple positivity = False pos = '' else: x_coord, y_coord, pos = xy_tuple path = '' for j in reversed(range(depth)): path += f'{(x_coord >> j) & 1}{(y_coord >> j) & 1}{pos}/' path = path[:-1] return path def binary_path_to_coordinates(path): """Using tree path to the leaf node retrieve (x, y) coordinates. Reassembles the path into coordinates. Note that if the path is shorter, e.g. for leafs closer to the tree root, the (x, y) coordinates would be w.r.t. to the image of the size 2^b x 2^b, where b = `path coordinate bits`. 
Args: path: binary path of the location ('00/01') Returns: x coordinate, y coordinate, total bit level, pos """ x = 0 y = 0 pos = None splitted_path = path.split('/') for xy in splitted_path: x = x << 1 y = y << 1 x += int(xy[0]) y += int(xy[1]) if len(xy) > 2: pos = int(xy[2]) return x, y, len(splitted_path), pos def report_coordinate_to_vector(xy, tree, tree_prefix_list, count_min): """Converts a coordinate tuple into a one-hot vector using tree.""" path = coordinates_to_binary_path(xy) (sub_path, value) = tree.longest_prefix(path) if count_min: sketch = CountMinSketch(depth, width, hash_functions) sketch.add(sub_path) # print(sub_path, sketch.query(sub_path)) vector = sketch.get_matrix() else: vector = np.zeros([len(tree_prefix_list)]) vector[value] += 1 return vector def init_tree(positivity=False): """Initializes tree to have four leaf nodes. Creates pgtrie with leafs from `DEFAULT_CHILDREN` and assigns each node a positional identifier using positions from the `DEFAULT_CHILDREN`. Args: positivity: Whether to account for pos and neg users. Returns: constructed pygtrie, reverse prefix of the trie. """ new_tree = pygtrie.StringTrie() for i, z in enumerate(get_default_children(positivity)): new_tree[z] = i return new_tree, list(get_default_children(positivity)) def transform_region_to_coordinates(x_coord, y_coord, prefix_len, image_bit_level=10): """Transforms (x,y)-bit region into a square for a final level. This method converts a leaf on some level `prefix_len` to a square region at the final level `2^image_bit_level`. For example, a first leaf on the smallest prefix 2x2 will occupy (0:512, 0:512) region of the 10-bit image. Args: x_coord: y_coord: prefix_len: image_bit_level: Returns: A square region coordinates. """ shift = image_bit_level - prefix_len x_bot = x_coord << shift x_top = ((x_coord + 1) << shift) - 1 y_bot = y_coord << shift y_top = ((y_coord + 1) << shift) - 1 return (x_bot, x_top, y_bot, y_top) def rebuild_from_vector(vector, tree, image_size, contour=False, threshold=0, positivity=False, count_min=False): """Using coordinate vector and the tree produce a resulting image. For each value in the vector it finds the corresponding prefix and plots the value of the vector on a square region of the final image. Args: vector: data vector from the accumulated responses. tree: current tree object image_size: desired final resolution of the image. contour: release only the contours of the grid (for debugging) threshold: reduces noise by setting values below threshold to 0. positivity: produce two images with positive and negative cases. count_min: use count min sketch. Returns: image of the size `image_size x image_size` """ image_bit_level = int(np.log2(image_size)) current_image = np.zeros([image_size, image_size]) pos_image, neg_image = None, None if positivity: pos_image = np.zeros([image_size, image_size]) neg_image = np.zeros([image_size, image_size]) for path in sorted(tree): if count_min: value = sum_sketch.query(path) else: value = vector[tree[path]] (x, y, prefix_len, pos) = binary_path_to_coordinates(path) (x_bot, x_top, y_bot, y_top) = transform_region_to_coordinates(x, y, prefix_len, image_bit_level) if value < threshold: value = 0 count = value / 2 ** (1 * (image_bit_level - prefix_len)) # Build a grid image without filling the regions. 
if contour: current_image[x_bot:x_top + 1, y_bot - max(1, 5 // prefix_len):y_bot + max(1, 5 // prefix_len)] = 1 current_image[x_bot:x_top + 1, y_top - max(1, 5 // prefix_len):y_top + 10 // prefix_len] = 1 current_image[ x_bot - max(1, 5 // prefix_len):x_bot + 10 // prefix_len, y_bot:y_top + 1] = 1 current_image[ x_top - max(1, 5 // prefix_len):x_top + 10 // prefix_len, y_bot:y_top + 1] = 1 else: current_image[x_bot:x_top + 1, y_bot:y_top + 1] += count if positivity: if pos == 1: pos_image[x_bot:x_top + 1, y_bot:y_top + 1] = count elif pos == 0: neg_image[x_bot:x_top + 1, y_bot:y_top + 1] = count else: raise ValueError(f'value: {pos}') return current_image, pos_image, neg_image def split_regions(tree_prefix_list, vector_counts, threshold, image_bit_level, collapse_threshold=None, positivity=False, expand_all=False, last_result: AlgResult=None, count_min=False): """Modify the tree by splitting and collapsing the nodes. This implementation collapses and splits nodes of the tree according to the received responses of the users. If there are no new nodes discovered the finished flag is returned as True. Args: tree_prefix_list: matches vector id to the tree prefix. vector_counts: vector values aggregated from the users. threshold: threshold value used to split the nodes. image_bit_level: stopping criteria once the final resolution is reached. collapse_threshold: threshold value used to collapse the nodes. Returns: new_tree, new_tree_prefix_list, finished """ collapsed = 0 created = 0 fresh_expand = 0 unchanged = 0 intervals = list() new_tree_prefix_list = list() new_tree = pygtrie.StringTrie() if positivity: for i in range(0, len(tree_prefix_list), 2): if expand_all: neg_count = threshold + 1 pos_count = threshold + 1 else: neg_count = vector_counts[i] pos_count = vector_counts[i+1] neg_prefix = tree_prefix_list[i] pos_prefix = tree_prefix_list[i+1] # check whether the tree has reached the bottom if len(pos_prefix.split('/')) >= image_bit_level: continue # total = pos_count + neg_count # p = pos_count / total # confidence = np.sqrt((1-p)*p/total) # error bound propagation. # confidence +/- noise # pos_count/total +/- (confidence+conf_noise) => 95% interval for 95% noise interval. 
if pos_count > threshold and neg_count > threshold: neg_child = get_default_children(positivity, split='neg') pos_child = get_default_children(positivity, split='pos') for j in range(len(pos_child)): new_prefix = f'{neg_prefix}/{neg_child[j]}' if not new_tree.has_key(new_prefix): fresh_expand += 1 new_tree[new_prefix] = len(new_tree_prefix_list) new_tree_prefix_list.append(new_prefix) new_prefix = f'{pos_prefix}/{pos_child[j]}' new_tree[new_prefix] = len(new_tree_prefix_list) new_tree_prefix_list.append(new_prefix) else: if collapse_threshold is not None and \ (pos_count < collapse_threshold or neg_count < collapse_threshold) and \ len(pos_prefix) > 3 and len(neg_prefix) > 3: old_prefix = neg_prefix[:-4] collapsed += 1 if not new_tree.has_key(old_prefix): created += 1 new_tree[old_prefix] = len(new_tree_prefix_list) new_tree_prefix_list.append(old_prefix) old_prefix = pos_prefix[:-4] new_tree[old_prefix] = len(new_tree_prefix_list) new_tree_prefix_list.append(old_prefix) else: unchanged += 1 new_tree[f'{neg_prefix}'] = len(new_tree_prefix_list) new_tree_prefix_list.append(f'{neg_prefix}') new_tree[f'{pos_prefix}'] = len(new_tree_prefix_list) new_tree_prefix_list.append(f'{pos_prefix}') else: for i in range(len(tree_prefix_list)): if expand_all: count = threshold + 1 else: if count_min: count = sum_sketch.query(tree_prefix_list[i]) else: count = vector_counts[i] prefix = tree_prefix_list[i] # check whether the tree has reached the bottom if len(prefix.split('/')) >= image_bit_level: continue if last_result is not None: (last_prefix, last_prefix_pos) = last_result.tree.longest_prefix(prefix) if last_prefix is None: cond = False else: last_count = last_result.sum_vector[last_prefix_pos] p = (last_count - count)/last_count if p<=0 or count<5 or last_count<5: cond = False # print(last_prefix, prefix, last_prefix_pos, last_count, # count) else: conf_int = 1.96 * np.sqrt((p*(1-p)/last_count)) * last_count cond = conf_int < threshold intervals.append(conf_int) # print(last_prefix, prefix, last_prefix_pos, last_count, count, conf_int, cond) else: cond = count > threshold # print(cond, threshold, count) if cond: for child in DEFAULT_CHILDREN: new_prefix = f'{prefix}/{child}' if not new_tree.has_key(new_prefix): fresh_expand += 1 new_tree[new_prefix] = len(new_tree_prefix_list) new_tree_prefix_list.append(new_prefix) else: if collapse_threshold is not None and \ count <= collapse_threshold and \ len(prefix) > 2: old_prefix = prefix[:-3] collapsed += 1 if not new_tree.has_key(old_prefix): created += 1 new_tree[old_prefix] = len(new_tree_prefix_list) new_tree_prefix_list.append(old_prefix) else: unchanged += 1 new_tree[f'{prefix}'] = len(new_tree_prefix_list) new_tree_prefix_list.append(f'{prefix}') finished = False # print(f'Conf int {np.mean(intervals) if len(intervals) else 0}.') # if collapse_threshold: # print(f'Collapsed: {collapsed}, created when collapsing: {created},' + \ # f'new expanded: {fresh_expand},' + \ # f'unchanged: {unchanged}, total: {len(new_tree_prefix_list)}') if fresh_expand == 0: # len(new_tree_prefix_list) <= len(tree_prefix_list): print('Finished expanding, no new results.') finished = True return new_tree, new_tree_prefix_list, finished def build_from_sample(samples, total_size): """Restores the image from the list of coordinate tuples.""" image = np.zeros([total_size, total_size]) for sample in samples: x = sample[0] y = sample[1] image[x, y] += 1 return image def quantize_vector(vector, left_bound, right_bound): """Modulo clipping of the provided vector.""" if left_bound > 
right_bound: raise ValueError('Left bound is higher than the right bound.') distance = (right_bound - left_bound) scale = (vector - left_bound) // distance vector -= distance * scale return vector def makeGaussian(image, total_size, fwhm=3, center=None, convert=False, save=False, load=False): """ Make a square gaussian kernel. size is the length of a side of the square fwhm is full-width-half-maximum, which can be thought of as an effective radius. """ import torch if load: return torch.load(f'split_dataset_{fwhm}_{center[0]}_{center[1]}.pt') size = image.shape[0] x = np.arange(0, size, 1, float) y = x[:, np.newaxis] if center is None: x0 = y0 = size // 2 else: x0 = center[0] y0 = center[1] hotspot = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / fwhm ** 2) pos_image = np.floor(hotspot * image) pos_image = pos_image.astype(int) neg_image = image - pos_image if convert: pos_dataset = convert_to_dataset(pos_image, total_size, value=1) neg_dataset = convert_to_dataset(neg_image, total_size, value=0) total_dataset = np.concatenate([pos_dataset, neg_dataset]) res = dict(mask=hotspot, pos_image=pos_image, neg_image=neg_image, pos_dataset=pos_dataset, neg_dataset=neg_dataset, total_dataset=total_dataset) if save: torch.save(res, f'split_dataset_{fwhm}_{center[0]}_{center[1]}.pt') print(f'Saved to split_dataset_{fwhm}_{center[0]}_{center[1]}.pt') return res else: return dict(mask=hotspot, pos_image=pos_image, neg_image=neg_image) def convert_to_dataset(image, total_size, value=None): if value is not None: dataset = np.zeros(image.sum(), dtype=[('x', np.int16), ('y', np.int16), ('pos', np.int8)]) else: dataset = np.zeros(image.sum(), dtype=[('x', np.int16), ('y', np.int16)]) z = 0 for i in tqdm(range(total_size), total=total_size): for j in range(total_size): for _ in range(int(image[i, j])): if value is not None: dataset[z] = (i, j, value) else: dataset[z] = (i, j) z += 1 return dataset def compute_conf_intervals(sum_vector: np.ndarray, level=95): conf_intervals = dict() conf_interval_weighted = dict() if level==95: z= 1.96 elif level == 99: z = 2.576 elif level ==90: z = 1.645 elif level == 98: z = 2.326 else: raise ValueError(f'Incorrect confidence level {level}.') for i in range(0, sum_vector.shape[0], 2): neg_count = sum_vector[i] pos_count = sum_vector[i+1] total_clients_on_map = sum_vector.sum() total_region = neg_count + pos_count if pos_count > 5 and neg_count > 5: p = pos_count / total_region conf_interval = z * np.sqrt( (1-p) * p / total_region) conf_intervals[i] = conf_interval conf_interval_weighted[i] = conf_interval * total_region/total_clients_on_map return conf_intervals, conf_interval_weighted def make_step(samples, eps, threshold, partial, prefix_len, dropout_rate, tree, tree_prefix_list, noiser, quantize, total_size, positivity, count_min): samples_len = len(samples) if count_min: round_vector = np.zeros([partial, depth, width]) sum_sketch.M = np.zeros([depth, width], dtype=np.float64) sum_vector = sum_sketch.get_matrix() else: round_vector = np.zeros([partial, prefix_len]) sum_vector = np.zeros(prefix_len) for j, sample in enumerate(tqdm(samples, leave=False)): if dropout_rate and random.random() <= dropout_rate: continue round_vector[j % partial] = report_coordinate_to_vector( sample, tree, tree_prefix_list, count_min) if j % partial == 0 or j == samples_len - 1: round_vector = noiser.apply_noise(round_vector) if quantize is not None: round_vector = quantize_vector(round_vector, -2 ** ( quantize - 1), 2 ** ( quantize - 1)) sum_vector += quantize_vector( 
round_vector.sum(axis=0), -2 ** (quantize - 1), 2 ** (quantize - 1)) else: sum_vector += round_vector.sum(axis=0) if count_min: round_vector = np.zeros([partial, depth, width]) else: round_vector = np.zeros([partial, prefix_len]) del round_vector rebuilder = np.copy(sum_vector) if eps: threshold_rebuild = threshold else: threshold_rebuild = 0.0 test_image, pos_image, neg_image = rebuild_from_vector( rebuilder, tree, image_size=total_size, threshold=threshold_rebuild, positivity=positivity, count_min=count_min) grid_contour, _, _ = rebuild_from_vector( sum_vector, tree, image_size=total_size, contour=True, threshold=threshold_rebuild, count_min=count_min) result = AlgResult( image=test_image, sum_vector=sum_vector, tree=tree, tree_prefix_list=tree_prefix_list, threshold=threshold, grid_contour=grid_contour, pos_image=pos_image, neg_image=neg_image, eps=eps) return result, grid_contour
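# Illustrative sketch (assumption, not part of the module): round-tripping the
# coordinate/prefix conversion documented in coordinates_to_binary_path above,
# then dropping one report into a freshly initialised 4-leaf tree.
def _prefix_roundtrip_example():
    path = coordinates_to_binary_path((12, 5), depth=4)
    assert path == '10/11/00/01'            # the example from the docstring
    x, y, bit_level, pos = binary_path_to_coordinates(path)
    assert (x, y, bit_level, pos) == (12, 5, 4, None)
    # A fresh tree has the four quadrant leaves '00', '01', '10', '11'; a
    # report is counted in the leaf that is the longest prefix of its path.
    tree, tree_prefix_list = init_tree()
    vector = report_coordinate_to_vector((12, 5), tree, tree_prefix_list,
                                         count_min=False)
    return vector                           # one-hot over the 4 leaves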
from roundup import date def import_data_12 (db, user, dep, olo) : sd = dict (months = 1.0, required_overtime = 1, weekly = 0) otp = db.overtime_period.filter (None, sd) assert len (otp) == 1 otp = otp [0] db.user_dynamic.create \ ( hours_fri = 7.5 , hours_sun = 0.0 , hours_wed = 7.75 , vacation_yearly = 25.0 , all_in = 0 , booking_allowed = 1 , durations_allowed = 0 , hours_tue = 7.75 , supp_per_period = 7.0 , weekly_hours = 38.5 , hours_mon = 7.75 , hours_thu = 7.75 , valid_from = date.Date ("2013-01-01.00:00:00") , valid_to = date.Date ("2013-04-01.00:00:00") , weekend_allowed = 0 , travel_full = 0 , hours_sat = 0.0 , department = dep , org_location = olo , overtime_period = otp , user = user ) sd = dict (months = 1.0, required_overtime = 1, weekly = 0) otp = db.overtime_period.filter (None, sd) assert len (otp) == 1 otp = otp [0] db.user_dynamic.create \ ( hours_fri = 7.5 , hours_sun = 0.0 , hours_wed = 7.75 , vacation_yearly = 25.0 , all_in = 0 , booking_allowed = 1 , durations_allowed = 0 , hours_tue = 7.75 , supp_per_period = 7.0 , weekly_hours = 38.5 , hours_mon = 7.75 , hours_thu = 7.75 , valid_from = date.Date ("2013-04-01.00:00:00") , valid_to = date.Date ("2013-07-01.00:00:00") , weekend_allowed = 0 , travel_full = 0 , hours_sat = 0.0 , department = dep , org_location = olo , overtime_period = otp , user = user ) sd = dict (months = 1.0, required_overtime = 1, weekly = 0) otp = db.overtime_period.filter (None, sd) assert len (otp) == 1 otp = otp [0] db.user_dynamic.create \ ( hours_fri = 7.5 , hours_sun = 0.0 , hours_wed = 7.75 , vacation_yearly = 25.0 , all_in = 0 , booking_allowed = 1 , durations_allowed = 0 , hours_tue = 7.75 , supp_per_period = 7.0 , weekly_hours = 38.5 , hours_mon = 7.75 , hours_thu = 7.75 , valid_from = date.Date ("2013-09-01.00:00:00") , weekend_allowed = 0 , travel_full = 0 , hours_sat = 0.0 , department = dep , org_location = olo , overtime_period = otp , user = user ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-01') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '1' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-02') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-03') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-04') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.5 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-05') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-06') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-07') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '16:00' , end = '19:45' , work_location = '1' , wp = '4' ) db.time_record.create \ ( daily_record = dr , start = '12:30' , end = '13:30' , work_location = '1' , wp = '5' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '12:30' , work_location = '1' , wp = '6' ) 
db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '16:00' , work_location = '1' , wp = '7' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-08') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '8' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '9' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-09') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , time_activity = '1' , work_location = '1' , wp = '9' ) db.time_record.create \ ( daily_record = dr , start = '12:30' , end = '13:30' , work_location = '1' , wp = '10' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '11:00' , work_location = '1' , wp = '11' ) db.time_record.create \ ( daily_record = dr , start = '11:00' , end = '12:30' , work_location = '1' , wp = '10' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-10') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-11') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.5 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-12') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-13') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-14') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '18:15' , end = '19:45' , work_location = '1' , wp = '8' ) db.time_record.create \ ( daily_record = dr , start = '12:45' , end = '13:30' , work_location = '1' , wp = '10' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '10:45' , work_location = '1' , wp = '12' ) db.time_record.create \ ( daily_record = dr , start = '10:45' , end = '12:45' , work_location = '1' , wp = '13' ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '14:45' , work_location = '1' , wp = '14' ) db.time_record.create \ ( daily_record = dr , start = '14:45' , end = '17:15' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '17:15' , end = '18:15' , work_location = '1' , wp = '4' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-15') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-16') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '11:30' , end = '13:30' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '11:30' , work_location = '1' , wp = '16' ) dr = db.daily_record.create \ ( user = user , 
date = date.Date ('2013-01-17') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '16:30' , end = '19:30' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '16:30' , work_location = '1' , wp = '17' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-18') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '11:00' , end = '14:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '11:00' , work_location = '1' , wp = '17' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-19') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-20') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-21') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:45' , work_location = '1' , wp = '10' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '17' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-22') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '18' ) db.time_record.create \ ( daily_record = dr , start = '11:00' , end = '13:30' , work_location = '1' , wp = '19' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '10:00' , work_location = '1' , wp = '20' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '11:00' , work_location = '1' , wp = '11' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-23') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , time_activity = '5' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '11:15' , end = '13:30' , work_location = '1' , wp = '17' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '11:15' , work_location = '1' , wp = '9' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:00' , time_activity = '5' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-24') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , time_activity = '5' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , time_activity = '5' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-25') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '14:00' , time_activity = '5' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-26') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-27') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = 
date.Date ('2013-01-28') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-29') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:45' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-30') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-01-31') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-01') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.5 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-02') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-03') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-04') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '12:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '12:30' , end = '15:15' , time_activity = '10' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '15:15' , end = '23:45' , time_activity = '10' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-05') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '00:00' , end = '00:30' , time_activity = '10' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '6' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '6' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-06') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '16:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-07') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '15:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '08:00' , end = '13:30' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '15:00' , end = '22:30' , time_activity = '10' , work_location = '1' , wp = '15' 
) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-08') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '12:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '12:30' , end = '15:30' , work_location = '1' , wp = '10' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-09') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-10') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-11') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-12') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-13') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-14') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-15') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:45' , work_location = '1' , wp = '22' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-16') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-17') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-18') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:15' , time_activity = '1' , work_location = '1' , wp = '9' ) db.time_record.create \ ( daily_record = dr , start = '10:15' , end = '13:30' , time_activity = '1' , work_location = '1' , wp = '9' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '10:15' , work_location = '1' , wp = '23' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-19') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-20') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , 
start = '14:00' , end = '19:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-21') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '24' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '24' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-22') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '11:00' , end = '14:30' , work_location = '1' , wp = '10' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '11:00' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-23') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-24') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-25') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-26') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-27') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-02-28') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-01') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.5 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-02') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-03') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-04') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-05') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-06') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-07') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-08') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( 
daily_record = dr , duration = 7.5 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-09') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-10') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-11') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-12') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-13') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '9' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '9' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-14') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '15' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '9' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-15') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '16:00' , work_location = '1' , wp = '18' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '15' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-16') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-17') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-18') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '9' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '9' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-19') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '22:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-20') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = 
user , date = date.Date ('2013-03-21') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '25' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '25' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:15' , work_location = '1' , wp = '23' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-22') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '12:00' , end = '13:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '12:00' , work_location = '1' , wp = '24' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-23') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-24') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-25') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:15' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-26') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-27') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:15' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-28') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-29') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-30') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-03-31') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-01') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '1' ) dr = db.daily_record.create \ ( user = user , 
date = date.Date ('2013-04-02') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-03') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-04') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-05') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '10:30' , end = '15:15' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-06') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-07') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-08') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-09') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-10') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '08:15' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-11') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-12') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '15:00' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-13') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-14') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-15') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '15:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , 
start = '15:30' , end = '23:45' , time_activity = '10' , work_location = '3' , wp = '26' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-16') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '16:00' , work_location = '6' , wp = '26' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '6' , wp = '26' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-17') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '6' , wp = '26' ) db.time_record.create \ ( daily_record = dr , start = '08:45' , end = '13:30' , work_location = '6' , wp = '26' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-18') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '15:30' , work_location = '6' , wp = '26' ) db.time_record.create \ ( daily_record = dr , start = '08:45' , end = '13:30' , work_location = '6' , wp = '26' ) db.time_record.create \ ( daily_record = dr , start = '15:30' , end = '21:30' , time_activity = '10' , work_location = '3' , wp = '26' ) db.time_record.create \ ( daily_record = dr , start = '22:00' , end = '22:30' , work_location = '1' , wp = '26' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-19') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '15:00' , work_location = '1' , wp = '22' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-20') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-21') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-22') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '22' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '22' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-23') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '22' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '22' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-24') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '21:30' , end = '22:00' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-25') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '17:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '11:00' , end = '13:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '11:00' , work_location = '1' , wp = '9' ) 
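# -----------------------------------------------------------------------------
# Editor's note: the statements above and below repeat the same pattern for
# every day -- one db.daily_record.create call per date, followed by one
# db.time_record.create call per booked interval.  If this fixture is ever
# regenerated, a small helper along the lines of the sketch below could remove
# most of the repetition.  This is only a sketch, not part of the original
# fixture: the name `add_day` is hypothetical, and it assumes the `db`, `user`
# and `date` names already in scope in this file.

def add_day (db, user, day, records, weekend_allowed = 0) :
    """ Create one daily_record for `day` plus one time_record per entry.

        Each entry in `records` is a dict of time_record fields, e.g.
        dict (start = '09:45', end = '13:30', work_location = '1', wp = '15')
        or, for duration-only bookings,
        dict (duration = 7.75, work_location = '5', wp = '2').
    """
    dr = db.daily_record.create \
        ( user              = user
        , date              = date.Date (day)
        , weekend_allowed   = weekend_allowed
        , required_overtime = 0
        )
    for rec in records :
        db.time_record.create (daily_record = dr, **rec)
    return dr

# Equivalent to the hand-written block for 2013-04-22 above:
# add_day (db, user, '2013-04-22',
#     [ dict (start = '14:00', end = '19:00', work_location = '1', wp = '22')
#     , dict (start = '09:15', end = '13:30', work_location = '1', wp = '22')
#     ])
# -----------------------------------------------------------------------------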
db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '1' , wp = '22' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-26') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '14:00' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-27') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-28') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-29') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 0.0 , work_location = '5' , wp = '27' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-04-30') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 0.0 , work_location = '5' , wp = '27' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-01') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '1' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-02') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-03') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '14:15' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-04') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-05') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-06') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '15:00' , end = '19:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '15:00' , work_location = '1' , wp = '20' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-07') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '22:00' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-08') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:30' , work_location = '1' , wp = '22' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '22' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-09') , weekend_allowed = 0 , required_overtime = 0 ) 
db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '1' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-10') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '14:00' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-11') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-12') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-13') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-14') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-15') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-16') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-17') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '14:00' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-18') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-19') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-20') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '1' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-21') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '18' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '28' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-22') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '18' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '18' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-23') , weekend_allowed = 0 , required_overtime = 0 ) 
db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-24') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '15:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-25') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-26') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-27') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '28' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-28') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-29') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '28' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-30') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '1' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-05-31') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-01') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-02') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-03') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '28' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '28' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-04') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-05') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:45' , work_location = '1' , wp = 
'21' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-06') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-07') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '16:30' , work_location = '1' , wp = '28' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-08') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-09') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-10') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '18' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '18' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-11') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '18' ) db.time_record.create \ ( daily_record = dr , start = '07:45' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-12') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '18' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '28' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-13') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:45' , work_location = '1' , wp = '28' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-14') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '15:30' , work_location = '1' , wp = '28' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-15') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-16') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-17') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:45' , work_location = '1' , wp = '28' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '28' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-18') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:30' , 
work_location = '1' , wp = '28' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '28' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-19') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '29' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '30' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-20') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '29' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-21') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '15:00' , work_location = '1' , wp = '30' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-22') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-23') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-24') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '18' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '18' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-25') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:30' , work_location = '1' , wp = '21' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '18' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-26') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '28' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '21' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-27') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '28' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '14' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-28') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '28' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '28' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-29') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-06-30') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-02') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = 
'14:00' , end = '18:30' , work_location = '1' , wp = '31' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '31' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-03') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:15' , work_location = '1' , wp = '32' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '32' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-04') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '15:00' , end = '19:30' , work_location = '1' , wp = '33' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '15:00' , work_location = '1' , wp = '32' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-05') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '33' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-06') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '11:00' , end = '15:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '11:00' , work_location = '1' , wp = '6' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-07') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-08') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-09') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-10') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-11') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-12') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '35' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '35' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-13') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '15:00' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date 
('2013-09-14') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-15') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-16') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 0.0 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-17') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '15:00' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-18') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-19') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-20') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-21') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-22') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-23') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:45' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-24') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:30' , work_location = '1' , wp = '36' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '35' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-25') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '15:30' , end = '21:30' , work_location = '1' , wp = '33' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '15:00' , work_location = '1' , wp = '31' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-26') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:15' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = 
date.Date ('2013-09-27') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '16:45' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-28') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-29') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-30') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-01') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:45' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-02') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '15:30' , end = '21:00' , work_location = '1' , wp = '37' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '15:00' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-03') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-04') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:45' , end = '13:30' , work_location = '1' , wp = '37' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-05') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-06') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-09-01') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-07') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-08') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '15:00' , end = '21:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '14:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '21:30' , end = '23:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-09') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( 
daily_record = dr , start = '14:00' , end = '17:30' , work_location = '1' , wp = '32' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '32' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-10') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '37' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '32' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:30' , work_location = '1' , wp = '37' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-11') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '15:00' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-12') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-13') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-14') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-15') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-16') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '1' , wp = '31' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '37' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-17') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '38' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '37' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '22:00' , work_location = '1' , wp = '39' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-18') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '15:15' , work_location = '1' , wp = '33' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '39' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-19') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-20') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-28') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:30' , work_location = '1' , wp = '34' ) 
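# -----------------------------------------------------------------------------
# Editor's note: two time_record shapes occur in this fixture -- interval
# records with start/end given as 'HH:MM' strings, and duration-only records
# (hours as a float, used here for the work_location = '5' bookings).  The
# sketch below is not part of the original fixture; it only illustrates how
# the booked hours of one day could be summed for a quick sanity check, with
# `records` assumed to be a list of dicts shaped like the create calls above.

def booked_hours (records) :
    """ Sum hours over interval-style and duration-style records. """
    def minutes (hhmm) :
        h, m = hhmm.split (':')
        return int (h) * 60 + int (m)
    total = 0.0
    for rec in records :
        if 'duration' in rec :
            total += rec ['duration']
        else :
            total += (minutes (rec ['end']) - minutes (rec ['start'])) / 60.0
    return total

# e.g. the two intervals recorded for 2013-09-02 above:
# booked_hours ([ dict (start = '14:00', end = '18:30')
#               , dict (start = '09:00', end = '13:30') ])   # -> 9.0
# -----------------------------------------------------------------------------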
db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '37' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-29') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-30') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '40' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '40' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-31') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '1' , wp = '40' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-01') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.5 , work_location = '5' , wp = '1' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-02') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-03') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-21') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-22') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:45' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-23') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '15:45' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '16:15' , end = '22:00' , work_location = '1' , wp = '37' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-24') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '37' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-25') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '15:00' , work_location = '1' , wp = '37' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-26') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-10-27') , weekend_allowed = 0 , required_overtime = 
0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-04') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:45' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-05') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '12:00' , work_location = '1' , wp = '37' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-06') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '38' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '22:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-07') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '41' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '22:00' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-08') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '37' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-09') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-10') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-11') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:15' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-12') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '19:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '11:30' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '11:30' , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-13') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '22:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-14') , weekend_allowed = 0 , required_overtime = 0 ) 
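# Editor's note: unlike the surrounding days, which are all created with
# weekend_allowed = 0, the daily_record entries below for 2013-11-16,
# 2013-11-17, 2013-11-23 and 2013-11-24 (all Saturdays/Sundays) are created
# with weekend_allowed = 1, allowing time to be booked on those weekend days.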
db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '37' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-15') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:15' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-16') , weekend_allowed = 1 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '13:00' , end = '16:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-17') , weekend_allowed = 1 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '16:00' , end = '20:00' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-18') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '10:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-19') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:15' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '42' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-20') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-21') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '21:30' , end = '22:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-22') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 0.0 , work_location = '5' , wp = '27' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-23') , weekend_allowed = 1 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-24') , weekend_allowed = 1 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '10:15' , end = '14:45' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-25') , weekend_allowed = 0 , required_overtime = 0 ) 
db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:15' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-26') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '21:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-27') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-28') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:00' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-29') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '14:00' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-11-30') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-01') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-02') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '20:30' , end = '22:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-03') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '20:00' , work_location = '1' , wp = '43' ) db.time_record.create \ ( daily_record = dr , start = '08:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-04') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:00' , work_location = '1' , wp = '43' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-05') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '18:15' , work_location = '1' , wp = '38' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '34' ) dr = 
db.daily_record.create \ ( user = user , date = date.Date ('2013-12-06') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '1' , wp = '38' ) db.time_record.create \ ( daily_record = dr , start = '09:45' , end = '13:30' , work_location = '1' , wp = '38' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-07') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-08') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-16') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:30' , work_location = '1' , wp = '38' ) db.time_record.create \ ( daily_record = dr , start = '09:30' , end = '13:30' , work_location = '1' , wp = '38' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-17') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '14:00' , end = '17:00' , work_location = '1' , wp = '9' ) db.time_record.create \ ( daily_record = dr , start = '10:00' , end = '13:30' , work_location = '1' , wp = '38' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-18') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 0.0 , work_location = '5' , wp = '27' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-19') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 0.0 , work_location = '5' , wp = '27' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-20') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , start = '12:15' , end = '15:00' , work_location = '1' , wp = '34' ) db.time_record.create \ ( daily_record = dr , start = '09:15' , end = '12:15' , work_location = '1' , wp = '9' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-21') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-22') , weekend_allowed = 0 , required_overtime = 0 ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-09') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-10') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-11') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-12') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.75 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-13') , weekend_allowed = 0 , required_overtime = 0 ) db.time_record.create \ ( daily_record = dr , duration = 7.5 , work_location = '5' , wp = '2' ) dr = db.daily_record.create \ ( user = user , date = date.Date ('2013-12-14') , 
weekend_allowed = 0 , required_overtime = 0 )
dr = db.daily_record.create(user=user, date=date.Date('2013-12-15'), weekend_allowed=0, required_overtime=0)
dr = db.daily_record.create(user=user, date=date.Date('2013-12-23'), weekend_allowed=0, required_overtime=0)
db.time_record.create(daily_record=dr, duration=7.75, work_location='5', wp='2')
dr = db.daily_record.create(user=user, date=date.Date('2013-12-24'), weekend_allowed=0, required_overtime=0)
db.time_record.create(daily_record=dr, duration=4.0, work_location='5', wp='1')
db.time_record.create(daily_record=dr, duration=3.75, work_location='5', wp='2')
dr = db.daily_record.create(user=user, date=date.Date('2013-12-25'), weekend_allowed=0, required_overtime=0)
db.time_record.create(daily_record=dr, duration=7.75, work_location='5', wp='1')
dr = db.daily_record.create(user=user, date=date.Date('2013-12-26'), weekend_allowed=0, required_overtime=0)
db.time_record.create(daily_record=dr, duration=7.75, work_location='5', wp='1')
dr = db.daily_record.create(user=user, date=date.Date('2013-12-27'), weekend_allowed=0, required_overtime=0)
db.time_record.create(daily_record=dr, duration=7.5, work_location='5', wp='2')
dr = db.daily_record.create(user=user, date=date.Date('2013-12-28'), weekend_allowed=0, required_overtime=0)
dr = db.daily_record.create(user=user, date=date.Date('2013-12-29'), weekend_allowed=0, required_overtime=0)
dr = db.daily_record.create(user=user, date=date.Date('2013-12-30'), weekend_allowed=0, required_overtime=0)
db.time_record.create(daily_record=dr, duration=7.75, work_location='5', wp='2')
dr = db.daily_record.create(user=user, date=date.Date('2013-12-31'), weekend_allowed=0, required_overtime=0)
db.time_record.create(daily_record=dr, duration=4.0, work_location='5', wp='1')
db.time_record.create(daily_record=dr, duration=3.75, work_location='5', wp='2')
db.commit()
# end def import_data_12
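# --- Illustrative sketch (not part of the original fixture) ----------------
# The rows above book work either as 'HH:MM' start/end pairs or directly as a
# duration in hours. Assuming the obvious reading of the start/end pairs, the
# hypothetical helper below shows how such a pair translates into booked
# hours; booked_hours() is an illustration only, not part of the tracker API.
from datetime import datetime

def booked_hours(start, end):
    # Span between two 'HH:MM' strings, expressed in hours.
    fmt = '%H:%M'
    delta = datetime.strptime(end, fmt) - datetime.strptime(start, fmt)
    return delta.total_seconds() / 3600.0

# The 2013-11-04 fixture day above records 14:00-18:45 and 09:30-13:30
# on wp '34', i.e. 4.75 + 4.0 = 8.75 booked hours.
assert booked_hours('14:00', '18:45') + booked_hours('09:30', '13:30') == 8.75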
# Assets/compilers/shader.bzl

ShaderLibraryInfo = provider(
    fields = {
        "include_directories": "directories where the library files reside",
    },
)

def _bengine_shader_library_impl(ctx):
    library_directory = "{}_shader_includes".format(ctx.label.name)

    # Symlink every source into an isolated include directory for this library.
    isolated_files = []
    for src in ctx.files.srcs:
        isolated_file = ctx.actions.declare_file("{}/{}".format(library_directory, src.short_path))
        ctx.actions.symlink(
            output = isolated_file,
            target_file = src,
            progress_message = "Symlinking {} to {}".format(src.short_path, isolated_file.short_path),
        )
        isolated_files.append(isolated_file)

    # Collect transitive files and include directories from dependencies.
    dependency_library_depsets = []
    dependency_file_depsets = []
    for dep in ctx.attr.deps:
        dependency_file_depsets.append(dep[DefaultInfo].files)
        dependency_library_depsets.append(dep[ShaderLibraryInfo].include_directories)

    return [
        DefaultInfo(files = depset(direct = isolated_files, transitive = dependency_file_depsets)),
        ShaderLibraryInfo(include_directories = depset(direct = [library_directory], transitive = dependency_library_depsets)),
    ]

bengine_shader_library = rule(
    implementation = _bengine_shader_library_impl,
    attrs = {
        "srcs": attr.label_list(allow_files = True),
        "deps": attr.label_list(providers = [DefaultInfo, ShaderLibraryInfo]),
    },
)

def _bengine_shader_impl(ctx):
    vertex_file = ctx.file.vertex_src
    fragment_file = ctx.file.fragment_src
    semantics_file = ctx.file.semantics_file

    if ctx.attr.output_prefix_path != "":
        output_file_name = "{}/{}.shader".format(ctx.attr.output_prefix_path, ctx.label.name)
    else:
        output_file_name = "{}.shader".format(ctx.label.name)
    output_file = ctx.actions.declare_file(output_file_name)

    # One depset of include directories per dependency.
    include_directories = []
    for dep in ctx.attr.deps:
        include_directories.append(dep[ShaderLibraryInfo].include_directories)

    args = ctx.actions.args()
    args.add("--vertex-source", vertex_file)
    args.add("--fragment-source", fragment_file)
    args.add("--semantics-file", semantics_file)
    # Flatten the per-dependency depsets before joining them into a single flag.
    args.add_joined("--include-directories", depset(transitive = include_directories), join_with = ",")
    args.add("--output", output_file)
    args.add("--quiet")

    ctx.actions.run(
        mnemonic = "ShaderCompiler",
        executable = ctx.executable._compiler,
        arguments = [args],
        progress_message = "{}: compiling {}".format(ctx.label.name, output_file_name),
        # Dependency library files are declared as inputs so the compiler can
        # read the headers referenced via --include-directories in a sandbox.
        inputs = depset(
            direct = [vertex_file, fragment_file, semantics_file],
            transitive = [dep[DefaultInfo].files for dep in ctx.attr.deps],
        ),
        outputs = [output_file],
    )

    return [
        DefaultInfo(
            files = depset([output_file]),
            runfiles = ctx.runfiles(files = [output_file]),
        ),
    ]

bengine_shader = rule(
    implementation = _bengine_shader_impl,
    attrs = {
        "vertex_src": attr.label(allow_single_file = True),
        "fragment_src": attr.label(allow_single_file = True),
        "semantics_file": attr.label(allow_single_file = True),
        "deps": attr.label_list(providers = [ShaderLibraryInfo]),
        "output_prefix_path": attr.string(),
        "_compiler": attr.label(
            default = Label("//assets/compilers:shader_compiler"),
            allow_single_file = True,
            executable = True,
            cfg = "exec",
        ),
    },
)
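# --- Illustrative usage (not from the original repository) -----------------
# A hypothetical BUILD file sketching how the two rules above are meant to be
# wired together: a bengine_shader_library exports an include directory, and a
# bengine_shader target compiles against it. All target and file names below
# are made up for the example, and the load path assumes the .bzl file lives
# at //Assets/compilers as indicated by the file header.
load("//Assets/compilers:shader.bzl", "bengine_shader", "bengine_shader_library")

bengine_shader_library(
    name = "lighting_includes",  # hypothetical library of shared shader includes
    srcs = glob(["includes/*.glsl"]),
)

bengine_shader(
    name = "forward_pass",  # hypothetical shader target
    vertex_src = "forward_pass.vert",
    fragment_src = "forward_pass.frag",
    semantics_file = "forward_pass.semantics",
    deps = [":lighting_includes"],
)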
# Source: tirkarthi/python-cybox -- cybox/bindings/win_executable_file_object.py
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.

import sys

from mixbox.binding_utils import *
from . import cybox_common
from . import win_file_object


class PEChecksumType(GeneratedsSuper):
    """The PECheckSumType records the checksum of the PE file, both as
    found in the file and computed."""
    subclass = None
    superclass = None
    def __init__(self, PE_Computed_API=None, PE_File_API=None, PE_File_Raw=None):
        self.PE_Computed_API = PE_Computed_API
        self.PE_File_API = PE_File_API
        self.PE_File_Raw = PE_File_Raw
    def factory(*args_, **kwargs_):
        if PEChecksumType.subclass:
            return PEChecksumType.subclass(*args_, **kwargs_)
        else:
            return PEChecksumType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_PE_Computed_API(self): return self.PE_Computed_API
    def set_PE_Computed_API(self, PE_Computed_API): self.PE_Computed_API = PE_Computed_API
    def validate_LongObjectPropertyType(self, value):
        # Validate type cybox_common.LongObjectPropertyType, a restriction on None.
        pass
    def get_PE_File_API(self): return self.PE_File_API
    def set_PE_File_API(self, PE_File_API): self.PE_File_API = PE_File_API
    def get_PE_File_Raw(self): return self.PE_File_Raw
    def set_PE_File_Raw(self, PE_File_Raw): self.PE_File_Raw = PE_File_Raw
    def hasContent_(self):
        if (
            self.PE_Computed_API is not None or
            self.PE_File_API is not None or
            self.PE_File_Raw is not None
            ):
            return True
        else:
            return False
    def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEChecksumType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(lwrite, level, pretty_print)
        lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEChecksumType')
        if self.hasContent_():
            lwrite('>%s' % (eol_, ))
            self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(lwrite, level, pretty_print)
            lwrite('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            lwrite('/>%s' % (eol_, ))
    def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEChecksumType'):
        pass
    def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEChecksumType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.PE_Computed_API is not None:
            self.PE_Computed_API.export(lwrite, level, 'WinExecutableFileObj:', name_='PE_Computed_API', pretty_print=pretty_print)
        if self.PE_File_API is not None:
            self.PE_File_API.export(lwrite, level, 'WinExecutableFileObj:', name_='PE_File_API', pretty_print=pretty_print)
        if self.PE_File_Raw is not None:
            self.PE_File_Raw.export(lwrite, level, 'WinExecutableFileObj:', name_='PE_File_Raw', pretty_print=pretty_print)
    def build(self, node):
        self.__sourcenode__ = node
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'PE_Computed_API':
            obj_ = cybox_common.LongObjectPropertyType.factory()
            obj_.build(child_)
            self.set_PE_Computed_API(obj_)
        elif nodeName_ == 'PE_File_API':
            obj_ =
cybox_common.LongObjectPropertyType.factory() obj_.build(child_) self.set_PE_File_API(obj_) elif nodeName_ == 'PE_File_Raw': obj_ = cybox_common.LongObjectPropertyType.factory() obj_.build(child_) self.set_PE_File_Raw(obj_) # end class PEChecksumType class PEExportsType(GeneratedsSuper): """PEExportsType specifies the PE File exports data section. The exports data section contains information about symbols exported by the PE File (a DLL) which can be dynamically loaded by other executables. This type abstracts, and its components, abstract the Windows structures.""" subclass = None superclass = None def __init__(self, Name=None, Exported_Functions=None, Number_Of_Functions=None, Exports_Time_Stamp=None, Number_Of_Addresses=None, Number_Of_Names=None): self.Name = Name self.Exported_Functions = Exported_Functions self.Number_Of_Functions = Number_Of_Functions self.Exports_Time_Stamp = Exports_Time_Stamp self.Number_Of_Addresses = Number_Of_Addresses self.Number_Of_Names = Number_Of_Names def factory(*args_, **kwargs_): if PEExportsType.subclass: return PEExportsType.subclass(*args_, **kwargs_) else: return PEExportsType(*args_, **kwargs_) factory = staticmethod(factory) def get_Name(self): return self.Name def set_Name(self, Name): self.Name = Name def get_Exported_Functions(self): return self.Exported_Functions def set_Exported_Functions(self, Exported_Functions): self.Exported_Functions = Exported_Functions def get_Number_Of_Functions(self): return self.Number_Of_Functions def set_Number_Of_Functions(self, Number_Of_Functions): self.Number_Of_Functions = Number_Of_Functions def get_Exports_Time_Stamp(self): return self.Exports_Time_Stamp def set_Exports_Time_Stamp(self, Exports_Time_Stamp): self.Exports_Time_Stamp = Exports_Time_Stamp def validate_DateTimeObjectPropertyType(self, value): # Validate type cybox_common.DateTimeObjectPropertyType, a restriction on None. pass def get_Number_Of_Addresses(self): return self.Number_Of_Addresses def set_Number_Of_Addresses(self, Number_Of_Addresses): self.Number_Of_Addresses = Number_Of_Addresses def validate_LongObjectPropertyType(self, value): # Validate type cybox_common.LongObjectPropertyType, a restriction on None. pass def get_Number_Of_Names(self): return self.Number_Of_Names def set_Number_Of_Names(self, Number_Of_Names): self.Number_Of_Names = Number_Of_Names def hasContent_(self): if ( self. 
Name is not None or self.Exported_Functions is not None or self.Number_Of_Functions is not None or self.Exports_Time_Stamp is not None or self.Number_Of_Addresses is not None or self.Number_Of_Names is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEExportsType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEExportsType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEExportsType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEExportsType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Name is not None: self.Name.export(lwrite, level, 'WinExecutableFileObj:', name_='Name', pretty_print=pretty_print) if self.Exported_Functions is not None: self.Exported_Functions.export(lwrite, level, 'WinExecutableFileObj:', name_='Exported_Functions', pretty_print=pretty_print) if self.Exports_Time_Stamp is not None: self.Exports_Time_Stamp.export(lwrite, level, 'WinExecutableFileObj:', name_='Exports_Time_Stamp', pretty_print=pretty_print) if self.Number_Of_Addresses is not None: self.Number_Of_Addresses.export(lwrite, level, 'WinExecutableFileObj:', name_='Number_Of_Addresses', pretty_print=pretty_print) if self.Number_Of_Names is not None: self.Number_Of_Names.export(lwrite, level, 'WinExecutableFileObj:', name_='Number_Of_Names', pretty_print=pretty_print) if self.Number_Of_Functions is not None: self.Number_Of_Functions.export(lwrite, level, 'WinExecutableFileObj:', name_='Number_Of_Functions', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Name': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Name(obj_) elif nodeName_ == 'Exported_Functions': obj_ = PEExportedFunctionsType.factory() obj_.build(child_) self.set_Exported_Functions(obj_) elif nodeName_ == 'Number_Of_Functions': obj_ = cybox_common.IntegerObjectPropertyType.factory() obj_.build(child_) self.set_Number_Of_Functions(obj_) elif nodeName_ == 'Exports_Time_Stamp': obj_ = cybox_common.DateTimeObjectPropertyType.factory() obj_.build(child_) self.set_Exports_Time_Stamp(obj_) elif nodeName_ == 'Number_Of_Addresses': obj_ = cybox_common.LongObjectPropertyType.factory() obj_.build(child_) self.set_Number_Of_Addresses(obj_) elif nodeName_ == 'Number_Of_Names': obj_ = cybox_common.LongObjectPropertyType.factory() obj_.build(child_) self.set_Number_Of_Names(obj_) # end class PEExportsType class PEExportedFunctionsType(GeneratedsSuper): """PEExportedFunctionsType specifies a list of PE exported functions""" 
subclass = None superclass = None def __init__(self, Exported_Function=None): if Exported_Function is None: self.Exported_Function = [] else: self.Exported_Function = Exported_Function def factory(*args_, **kwargs_): if PEExportedFunctionsType.subclass: return PEExportedFunctionsType.subclass(*args_, **kwargs_) else: return PEExportedFunctionsType(*args_, **kwargs_) factory = staticmethod(factory) def get_Exported_Function(self): return self.Exported_Function def set_Exported_Function(self, Exported_Function): self.Exported_Function = Exported_Function def add_Exported_Function(self, value): self.Exported_Function.append(value) def insert_Exported_Function(self, index, value): self.Exported_Function[index] = value def hasContent_(self): if ( self.Exported_Function ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionsType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEExportedFunctionsType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionsType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionsType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' for Exported_Function_ in self.Exported_Function: Exported_Function_.export(lwrite, level, 'WinExecutableFileObj:', name_='Exported_Function', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Exported_Function': obj_ = PEExportedFunctionType.factory() obj_.build(child_) self.Exported_Function.append(obj_) # end class PEExportedFunctionsType class PESectionListType(GeneratedsSuper): """Specifies a list of sections that appear in the PE file.""" subclass = None superclass = None def __init__(self, Section=None): if Section is None: self.Section = [] else: self.Section = Section def factory(*args_, **kwargs_): if PESectionListType.subclass: return PESectionListType.subclass(*args_, **kwargs_) else: return PESectionListType(*args_, **kwargs_) factory = staticmethod(factory) def get_Section(self): return self.Section def set_Section(self, Section): self.Section = Section def add_Section(self, value): self.Section.append(value) def insert_Section(self, index, value): self.Section[index] = value def hasContent_(self): if ( self.Section ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PESectionListType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, 
namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PESectionListType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PESectionListType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PESectionListType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' for Section_ in self.Section: Section_.export(lwrite, level, 'WinExecutableFileObj:', name_='Section', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Section': obj_ = PESectionType.factory() obj_.build(child_) self.Section.append(obj_) # end class PESectionListType class EntropyType(GeneratedsSuper): """Specifies the result of an entropy computation.""" subclass = None superclass = None def __init__(self, Value=None, Min=None, Max=None): self.Value = Value self.Min = Min self.Max = Max def factory(*args_, **kwargs_): if EntropyType.subclass: return EntropyType.subclass(*args_, **kwargs_) else: return EntropyType(*args_, **kwargs_) factory = staticmethod(factory) def get_Value(self): return self.Value def set_Value(self, Value): self.Value = Value def validate_FloatObjectPropertyType(self, value): # Validate type cybox_common.FloatObjectPropertyType, a restriction on None. 
pass def get_Min(self): return self.Min def set_Min(self, Min): self.Min = Min def get_Max(self): return self.Max def set_Max(self, Max): self.Max = Max def hasContent_(self): if ( self.Value is not None or self.Min is not None or self.Max is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='EntropyType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='EntropyType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='EntropyType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='EntropyType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Value is not None: self.Value.export(lwrite, level, 'WinExecutableFileObj:', name_='Value', pretty_print=pretty_print) if self.Min is not None: self.Min.export(lwrite, level, 'WinExecutableFileObj:', name_='Min', pretty_print=pretty_print) if self.Max is not None: self.Max.export(lwrite, level, 'WinExecutableFileObj:', name_='Max', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Value': obj_ = cybox_common.FloatObjectPropertyType.factory() obj_.build(child_) self.set_Value(obj_) elif nodeName_ == 'Min': obj_ = cybox_common.FloatObjectPropertyType.factory() obj_.build(child_) self.set_Min(obj_) elif nodeName_ == 'Max': obj_ = cybox_common.FloatObjectPropertyType.factory() obj_.build(child_) self.set_Max(obj_) # end class EntropyType class PEImportType(GeneratedsSuper): """The PEImportType type is intended as container for the properties relevant to PE binary imports.The delay_load field is a boolean value that is intended to describe whether a PE binary import is delay-load or not.The initially_visible field refers to whether the import is initially visible, with regards to being initially visible or hidden in relation to PE binary packing. 
A packed binary will typically have few initially visible imports, and thus it is necessary to make the distinction between those that are visible initially or only after the binary is unpacked.""" subclass = None superclass = None def __init__(self, initially_visible=None, delay_load=None, File_Name=None, Imported_Functions=None, Virtual_Address=None): self.initially_visible = _cast(bool, initially_visible) self.delay_load = _cast(bool, delay_load) self.File_Name = File_Name self.Imported_Functions = Imported_Functions self.Virtual_Address = Virtual_Address def factory(*args_, **kwargs_): if PEImportType.subclass: return PEImportType.subclass(*args_, **kwargs_) else: return PEImportType(*args_, **kwargs_) factory = staticmethod(factory) def get_File_Name(self): return self.File_Name def set_File_Name(self, File_Name): self.File_Name = File_Name def validate_StringObjectPropertyType(self, value): # Validate type cybox_common.StringObjectPropertyType, a restriction on None. pass def get_Imported_Functions(self): return self.Imported_Functions def set_Imported_Functions(self, Imported_Functions): self.Imported_Functions = Imported_Functions def get_Virtual_Address(self): return self.Virtual_Address def set_Virtual_Address(self, Virtual_Address): self.Virtual_Address = Virtual_Address def validate_HexBinaryObjectPropertyType(self, value): # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None. pass def get_initially_visible(self): return self.initially_visible def set_initially_visible(self, initially_visible): self.initially_visible = initially_visible def get_delay_load(self): return self.delay_load def set_delay_load(self, delay_load): self.delay_load = delay_load def hasContent_(self): if ( self.File_Name is not None or self.Imported_Functions is not None or self.Virtual_Address is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEImportType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEImportType'): if self.initially_visible is not None: lwrite(' initially_visible="%s"' % self.gds_format_boolean(self.initially_visible, input_name='initially_visible')) if self.delay_load is not None: lwrite(' delay_load="%s"' % self.gds_format_boolean(self.delay_load, input_name='delay_load')) def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.File_Name is not None: self.File_Name.export(lwrite, level, 'WinExecutableFileObj:', name_='File_Name', pretty_print=pretty_print) if self.Imported_Functions is not None: self.Imported_Functions.export(lwrite, level, 'WinExecutableFileObj:', name_='Imported_Functions', pretty_print=pretty_print) if self.Virtual_Address is not None: self.Virtual_Address.export(lwrite, level, 'WinExecutableFileObj:', 
name_='Virtual_Address', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('initially_visible', node) if value is not None: if value in ('true', '1'): self.initially_visible = True elif value in ('false', '0'): self.initially_visible = False else: raise_parse_error(node, 'Bad boolean attribute') value = find_attr_value_('delay_load', node) if value is not None: if value in ('true', '1'): self.delay_load = True elif value in ('false', '0'): self.delay_load = False else: raise_parse_error(node, 'Bad boolean attribute') def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'File_Name': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_File_Name(obj_) elif nodeName_ == 'Imported_Functions': obj_ = PEImportedFunctionsType.factory() obj_.build(child_) self.set_Imported_Functions(obj_) elif nodeName_ == 'Virtual_Address': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Virtual_Address(obj_) # end class PEImportType class PEImportedFunctionsType(GeneratedsSuper): """A list of PE imported functions""" subclass = None superclass = None def __init__(self, Imported_Function=None): if Imported_Function is None: self.Imported_Function = [] else: self.Imported_Function = Imported_Function def factory(*args_, **kwargs_): if PEImportedFunctionsType.subclass: return PEImportedFunctionsType.subclass(*args_, **kwargs_) else: return PEImportedFunctionsType(*args_, **kwargs_) factory = staticmethod(factory) def get_Imported_Function(self): return self.Imported_Function def set_Imported_Function(self, Imported_Function): self.Imported_Function = Imported_Function def add_Imported_Function(self, value): self.Imported_Function.append(value) def insert_Imported_Function(self, index, value): self.Imported_Function[index] = value def hasContent_(self): if ( self.Imported_Function ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionsType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEImportedFunctionsType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionsType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionsType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' for Imported_Function_ in self.Imported_Function: Imported_Function_.export(lwrite, level, 'WinExecutableFileObj:', name_='Imported_Function', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, 
already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Imported_Function': obj_ = PEImportedFunctionType.factory() obj_.build(child_) self.Imported_Function.append(obj_) # end class PEImportedFunctionsType class PEResourceContentType(cybox_common.BaseObjectPropertyType): """The PEResourceContentType specifies PE resource types via a union of the PEResourceTypeEnum type and the atomic xs:string type. Its base type is the CybOX Core cybox_common.BaseObjectPropertyType, for permitting complex (i.e. regular-expression based) specifications.This attribute is optional and specifies the expected type for the value of the specified property.""" subclass = None superclass = cybox_common.BaseObjectPropertyType def __init__(self, obfuscation_algorithm_ref=None, refanging_transform_type=None, has_changed=None, delimiter='##comma##', pattern_type=None, datatype='string', refanging_transform=None, is_case_sensitive=True, bit_mask=None, appears_random=None, observed_encoding=None, defanging_algorithm_ref=None, is_obfuscated=None, regex_syntax=None, apply_condition='ANY', trend=None, idref=None, is_defanged=None, id=None, condition=None, valueOf_=None): super(PEResourceContentType, self).__init__(obfuscation_algorithm_ref, refanging_transform_type, has_changed, delimiter, pattern_type, datatype, refanging_transform, is_case_sensitive, bit_mask, appears_random, observed_encoding, defanging_algorithm_ref, is_obfuscated, regex_syntax, apply_condition, trend, idref, is_defanged, id, condition, valueOf_) self.datatype = _cast(None, datatype) self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if PEResourceContentType.subclass: return PEResourceContentType.subclass(*args_, **kwargs_) else: return PEResourceContentType(*args_, **kwargs_) factory = staticmethod(factory) def get_datatype(self): return self.datatype def set_datatype(self, datatype): self.datatype = datatype def get_valueOf_(self): return self.valueOf_ def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_ def hasContent_(self): if ( self.valueOf_ or super(PEResourceContentType, self).hasContent_() ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEResourceContentType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEResourceContentType') if self.hasContent_(): lwrite('>') lwrite(quote_xml(self.valueOf_)) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEResourceContentType'): super(PEResourceContentType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='PEResourceContentType') if self.datatype is not None: lwrite(' datatype=%s' % (quote_attrib(self.datatype), )) def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEResourceContentType', fromsubclass_=False, pretty_print=True): super(PEResourceContentType, 
self).exportChildren(lwrite, level, 'WinExecutableFileObj:', name_, True, pretty_print=pretty_print) pass def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) self.valueOf_ = get_all_text_(node) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('datatype', node) if value is not None: self.datatype = value super(PEResourceContentType, self).buildAttributes(node, attrs, already_processed) def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class PEResourceContentType class PEResourceType(GeneratedsSuper): """The PEResourceType type is intended as container for the properties relevant to PE binary resources.""" subclass = None superclass = None def __init__(self, Type=None, Name=None, Size=None, Virtual_Address=None, Language=None, Sub_Language=None, Hashes=None, Data=None, extensiontype_=None): self.Type = Type self.Name = Name self.Size = Size self.Virtual_Address = Virtual_Address self.Language = Language self.Sub_Language = Sub_Language self.Hashes = Hashes self.Data = Data self.extensiontype_ = extensiontype_ def factory(*args_, **kwargs_): if PEResourceType.subclass: return PEResourceType.subclass(*args_, **kwargs_) else: return PEResourceType(*args_, **kwargs_) factory = staticmethod(factory) def get_Type(self): return self.Type def set_Type(self, Type): self.Type = Type def validate_PEResourceTypeEnum(self, value): # Validate type PEResourceTypeEnum, a restriction on xs:string. pass def get_Name(self): return self.Name def set_Name(self, Name): self.Name = Name def get_Size(self): return self.Size def set_Size(self, Size): self.Size = Size def get_Virtual_Address(self): return self.Virtual_Address def set_Virtual_Address(self, Virtual_Address): self.Virtual_Address = Virtual_Address def get_Language(self): return self.Language def set_Language(self, Language): self.Language = Language def get_Sub_Language(self): return self.Sub_Language def set_Sub_Language(self, Sub_Language): self.Sub_Language = Sub_Language def validate_StringObjectPropertyType(self, value): # Validate type cybox_common.StringObjectPropertyType, a restriction on None. 
pass def get_Hashes(self): return self.Hashes def set_Hashes(self, Hashes): self.Hashes = Hashes def get_Data(self): return self.Data def set_Data(self, Data): self.Data = Data def get_extensiontype_(self): return self.extensiontype_ def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_ def hasContent_(self): if ( self.Type is not None or self.Name is not None or self.Size is not None or self.Virtual_Address is not None or self.Language is not None or self.Sub_Language is not None or self.Hashes is not None or self.Data is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEResourceType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEResourceType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEResourceType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEResourceType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Type is not None: self.Type.export(lwrite, level, 'WinExecutableFileObj:', name_='Type', pretty_print=pretty_print) if self.Name is not None: self.Name.export(lwrite, level, 'WinExecutableFileObj:', name_='Name', pretty_print=pretty_print) if self.Size is not None: self.Size.export(lwrite, level, 'WinExecutableFileObj:', name_='Size', pretty_print=pretty_print) if self.Virtual_Address is not None: self.Virtual_Address.export(lwrite, level, 'WinExecutableFileObj:', name_='Virtual_Address', pretty_print=pretty_print) if self.Language is not None: self.Language.export(lwrite, level, 'WinExecutableFileObj:', name_='Language', pretty_print=pretty_print) if self.Sub_Language is not None: self.Sub_Language.export(lwrite, level, 'WinExecutableFileObj:', name_='Sub_Language', pretty_print=pretty_print) if self.Hashes is not None: self.Hashes.export(lwrite, level, 'WinExecutableFileObj:', name_='Hashes', pretty_print=pretty_print) if self.Data is not None: self.Data.export(lwrite, level, 'WinExecutableFileObj:', name_='Data', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('xsi:type', node) if value is not None: self.extensiontype_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Type': obj_ = PEResourceContentType.factory() obj_.build(child_) self.set_Type(obj_) elif nodeName_ == 'Name': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Name(obj_) elif nodeName_ == 'Size': obj_ = cybox_common.PositiveIntegerObjectPropertyType.factory() obj_.build(child_) self.set_Size(obj_) elif nodeName_ == 'Virtual_Address': obj_ = 
cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Virtual_Address(obj_) elif nodeName_ == 'Language': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Language(obj_) elif nodeName_ == 'Sub_Language': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Sub_Language(obj_) elif nodeName_ == 'Hashes': obj_ = cybox_common.HashListType.factory() obj_.build(child_) self.set_Hashes(obj_) elif nodeName_ == 'Data': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Data(obj_) # end class PEResourceType class PEVersionInfoResourceType(PEResourceType): """The PEVersionInfoResourceType characterizes the special VERSIONINFO resource type. For more information please see: http://msdn.microsoft.com/en- us/library/windows/desktop/aa381058(v=vs.85).aspx""" subclass = None superclass = PEResourceType xmlns = "http://cybox.mitre.org/objects#WinExecutableFileObject-2" xmlns_prefix = "WinExecutableFileObj" xml_type = "PEVersionInfoResourceType" xsi_type = "%s:%s" % (xmlns_prefix, xml_type) def __init__(self, Type=None, Name=None, Hashes=None, Comments=None, CompanyName=None, FileDescription=None, FileVersion=None, InternalName=None, LangID=None, LegalCopyright=None, LegalTrademarks=None, OriginalFilename=None, PrivateBuild=None, ProductName=None, ProductVersion=None, SpecialBuild=None): super(PEVersionInfoResourceType, self).__init__(Type, Name, Hashes, ) self.Comments = Comments self.CompanyName = CompanyName self.FileDescription = FileDescription self.FileVersion = FileVersion self.InternalName = InternalName self.LangID = LangID self.LegalCopyright = LegalCopyright self.LegalTrademarks = LegalTrademarks self.OriginalFilename = OriginalFilename self.PrivateBuild = PrivateBuild self.ProductName = ProductName self.ProductVersion = ProductVersion self.SpecialBuild = SpecialBuild def factory(*args_, **kwargs_): if PEVersionInfoResourceType.subclass: return PEVersionInfoResourceType.subclass(*args_, **kwargs_) else: return PEVersionInfoResourceType(*args_, **kwargs_) factory = staticmethod(factory) def get_Comments(self): return self.Comments def set_Comments(self, Comments): self.Comments = Comments def validate_StringObjectPropertyType(self, value): # Validate type cybox_common.StringObjectPropertyType, a restriction on None. 
pass def get_CompanyName(self): return self.CompanyName def set_CompanyName(self, CompanyName): self.CompanyName = CompanyName def get_FileDescription(self): return self.FileDescription def set_FileDescription(self, FileDescription): self.FileDescription = FileDescription def get_FileVersion(self): return self.FileVersion def set_FileVersion(self, FileVersion): self.FileVersion = FileVersion def get_InternalName(self): return self.InternalName def set_InternalName(self, InternalName): self.InternalName = InternalName def get_LangID(self): return self.LangID def set_LangID(self, LangID): self.LangID = LangID def get_LegalCopyright(self): return self.LegalCopyright def set_LegalCopyright(self, LegalCopyright): self.LegalCopyright = LegalCopyright def get_LegalTrademarks(self): return self.LegalTrademarks def set_LegalTrademarks(self, LegalTrademarks): self.LegalTrademarks = LegalTrademarks def get_OriginalFilename(self): return self.OriginalFilename def set_OriginalFilename(self, OriginalFilename): self.OriginalFilename = OriginalFilename def get_PrivateBuild(self): return self.PrivateBuild def set_PrivateBuild(self, PrivateBuild): self.PrivateBuild = PrivateBuild def get_ProductName(self): return self.ProductName def set_ProductName(self, ProductName): self.ProductName = ProductName def get_ProductVersion(self): return self.ProductVersion def set_ProductVersion(self, ProductVersion): self.ProductVersion = ProductVersion def get_SpecialBuild(self): return self.SpecialBuild def set_SpecialBuild(self, SpecialBuild): self.SpecialBuild = SpecialBuild def hasContent_(self): if ( self.Comments is not None or self.CompanyName is not None or self.FileDescription is not None or self.FileVersion is not None or self.InternalName is not None or self.LangID is not None or self.LegalCopyright is not None or self.LegalTrademarks is not None or self.OriginalFilename is not None or self.PrivateBuild is not None or self.ProductName is not None or self.ProductVersion is not None or self.SpecialBuild is not None or super(PEVersionInfoResourceType, self).hasContent_() ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEVersionInfoResourceType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEVersionInfoResourceType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEVersionInfoResourceType'): super(PEVersionInfoResourceType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='PEVersionInfoResourceType') if 'xsi:type' not in already_processed: already_processed.add('xsi:type') lwrite(" xsi:type='%s'" % self.xsi_type) def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEVersionInfoResourceType', fromsubclass_=False, pretty_print=True): super(PEVersionInfoResourceType, self).exportChildren(lwrite, level, 'WinExecutableFileObj:', name_, True, pretty_print=pretty_print) if pretty_print: eol_ = '\n' else: eol_ = '' if self.Comments is not 
None: self.Comments.export(lwrite, level, 'WinExecutableFileObj:', name_='Comments', pretty_print=pretty_print) if self.CompanyName is not None: self.CompanyName.export(lwrite, level, 'WinExecutableFileObj:', name_='CompanyName', pretty_print=pretty_print) if self.FileDescription is not None: self.FileDescription.export(lwrite, level, 'WinExecutableFileObj:', name_='FileDescription', pretty_print=pretty_print) if self.FileVersion is not None: self.FileVersion.export(lwrite, level, 'WinExecutableFileObj:', name_='FileVersion', pretty_print=pretty_print) if self.InternalName is not None: self.InternalName.export(lwrite, level, 'WinExecutableFileObj:', name_='InternalName', pretty_print=pretty_print) if self.LangID is not None: self.LangID.export(lwrite, level, 'WinExecutableFileObj:', name_='LangID', pretty_print=pretty_print) if self.LegalCopyright is not None: self.LegalCopyright.export(lwrite, level, 'WinExecutableFileObj:', name_='LegalCopyright', pretty_print=pretty_print) if self.LegalTrademarks is not None: self.LegalTrademarks.export(lwrite, level, 'WinExecutableFileObj:', name_='LegalTrademarks', pretty_print=pretty_print) if self.OriginalFilename is not None: self.OriginalFilename.export(lwrite, level, 'WinExecutableFileObj:', name_='OriginalFilename', pretty_print=pretty_print) if self.PrivateBuild is not None: self.PrivateBuild.export(lwrite, level, 'WinExecutableFileObj:', name_='PrivateBuild', pretty_print=pretty_print) if self.ProductName is not None: self.ProductName.export(lwrite, level, 'WinExecutableFileObj:', name_='ProductName', pretty_print=pretty_print) if self.ProductVersion is not None: self.ProductVersion.export(lwrite, level, 'WinExecutableFileObj:', name_='ProductVersion', pretty_print=pretty_print) if self.SpecialBuild is not None: self.SpecialBuild.export(lwrite, level, 'WinExecutableFileObj:', name_='SpecialBuild', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): super(PEVersionInfoResourceType, self).buildAttributes(node, attrs, already_processed) def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Comments': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Comments(obj_) elif nodeName_ == 'CompanyName': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_CompanyName(obj_) elif nodeName_ == 'FileDescription': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_FileDescription(obj_) elif nodeName_ == 'FileVersion': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_FileVersion(obj_) elif nodeName_ == 'InternalName': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_InternalName(obj_) elif nodeName_ == 'LangID': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_LangID(obj_) elif nodeName_ == 'LegalCopyright': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_LegalCopyright(obj_) elif nodeName_ == 'LegalTrademarks': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_LegalTrademarks(obj_) elif nodeName_ == 'OriginalFilename': obj_ = cybox_common.StringObjectPropertyType.factory() 
            obj_.build(child_)
            self.set_OriginalFilename(obj_)
        elif nodeName_ == 'PrivateBuild':
            obj_ = cybox_common.StringObjectPropertyType.factory()
            obj_.build(child_)
            self.set_PrivateBuild(obj_)
        elif nodeName_ == 'ProductName':
            obj_ = cybox_common.StringObjectPropertyType.factory()
            obj_.build(child_)
            self.set_ProductName(obj_)
        elif nodeName_ == 'ProductVersion':
            obj_ = cybox_common.StringObjectPropertyType.factory()
            obj_.build(child_)
            self.set_ProductVersion(obj_)
        elif nodeName_ == 'SpecialBuild':
            obj_ = cybox_common.StringObjectPropertyType.factory()
            obj_.build(child_)
            self.set_SpecialBuild(obj_)
        super(PEVersionInfoResourceType, self).buildChildren(child_, node, nodeName_, True)
# end class PEVersionInfoResourceType

class PEExportedFunctionType(GeneratedsSuper):
    """PEExportType specifies the type describing exported functions."""
    subclass = None
    superclass = None
    def __init__(self, Function_Name=None, Entry_Point=None, Ordinal=None):
        self.Function_Name = Function_Name
        self.Entry_Point = Entry_Point
        self.Ordinal = Ordinal
    def factory(*args_, **kwargs_):
        if PEExportedFunctionType.subclass:
            return PEExportedFunctionType.subclass(*args_, **kwargs_)
        else:
            return PEExportedFunctionType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Function_Name(self): return self.Function_Name
    def set_Function_Name(self, Function_Name): self.Function_Name = Function_Name
    def validate_StringObjectPropertyType(self, value):
        # Validate type cybox_common.StringObjectPropertyType, a restriction on None.
        pass
    def get_Entry_Point(self): return self.Entry_Point
    def set_Entry_Point(self, Entry_Point): self.Entry_Point = Entry_Point
    def validate_HexBinaryObjectPropertyType(self, value):
        # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None.
        pass
    def get_Ordinal(self): return self.Ordinal
    def set_Ordinal(self, Ordinal): self.Ordinal = Ordinal
    def validate_NonNegativeIntegerObjectPropertyType(self, value):
        # Validate type cybox_common.NonNegativeIntegerObjectPropertyType, a restriction on None.
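# --- Illustrative usage sketch (not part of the generated bindings) ----------
# A minimal, hedged example of how a PEExportedFunctionType instance might be
# populated and serialized with the factory()/set_*/export() methods defined
# in this module.  It assumes the cybox_common property types expose a
# generateDS-style `set_valueOf_` setter (not shown in this file), so treat it
# as a sketch rather than a definitive API reference:
#
#   import sys
#
#   name = cybox_common.StringObjectPropertyType.factory()
#   name.set_valueOf_('DllGetClassObject')        # hypothetical sample value
#   ordinal = cybox_common.NonNegativeIntegerObjectPropertyType.factory()
#   ordinal.set_valueOf_(1)
#
#   export_fn = PEExportedFunctionType.factory()
#   export_fn.set_Function_Name(name)
#   export_fn.set_Ordinal(ordinal)
#
#   # export() writes XML through the supplied callable (here: stdout).
#   export_fn.export(sys.stdout.write, 0)
# ------------------------------------------------------------------------------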
pass def hasContent_(self): if ( self.Function_Name is not None or self.Entry_Point is not None or self.Ordinal is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEExportedFunctionType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Function_Name is not None: self.Function_Name.export(lwrite, level, 'WinExecutableFileObj:', name_='Function_Name', pretty_print=pretty_print) if self.Entry_Point is not None: self.Entry_Point.export(lwrite, level, 'WinExecutableFileObj:', name_='Entry_Point', pretty_print=pretty_print) if self.Ordinal is not None: self.Ordinal.export(lwrite, level, 'WinExecutableFileObj:', name_='Ordinal', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Function_Name': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Function_Name(obj_) elif nodeName_ == 'Entry_Point': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Entry_Point(obj_) elif nodeName_ == 'Ordinal': obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory() obj_.build(child_) self.set_Ordinal(obj_) # end class PEExportedFunctionType class PEResourceListType(GeneratedsSuper): """PEResourceListType specifies a list of resources found in the PE file.""" subclass = None superclass = None def __init__(self, Resource=None): if Resource is None: self.Resource = [] else: self.Resource = Resource def factory(*args_, **kwargs_): if PEResourceListType.subclass: return PEResourceListType.subclass(*args_, **kwargs_) else: return PEResourceListType(*args_, **kwargs_) factory = staticmethod(factory) def get_Resource(self): return self.Resource def set_Resource(self, Resource): self.Resource = Resource def add_Resource(self, value): self.Resource.append(value) def insert_Resource(self, index, value): self.Resource[index] = value def hasContent_(self): if ( self.Resource ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEResourceListType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() 
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEResourceListType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEResourceListType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEResourceListType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' for Resource_ in self.Resource: if isinstance(Resource_, PEVersionInfoResourceType): Resource_.export(lwrite, level, 'WinExecutableFileObj:', name_='VersionInfoResource', pretty_print=pretty_print) elif isinstance(Resource_, PEResourceType): Resource_.export(lwrite, level, 'WinExecutableFileObj:', name_='Resource', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Resource': obj_ = PEResourceType.factory() obj_.build(child_) self.add_Resource(obj_) elif nodeName_ == 'VersionInfoResource': obj_ = PEVersionInfoResourceType.factory() obj_.build(child_) self.add_Resource(obj_) # end class PEResourceListType class PEImportedFunctionType(GeneratedsSuper): """PEImportedFunctionType specifies the type describing imported functions.""" subclass = None superclass = None def __init__(self, Function_Name=None, Hint=None, Ordinal=None, Bound=None, Virtual_Address=None): self.Function_Name = Function_Name self.Hint = Hint self.Ordinal = Ordinal self.Bound = Bound self.Virtual_Address = Virtual_Address def factory(*args_, **kwargs_): if PEImportedFunctionType.subclass: return PEImportedFunctionType.subclass(*args_, **kwargs_) else: return PEImportedFunctionType(*args_, **kwargs_) factory = staticmethod(factory) def get_Function_Name(self): return self.Function_Name def set_Function_Name(self, Function_Name): self.Function_Name = Function_Name def validate_StringObjectPropertyType(self, value): # Validate type cybox_common.StringObjectPropertyType, a restriction on None. pass def get_Hint(self): return self.Hint def set_Hint(self, Hint): self.Hint = Hint def validate_HexBinaryObjectPropertyType(self, value): # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None. pass def get_Ordinal(self): return self.Ordinal def set_Ordinal(self, Ordinal): self.Ordinal = Ordinal def validate_NonNegativeIntegerObjectPropertyType(self, value): # Validate type cybox_common.NonNegativeIntegerObjectPropertyType, a restriction on None. 
pass def get_Bound(self): return self.Bound def set_Bound(self, Bound): self.Bound = Bound def get_Virtual_Address(self): return self.Virtual_Address def set_Virtual_Address(self, Virtual_Address): self.Virtual_Address = Virtual_Address def hasContent_(self): if ( self.Function_Name is not None or self.Hint is not None or self.Ordinal is not None or self.Bound is not None or self.Virtual_Address is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEImportedFunctionType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Function_Name is not None: self.Function_Name.export(lwrite, level, 'WinExecutableFileObj:', name_='Function_Name', pretty_print=pretty_print) if self.Hint is not None: self.Hint.export(lwrite, level, 'WinExecutableFileObj:', name_='Hint', pretty_print=pretty_print) if self.Ordinal is not None: self.Ordinal.export(lwrite, level, 'WinExecutableFileObj:', name_='Ordinal', pretty_print=pretty_print) if self.Bound is not None: self.Bound.export(lwrite, level, 'WinExecutableFileObj:', name_='Bound', pretty_print=pretty_print) if self.Virtual_Address is not None: self.Virtual_Address.export(lwrite, level, 'WinExecutableFileObj:', name_='Virtual_Address', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Function_Name': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Function_Name(obj_) elif nodeName_ == 'Hint': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Hint(obj_) elif nodeName_ == 'Ordinal': obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory() obj_.build(child_) self.set_Ordinal(obj_) elif nodeName_ == 'Bound': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Bound(obj_) elif nodeName_ == 'Virtual_Address': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Virtual_Address(obj_) # end class PEImportedFunctionType class PEImportListType(GeneratedsSuper): """PEImportListType specifies a list of functions in an import data section.""" subclass = None superclass = None def __init__(self, Import=None): if Import is None: self.Import = [] else: self.Import = Import def factory(*args_, **kwargs_): if 
PEImportListType.subclass: return PEImportListType.subclass(*args_, **kwargs_) else: return PEImportListType(*args_, **kwargs_) factory = staticmethod(factory) def get_Import(self): return self.Import def set_Import(self, Import): self.Import = Import def add_Import(self, value): self.Import.append(value) def insert_Import(self, index, value): self.Import[index] = value def hasContent_(self): if ( self.Import ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportListType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEImportListType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEImportListType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEImportListType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' for Import_ in self.Import: Import_.export(lwrite, level, 'WinExecutableFileObj:', name_='Import', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Import': obj_ = PEImportType.factory() obj_.build(child_) self.Import.append(obj_) # end class PEImportListType class PESectionType(GeneratedsSuper): """The PESectionType type is intended as container for the properties relevant to PE binary sections. A PE Section consists of a header and data. The PESectionType contains properties that describe the Section Header and metadata computed about the section (e.g., hashes, entropy).""" subclass = None superclass = None def __init__(self, Section_Header=None, Data_Hashes=None, Entropy=None, Header_Hashes=None): self.Section_Header = Section_Header self.Data_Hashes = Data_Hashes self.Entropy = Entropy self.Header_Hashes = Header_Hashes def factory(*args_, **kwargs_): if PESectionType.subclass: return PESectionType.subclass(*args_, **kwargs_) else: return PESectionType(*args_, **kwargs_) factory = staticmethod(factory) def get_Section_Header(self): return self.Section_Header def set_Section_Header(self, Section_Header): self.Section_Header = Section_Header def get_Data_Hashes(self): return self.Data_Hashes def set_Data_Hashes(self, Data_Hashes): self.Data_Hashes = Data_Hashes def get_Entropy(self): return self.Entropy def set_Entropy(self, Entropy): self.Entropy = Entropy def get_Header_Hashes(self): return self.Header_Hashes def set_Header_Hashes(self, Header_Hashes): self.Header_Hashes = Header_Hashes def validate_SectionType(self, value): # Validate type SectionType, a restriction on None. 
pass def hasContent_(self): if ( self.Section_Header is not None or self.Data_Hashes is not None or self.Entropy is not None or self.Header_Hashes is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PESectionType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PESectionType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PESectionType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PESectionType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Section_Header is not None: self.Section_Header.export(lwrite, level, 'WinExecutableFileObj:', name_='Section_Header', pretty_print=pretty_print) if self.Data_Hashes is not None: self.Data_Hashes.export(lwrite, level, 'WinExecutableFileObj:', name_='Data_Hashes', pretty_print=pretty_print) if self.Entropy is not None: self.Entropy.export(lwrite, level, 'WinExecutableFileObj:', name_='Entropy', pretty_print=pretty_print) if self.Header_Hashes is not None: self.Header_Hashes.export(lwrite, level, 'WinExecutableFileObj:', name_='Header_Hashes', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Section_Header': obj_ = PESectionHeaderStructType.factory() obj_.build(child_) self.set_Section_Header(obj_) elif nodeName_ == 'Data_Hashes': obj_ = cybox_common.HashListType.factory() obj_.build(child_) self.set_Data_Hashes(obj_) elif nodeName_ == 'Entropy': obj_ = EntropyType.factory() obj_.build(child_) self.set_Entropy(obj_) elif nodeName_ == 'Header_Hashes': obj_ = cybox_common.HashListType.factory() obj_.build(child_) self.set_Header_Hashes(obj_) # end class PESectionType class PEDataDirectoryStructType(GeneratedsSuper): """The PEDataDirectoryStruct type is intended as container for the properties relevant to a PE binary's data directory structure.""" subclass = None superclass = None def __init__(self, Virtual_Address=None, Size=None): self.Virtual_Address = Virtual_Address self.Size = Size def factory(*args_, **kwargs_): if PEDataDirectoryStructType.subclass: return PEDataDirectoryStructType.subclass(*args_, **kwargs_) else: return PEDataDirectoryStructType(*args_, **kwargs_) factory = staticmethod(factory) def get_Virtual_Address(self): return self.Virtual_Address def set_Virtual_Address(self, Virtual_Address): self.Virtual_Address = Virtual_Address def validate_HexBinaryObjectPropertyType(self, value): # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None. 
pass def get_Size(self): return self.Size def set_Size(self, Size): self.Size = Size def validate_NonNegativeIntegerObjectPropertyType(self, value): # Validate type cybox_common.NonNegativeIntegerObjectPropertyType, a restriction on None. pass def hasContent_(self): if ( self.Virtual_Address is not None or self.Size is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEDataDirectoryStructType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEDataDirectoryStructType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEDataDirectoryStructType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEDataDirectoryStructType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Virtual_Address is not None: self.Virtual_Address.export(lwrite, level, 'WinExecutableFileObj:', name_='Virtual_Address', pretty_print=pretty_print) if self.Size is not None: self.Size.export(lwrite, level, 'WinExecutableFileObj:', name_='Size', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Virtual_Address': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Virtual_Address(obj_) elif nodeName_ == 'Size': obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory() obj_.build(child_) self.set_Size(obj_) # end class PEDataDirectoryStructType class PESectionHeaderStructType(GeneratedsSuper): """The PESectionHeaderStruct type is intended as container for the properties relevant to a PE binary's section header structure.""" subclass = None superclass = None def __init__(self, Name=None, Virtual_Size=None, Virtual_Address=None, Size_Of_Raw_Data=None, Pointer_To_Raw_Data=None, Pointer_To_Relocations=None, Pointer_To_Linenumbers=None, Number_Of_Relocations=None, Number_Of_Linenumbers=None, Characteristics=None): self.Name = Name self.Virtual_Size = Virtual_Size self.Virtual_Address = Virtual_Address self.Size_Of_Raw_Data = Size_Of_Raw_Data self.Pointer_To_Raw_Data = Pointer_To_Raw_Data self.Pointer_To_Relocations = Pointer_To_Relocations self.Pointer_To_Linenumbers = Pointer_To_Linenumbers self.Number_Of_Relocations = Number_Of_Relocations self.Number_Of_Linenumbers = Number_Of_Linenumbers self.Characteristics = Characteristics def factory(*args_, **kwargs_): if PESectionHeaderStructType.subclass: return PESectionHeaderStructType.subclass(*args_, **kwargs_) else: return PESectionHeaderStructType(*args_, **kwargs_) factory = staticmethod(factory) def get_Name(self): return self.Name def set_Name(self, 
Name): self.Name = Name def validate_StringObjectPropertyType(self, value): # Validate type cybox_common.StringObjectPropertyType, a restriction on None. pass def get_Virtual_Size(self): return self.Virtual_Size def set_Virtual_Size(self, Virtual_Size): self.Virtual_Size = Virtual_Size def validate_HexBinaryObjectPropertyType(self, value): # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None. pass def get_Virtual_Address(self): return self.Virtual_Address def set_Virtual_Address(self, Virtual_Address): self.Virtual_Address = Virtual_Address def get_Size_Of_Raw_Data(self): return self.Size_Of_Raw_Data def set_Size_Of_Raw_Data(self, Size_Of_Raw_Data): self.Size_Of_Raw_Data = Size_Of_Raw_Data def get_Pointer_To_Raw_Data(self): return self.Pointer_To_Raw_Data def set_Pointer_To_Raw_Data(self, Pointer_To_Raw_Data): self.Pointer_To_Raw_Data = Pointer_To_Raw_Data def get_Pointer_To_Relocations(self): return self.Pointer_To_Relocations def set_Pointer_To_Relocations(self, Pointer_To_Relocations): self.Pointer_To_Relocations = Pointer_To_Relocations def get_Pointer_To_Linenumbers(self): return self.Pointer_To_Linenumbers def set_Pointer_To_Linenumbers(self, Pointer_To_Linenumbers): self.Pointer_To_Linenumbers = Pointer_To_Linenumbers def get_Number_Of_Relocations(self): return self.Number_Of_Relocations def set_Number_Of_Relocations(self, Number_Of_Relocations): self.Number_Of_Relocations = Number_Of_Relocations def validate_NonNegativeIntegerObjectPropertyType(self, value): # Validate type cybox_common.NonNegativeIntegerObjectPropertyType, a restriction on None. pass def get_Number_Of_Linenumbers(self): return self.Number_Of_Linenumbers def set_Number_Of_Linenumbers(self, Number_Of_Linenumbers): self.Number_Of_Linenumbers = Number_Of_Linenumbers def get_Characteristics(self): return self.Characteristics def set_Characteristics(self, Characteristics): self.Characteristics = Characteristics def hasContent_(self): if ( self.Name is not None or self.Virtual_Size is not None or self.Virtual_Address is not None or self.Size_Of_Raw_Data is not None or self.Pointer_To_Raw_Data is not None or self.Pointer_To_Relocations is not None or self.Pointer_To_Linenumbers is not None or self.Number_Of_Relocations is not None or self.Number_Of_Linenumbers is not None or self.Characteristics is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PESectionHeaderStructType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PESectionHeaderStructType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PESectionHeaderStructType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PESectionHeaderStructType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Name is not None: self.Name.export(lwrite, level, 'WinExecutableFileObj:', name_='Name', pretty_print=pretty_print) if self.Virtual_Size is 
not None: self.Virtual_Size.export(lwrite, level, 'WinExecutableFileObj:', name_='Virtual_Size', pretty_print=pretty_print) if self.Virtual_Address is not None: self.Virtual_Address.export(lwrite, level, 'WinExecutableFileObj:', name_='Virtual_Address', pretty_print=pretty_print) if self.Size_Of_Raw_Data is not None: self.Size_Of_Raw_Data.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Raw_Data', pretty_print=pretty_print) if self.Pointer_To_Raw_Data is not None: self.Pointer_To_Raw_Data.export(lwrite, level, 'WinExecutableFileObj:', name_='Pointer_To_Raw_Data', pretty_print=pretty_print) if self.Pointer_To_Relocations is not None: self.Pointer_To_Relocations.export(lwrite, level, 'WinExecutableFileObj:', name_='Pointer_To_Relocations', pretty_print=pretty_print) if self.Pointer_To_Linenumbers is not None: self.Pointer_To_Linenumbers.export(lwrite, level, 'WinExecutableFileObj:', name_='Pointer_To_Linenumbers', pretty_print=pretty_print) if self.Number_Of_Relocations is not None: self.Number_Of_Relocations.export(lwrite, level, 'WinExecutableFileObj:', name_='Number_Of_Relocations', pretty_print=pretty_print) if self.Number_Of_Linenumbers is not None: self.Number_Of_Linenumbers.export(lwrite, level, 'WinExecutableFileObj:', name_='Number_Of_Linenumbers', pretty_print=pretty_print) if self.Characteristics is not None: self.Characteristics.export(lwrite, level, 'WinExecutableFileObj:', name_='Characteristics', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Name': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Name(obj_) elif nodeName_ == 'Virtual_Size': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Virtual_Size(obj_) elif nodeName_ == 'Virtual_Address': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Virtual_Address(obj_) elif nodeName_ == 'Size_Of_Raw_Data': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Raw_Data(obj_) elif nodeName_ == 'Pointer_To_Raw_Data': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Pointer_To_Raw_Data(obj_) elif nodeName_ == 'Pointer_To_Relocations': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Pointer_To_Relocations(obj_) elif nodeName_ == 'Pointer_To_Linenumbers': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Pointer_To_Linenumbers(obj_) elif nodeName_ == 'Number_Of_Relocations': obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory() obj_.build(child_) self.set_Number_Of_Relocations(obj_) elif nodeName_ == 'Number_Of_Linenumbers': obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory() obj_.build(child_) self.set_Number_Of_Linenumbers(obj_) elif nodeName_ == 'Characteristics': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Characteristics(obj_) # end class PESectionHeaderStructType class DOSHeaderType(GeneratedsSuper): """The DOSHeaderType type is a container for the characteristics of the _IMAGE_DOS_HEADER structure, which can be found 
in Winnt.h and pe.h. See http://www.csn.ul.ie/~caolan/pub/winresdump/winresdump /doc/pefile.html for more information about the winnt.h file, and http://www.tavi.co.uk/phobos/exeformat.html for even more clarification.""" subclass = None superclass = None def __init__(self, e_magic=None, e_cblp=None, e_cp=None, e_crlc=None, e_cparhdr=None, e_minalloc=None, e_maxalloc=None, e_ss=None, e_sp=None, e_csum=None, e_ip=None, e_cs=None, e_lfarlc=None, e_ovro=None, reserved1=None, e_oemid=None, e_oeminfo=None, reserved2=None, e_lfanew=None, Hashes=None): self.e_magic = e_magic self.e_cblp = e_cblp self.e_cp = e_cp self.e_crlc = e_crlc self.e_cparhdr = e_cparhdr self.e_minalloc = e_minalloc self.e_maxalloc = e_maxalloc self.e_ss = e_ss self.e_sp = e_sp self.e_csum = e_csum self.e_ip = e_ip self.e_cs = e_cs self.e_lfarlc = e_lfarlc self.e_ovro = e_ovro if reserved1 is None: self.reserved1 = [] else: self.reserved1 = reserved1 self.e_oemid = e_oemid self.e_oeminfo = e_oeminfo self.reserved2 = reserved2 self.e_lfanew = e_lfanew self.Hashes = Hashes def factory(*args_, **kwargs_): if DOSHeaderType.subclass: return DOSHeaderType.subclass(*args_, **kwargs_) else: return DOSHeaderType(*args_, **kwargs_) factory = staticmethod(factory) def get_e_magic(self): return self.e_magic def set_e_magic(self, e_magic): self.e_magic = e_magic def validate_HexBinaryObjectPropertyType(self, value): # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None. pass def get_e_cblp(self): return self.e_cblp def set_e_cblp(self, e_cblp): self.e_cblp = e_cblp def get_e_cp(self): return self.e_cp def set_e_cp(self, e_cp): self.e_cp = e_cp def get_e_crlc(self): return self.e_crlc def set_e_crlc(self, e_crlc): self.e_crlc = e_crlc def get_e_cparhdr(self): return self.e_cparhdr def set_e_cparhdr(self, e_cparhdr): self.e_cparhdr = e_cparhdr def get_e_minalloc(self): return self.e_minalloc def set_e_minalloc(self, e_minalloc): self.e_minalloc = e_minalloc def get_e_maxalloc(self): return self.e_maxalloc def set_e_maxalloc(self, e_maxalloc): self.e_maxalloc = e_maxalloc def get_e_ss(self): return self.e_ss def set_e_ss(self, e_ss): self.e_ss = e_ss def get_e_sp(self): return self.e_sp def set_e_sp(self, e_sp): self.e_sp = e_sp def get_e_csum(self): return self.e_csum def set_e_csum(self, e_csum): self.e_csum = e_csum def get_e_ip(self): return self.e_ip def set_e_ip(self, e_ip): self.e_ip = e_ip def get_e_cs(self): return self.e_cs def set_e_cs(self, e_cs): self.e_cs = e_cs def get_e_lfarlc(self): return self.e_lfarlc def set_e_lfarlc(self, e_lfarlc): self.e_lfarlc = e_lfarlc def get_e_ovro(self): return self.e_ovro def set_e_ovro(self, e_ovro): self.e_ovro = e_ovro def get_reserved1(self): return self.reserved1 def set_reserved1(self, reserved1): self.reserved1 = reserved1 def add_reserved1(self, value): self.reserved1.append(value) def insert_reserved1(self, index, value): self.reserved1[index] = value def get_e_oemid(self): return self.e_oemid def set_e_oemid(self, e_oemid): self.e_oemid = e_oemid def get_e_oeminfo(self): return self.e_oeminfo def set_e_oeminfo(self, e_oeminfo): self.e_oeminfo = e_oeminfo def get_reserved2(self): return self.reserved2 def set_reserved2(self, reserved2): self.reserved2 = reserved2 def get_e_lfanew(self): return self.e_lfanew def set_e_lfanew(self, e_lfanew): self.e_lfanew = e_lfanew def get_Hashes(self): return self.Hashes def set_Hashes(self, Hashes): self.Hashes = Hashes def hasContent_(self): if ( self.e_magic is not None or self.e_cblp is not None or self.e_cp is not None 
or self.e_crlc is not None or self.e_cparhdr is not None or self.e_minalloc is not None or self.e_maxalloc is not None or self.e_ss is not None or self.e_sp is not None or self.e_csum is not None or self.e_ip is not None or self.e_cs is not None or self.e_lfarlc is not None or self.e_ovro is not None or self.reserved1 or self.e_oemid is not None or self.e_oeminfo is not None or self.reserved2 is not None or self.e_lfanew is not None or self.Hashes is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='DOSHeaderType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='DOSHeaderType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='DOSHeaderType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='DOSHeaderType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.e_magic is not None: self.e_magic.export(lwrite, level, 'WinExecutableFileObj:', name_='e_magic', pretty_print=pretty_print) if self.e_cblp is not None: self.e_cblp.export(lwrite, level, 'WinExecutableFileObj:', name_='e_cblp', pretty_print=pretty_print) if self.e_cp is not None: self.e_cp.export(lwrite, level, 'WinExecutableFileObj:', name_='e_cp', pretty_print=pretty_print) if self.e_crlc is not None: self.e_crlc.export(lwrite, level, 'WinExecutableFileObj:', name_='e_crlc', pretty_print=pretty_print) if self.e_cparhdr is not None: self.e_cparhdr.export(lwrite, level, 'WinExecutableFileObj:', name_='e_cparhdr', pretty_print=pretty_print) if self.e_minalloc is not None: self.e_minalloc.export(lwrite, level, 'WinExecutableFileObj:', name_='e_minalloc', pretty_print=pretty_print) if self.e_maxalloc is not None: self.e_maxalloc.export(lwrite, level, 'WinExecutableFileObj:', name_='e_maxalloc', pretty_print=pretty_print) if self.e_ss is not None: self.e_ss.export(lwrite, level, 'WinExecutableFileObj:', name_='e_ss', pretty_print=pretty_print) if self.e_sp is not None: self.e_sp.export(lwrite, level, 'WinExecutableFileObj:', name_='e_sp', pretty_print=pretty_print) if self.e_csum is not None: self.e_csum.export(lwrite, level, 'WinExecutableFileObj:', name_='e_csum', pretty_print=pretty_print) if self.e_ip is not None: self.e_ip.export(lwrite, level, 'WinExecutableFileObj:', name_='e_ip', pretty_print=pretty_print) if self.e_cs is not None: self.e_cs.export(lwrite, level, 'WinExecutableFileObj:', name_='e_cs', pretty_print=pretty_print) if self.e_lfarlc is not None: self.e_lfarlc.export(lwrite, level, 'WinExecutableFileObj:', name_='e_lfarlc', pretty_print=pretty_print) if self.e_ovro is not None: self.e_ovro.export(lwrite, level, 'WinExecutableFileObj:', name_='e_ovro', pretty_print=pretty_print) for reserved1_ in self.reserved1: reserved1_.export(lwrite, level, 'WinExecutableFileObj:', name_='reserved1', pretty_print=pretty_print) if self.e_oemid is not None: self.e_oemid.export(lwrite, level, 'WinExecutableFileObj:', 
name_='e_oemid', pretty_print=pretty_print) if self.e_oeminfo is not None: self.e_oeminfo.export(lwrite, level, 'WinExecutableFileObj:', name_='e_oeminfo', pretty_print=pretty_print) if self.reserved2 is not None: self.reserved2.export(lwrite, level, 'WinExecutableFileObj:', name_='reserved2', pretty_print=pretty_print) if self.e_lfanew is not None: self.e_lfanew.export(lwrite, level, 'WinExecutableFileObj:', name_='e_lfanew', pretty_print=pretty_print) if self.Hashes is not None: self.Hashes.export(lwrite, level, 'WinExecutableFileObj:', name_='Hashes', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'e_magic': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_magic(obj_) elif nodeName_ == 'e_cblp': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_cblp(obj_) elif nodeName_ == 'e_cp': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_cp(obj_) elif nodeName_ == 'e_crlc': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_crlc(obj_) elif nodeName_ == 'e_cparhdr': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_cparhdr(obj_) elif nodeName_ == 'e_minalloc': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_minalloc(obj_) elif nodeName_ == 'e_maxalloc': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_maxalloc(obj_) elif nodeName_ == 'e_ss': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_ss(obj_) elif nodeName_ == 'e_sp': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_sp(obj_) elif nodeName_ == 'e_csum': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_csum(obj_) elif nodeName_ == 'e_ip': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_ip(obj_) elif nodeName_ == 'e_cs': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_cs(obj_) elif nodeName_ == 'e_lfarlc': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_lfarlc(obj_) elif nodeName_ == 'e_ovro': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_ovro(obj_) elif nodeName_ == 'reserved1': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.reserved1.append(obj_) elif nodeName_ == 'e_oemid': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_oemid(obj_) elif nodeName_ == 'e_oeminfo': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_oeminfo(obj_) elif nodeName_ == 'reserved2': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_reserved2(obj_) elif nodeName_ == 'e_lfanew': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_e_lfanew(obj_) elif nodeName_ == 'Hashes': obj_ = cybox_common.HashListType.factory() obj_.build(child_) self.set_Hashes(obj_) # end class 
DOSHeaderType class PEHeadersType(GeneratedsSuper): """PEHeaderType specifies the headers found in PE and COFF files.""" subclass = None superclass = None def __init__(self, DOS_Header=None, Signature=None, File_Header=None, Optional_Header=None, Entropy=None, Hashes=None): self.DOS_Header = DOS_Header self.Signature = Signature self.File_Header = File_Header self.Optional_Header = Optional_Header self.Entropy = Entropy self.Hashes = Hashes def factory(*args_, **kwargs_): if PEHeadersType.subclass: return PEHeadersType.subclass(*args_, **kwargs_) else: return PEHeadersType(*args_, **kwargs_) factory = staticmethod(factory) def get_DOS_Header(self): return self.DOS_Header def set_DOS_Header(self, DOS_Header): self.DOS_Header = DOS_Header def get_Signature(self): return self.Signature def set_Signature(self, Signature): self.Signature = Signature def validate_HexBinaryObjectPropertyType(self, value): # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None. pass def get_File_Header(self): return self.File_Header def set_File_Header(self, File_Header): self.File_Header = File_Header def get_Optional_Header(self): return self.Optional_Header def set_Optional_Header(self, Optional_Header): self.Optional_Header = Optional_Header def get_Entropy(self): return self.Entropy def set_Entropy(self, Entropy): self.Entropy = Entropy def get_Hashes(self): return self.Hashes def set_Hashes(self, Hashes): self.Hashes = Hashes def hasContent_(self): if ( self.DOS_Header is not None or self.Signature is not None or self.File_Header is not None or self.Optional_Header is not None or self.Entropy is not None or self.Hashes is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEHeadersType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEHeadersType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEHeadersType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEHeadersType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.DOS_Header is not None: self.DOS_Header.export(lwrite, level, 'WinExecutableFileObj:', name_='DOS_Header', pretty_print=pretty_print) if self.Signature is not None: self.Signature.export(lwrite, level, 'WinExecutableFileObj:', name_='Signature', pretty_print=pretty_print) if self.File_Header is not None: self.File_Header.export(lwrite, level, 'WinExecutableFileObj:', name_='File_Header', pretty_print=pretty_print) if self.Optional_Header is not None: self.Optional_Header.export(lwrite, level, 'WinExecutableFileObj:', name_='Optional_Header', pretty_print=pretty_print) if self.Entropy is not None: self.Entropy.export(lwrite, level, 'WinExecutableFileObj:', name_='Entropy', pretty_print=pretty_print) if self.Hashes is not None: self.Hashes.export(lwrite, level, 'WinExecutableFileObj:', name_='Hashes', pretty_print=pretty_print) def build(self, node): 
        self.__sourcenode__ = node
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'DOS_Header':
            obj_ = DOSHeaderType.factory()
            obj_.build(child_)
            self.set_DOS_Header(obj_)
        elif nodeName_ == 'Signature':
            obj_ = cybox_common.HexBinaryObjectPropertyType.factory()
            obj_.build(child_)
            self.set_Signature(obj_)
        elif nodeName_ == 'File_Header':
            obj_ = PEFileHeaderType.factory()
            obj_.build(child_)
            self.set_File_Header(obj_)
        elif nodeName_ == 'Optional_Header':
            obj_ = PEOptionalHeaderType.factory()
            obj_.build(child_)
            self.set_Optional_Header(obj_)
        elif nodeName_ == 'Entropy':
            obj_ = EntropyType.factory()
            obj_.build(child_)
            self.set_Entropy(obj_)
        elif nodeName_ == 'Hashes':
            obj_ = cybox_common.HashListType.factory()
            obj_.build(child_)
            self.set_Hashes(obj_)
# end class PEHeadersType

class PEFileHeaderType(GeneratedsSuper):
    """The PEFileHeaderType type refers to the PE file header (sometimes
    referred to as the COFF header) and its associated characteristics."""
    subclass = None
    superclass = None
    def __init__(self, Machine=None, Number_Of_Sections=None, Time_Date_Stamp=None, Pointer_To_Symbol_Table=None, Number_Of_Symbols=None, Size_Of_Optional_Header=None, Characteristics=None, Hashes=None):
        self.Machine = Machine
        self.Number_Of_Sections = Number_Of_Sections
        self.Time_Date_Stamp = Time_Date_Stamp
        self.Pointer_To_Symbol_Table = Pointer_To_Symbol_Table
        self.Number_Of_Symbols = Number_Of_Symbols
        self.Size_Of_Optional_Header = Size_Of_Optional_Header
        self.Characteristics = Characteristics
        self.Hashes = Hashes
    def factory(*args_, **kwargs_):
        if PEFileHeaderType.subclass:
            return PEFileHeaderType.subclass(*args_, **kwargs_)
        else:
            return PEFileHeaderType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Machine(self): return self.Machine
    def set_Machine(self, Machine): self.Machine = Machine
    def validate_HexBinaryObjectPropertyType(self, value):
        # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None.
        pass
    def get_Number_Of_Sections(self): return self.Number_Of_Sections
    def set_Number_Of_Sections(self, Number_Of_Sections): self.Number_Of_Sections = Number_Of_Sections
    def validate_NonNegativeIntegerObjectPropertyType(self, value):
        # Validate type cybox_common.NonNegativeIntegerObjectPropertyType, a restriction on None.
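# --- Illustrative usage sketch (not part of the generated bindings) ----------
# A hedged example of how a PEFileHeaderType could be rebuilt from an XML
# fragment via the build()/buildChildren() machinery defined in this module.
# The element names and values below are hypothetical sample data, and the
# choice of xml.etree.ElementTree is an assumption (the module's XML imports
# are not shown here):
#
#   import xml.etree.ElementTree as ET
#
#   xml_fragment = '<File_Header><Machine>0x14C</Machine></File_Header>'
#   node = ET.fromstring(xml_fragment)
#
#   header = PEFileHeaderType.factory()
#   header.build(node)   # dispatches each child element through buildChildren()
#   machine = header.get_Machine()  # a cybox_common.HexBinaryObjectPropertyType
# ------------------------------------------------------------------------------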
pass def get_Time_Date_Stamp(self): return self.Time_Date_Stamp def set_Time_Date_Stamp(self, Time_Date_Stamp): self.Time_Date_Stamp = Time_Date_Stamp def get_Pointer_To_Symbol_Table(self): return self.Pointer_To_Symbol_Table def set_Pointer_To_Symbol_Table(self, Pointer_To_Symbol_Table): self.Pointer_To_Symbol_Table = Pointer_To_Symbol_Table def get_Number_Of_Symbols(self): return self.Number_Of_Symbols def set_Number_Of_Symbols(self, Number_Of_Symbols): self.Number_Of_Symbols = Number_Of_Symbols def get_Size_Of_Optional_Header(self): return self.Size_Of_Optional_Header def set_Size_Of_Optional_Header(self, Size_Of_Optional_Header): self.Size_Of_Optional_Header = Size_Of_Optional_Header def get_Characteristics(self): return self.Characteristics def set_Characteristics(self, Characteristics): self.Characteristics = Characteristics def get_Hashes(self): return self.Hashes def set_Hashes(self, Hashes): self.Hashes = Hashes def hasContent_(self): if ( self.Machine is not None or self.Number_Of_Sections is not None or self.Time_Date_Stamp is not None or self.Pointer_To_Symbol_Table is not None or self.Number_Of_Symbols is not None or self.Size_Of_Optional_Header is not None or self.Characteristics is not None or self.Hashes is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEFileHeaderType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEFileHeaderType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEFileHeaderType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEFileHeaderType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Machine is not None: self.Machine.export(lwrite, level, 'WinExecutableFileObj:', name_='Machine', pretty_print=pretty_print) if self.Number_Of_Sections is not None: self.Number_Of_Sections.export(lwrite, level, 'WinExecutableFileObj:', name_='Number_Of_Sections', pretty_print=pretty_print) if self.Time_Date_Stamp is not None: self.Time_Date_Stamp.export(lwrite, level, 'WinExecutableFileObj:', name_='Time_Date_Stamp', pretty_print=pretty_print) if self.Pointer_To_Symbol_Table is not None: self.Pointer_To_Symbol_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='Pointer_To_Symbol_Table', pretty_print=pretty_print) if self.Number_Of_Symbols is not None: self.Number_Of_Symbols.export(lwrite, level, 'WinExecutableFileObj:', name_='Number_Of_Symbols', pretty_print=pretty_print) if self.Size_Of_Optional_Header is not None: self.Size_Of_Optional_Header.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Optional_Header', pretty_print=pretty_print) if self.Characteristics is not None: self.Characteristics.export(lwrite, level, 'WinExecutableFileObj:', name_='Characteristics', pretty_print=pretty_print) if self.Hashes is not None: self.Hashes.export(lwrite, level, 'WinExecutableFileObj:', name_='Hashes', pretty_print=pretty_print) def 
build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Machine': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Machine(obj_) elif nodeName_ == 'Number_Of_Sections': obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory() obj_.build(child_) self.set_Number_Of_Sections(obj_) elif nodeName_ == 'Time_Date_Stamp': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Time_Date_Stamp(obj_) elif nodeName_ == 'Pointer_To_Symbol_Table': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Pointer_To_Symbol_Table(obj_) elif nodeName_ == 'Number_Of_Symbols': obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory() obj_.build(child_) self.set_Number_Of_Symbols(obj_) elif nodeName_ == 'Size_Of_Optional_Header': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Optional_Header(obj_) elif nodeName_ == 'Characteristics': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Characteristics(obj_) elif nodeName_ == 'Hashes': obj_ = cybox_common.HashListType.factory() obj_.build(child_) self.set_Hashes(obj_) # end class PEFileHeaderType class PEOptionalHeaderType(GeneratedsSuper): """The PEOptionalHeaderType type describes the PE Optional Header structure. Additional computed metadata, e.g., hashes of the header, are also included.""" subclass = None superclass = None def __init__(self, Magic=None, Major_Linker_Version=None, Minor_Linker_Version=None, Size_Of_Code=None, Size_Of_Initialized_Data=None, Size_Of_Uninitialized_Data=None, Address_Of_Entry_Point=None, Base_Of_Code=None, Base_Of_Data=None, Image_Base=None, Section_Alignment=None, File_Alignment=None, Major_OS_Version=None, Minor_OS_Version=None, Major_Image_Version=None, Minor_Image_Version=None, Major_Subsystem_Version=None, Minor_Subsystem_Version=None, Win32_Version_Value=None, Size_Of_Image=None, Size_Of_Headers=None, Checksum=None, Subsystem=None, DLL_Characteristics=None, Size_Of_Stack_Reserve=None, Size_Of_Stack_Commit=None, Size_Of_Heap_Reserve=None, Size_Of_Heap_Commit=None, Loader_Flags=None, Number_Of_Rva_And_Sizes=None, Data_Directory=None, Hashes=None): self.Magic = Magic self.Major_Linker_Version = Major_Linker_Version self.Minor_Linker_Version = Minor_Linker_Version self.Size_Of_Code = Size_Of_Code self.Size_Of_Initialized_Data = Size_Of_Initialized_Data self.Size_Of_Uninitialized_Data = Size_Of_Uninitialized_Data self.Address_Of_Entry_Point = Address_Of_Entry_Point self.Base_Of_Code = Base_Of_Code self.Base_Of_Data = Base_Of_Data self.Image_Base = Image_Base self.Section_Alignment = Section_Alignment self.File_Alignment = File_Alignment self.Major_OS_Version = Major_OS_Version self.Minor_OS_Version = Minor_OS_Version self.Major_Image_Version = Major_Image_Version self.Minor_Image_Version = Minor_Image_Version self.Major_Subsystem_Version = Major_Subsystem_Version self.Minor_Subsystem_Version = Minor_Subsystem_Version self.Win32_Version_Value = Win32_Version_Value self.Size_Of_Image = Size_Of_Image self.Size_Of_Headers = Size_Of_Headers self.Checksum = Checksum self.Subsystem = Subsystem 
self.DLL_Characteristics = DLL_Characteristics self.Size_Of_Stack_Reserve = Size_Of_Stack_Reserve self.Size_Of_Stack_Commit = Size_Of_Stack_Commit self.Size_Of_Heap_Reserve = Size_Of_Heap_Reserve self.Size_Of_Heap_Commit = Size_Of_Heap_Commit self.Loader_Flags = Loader_Flags self.Number_Of_Rva_And_Sizes = Number_Of_Rva_And_Sizes self.Data_Directory = Data_Directory self.Hashes = Hashes def factory(*args_, **kwargs_): if PEOptionalHeaderType.subclass: return PEOptionalHeaderType.subclass(*args_, **kwargs_) else: return PEOptionalHeaderType(*args_, **kwargs_) factory = staticmethod(factory) def get_Magic(self): return self.Magic def set_Magic(self, Magic): self.Magic = Magic def validate_HexBinaryObjectPropertyType(self, value): # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None. pass def get_Major_Linker_Version(self): return self.Major_Linker_Version def set_Major_Linker_Version(self, Major_Linker_Version): self.Major_Linker_Version = Major_Linker_Version def get_Minor_Linker_Version(self): return self.Minor_Linker_Version def set_Minor_Linker_Version(self, Minor_Linker_Version): self.Minor_Linker_Version = Minor_Linker_Version def get_Size_Of_Code(self): return self.Size_Of_Code def set_Size_Of_Code(self, Size_Of_Code): self.Size_Of_Code = Size_Of_Code def get_Size_Of_Initialized_Data(self): return self.Size_Of_Initialized_Data def set_Size_Of_Initialized_Data(self, Size_Of_Initialized_Data): self.Size_Of_Initialized_Data = Size_Of_Initialized_Data def get_Size_Of_Uninitialized_Data(self): return self.Size_Of_Uninitialized_Data def set_Size_Of_Uninitialized_Data(self, Size_Of_Uninitialized_Data): self.Size_Of_Uninitialized_Data = Size_Of_Uninitialized_Data def get_Address_Of_Entry_Point(self): return self.Address_Of_Entry_Point def set_Address_Of_Entry_Point(self, Address_Of_Entry_Point): self.Address_Of_Entry_Point = Address_Of_Entry_Point def get_Base_Of_Code(self): return self.Base_Of_Code def set_Base_Of_Code(self, Base_Of_Code): self.Base_Of_Code = Base_Of_Code def get_Base_Of_Data(self): return self.Base_Of_Data def set_Base_Of_Data(self, Base_Of_Data): self.Base_Of_Data = Base_Of_Data def get_Image_Base(self): return self.Image_Base def set_Image_Base(self, Image_Base): self.Image_Base = Image_Base def get_Section_Alignment(self): return self.Section_Alignment def set_Section_Alignment(self, Section_Alignment): self.Section_Alignment = Section_Alignment def get_File_Alignment(self): return self.File_Alignment def set_File_Alignment(self, File_Alignment): self.File_Alignment = File_Alignment def get_Major_OS_Version(self): return self.Major_OS_Version def set_Major_OS_Version(self, Major_OS_Version): self.Major_OS_Version = Major_OS_Version def get_Minor_OS_Version(self): return self.Minor_OS_Version def set_Minor_OS_Version(self, Minor_OS_Version): self.Minor_OS_Version = Minor_OS_Version def get_Major_Image_Version(self): return self.Major_Image_Version def set_Major_Image_Version(self, Major_Image_Version): self.Major_Image_Version = Major_Image_Version def get_Minor_Image_Version(self): return self.Minor_Image_Version def set_Minor_Image_Version(self, Minor_Image_Version): self.Minor_Image_Version = Minor_Image_Version def get_Major_Subsystem_Version(self): return self.Major_Subsystem_Version def set_Major_Subsystem_Version(self, Major_Subsystem_Version): self.Major_Subsystem_Version = Major_Subsystem_Version def get_Minor_Subsystem_Version(self): return self.Minor_Subsystem_Version def set_Minor_Subsystem_Version(self, Minor_Subsystem_Version): 
self.Minor_Subsystem_Version = Minor_Subsystem_Version def get_Win32_Version_Value(self): return self.Win32_Version_Value def set_Win32_Version_Value(self, Win32_Version_Value): self.Win32_Version_Value = Win32_Version_Value def get_Size_Of_Image(self): return self.Size_Of_Image def set_Size_Of_Image(self, Size_Of_Image): self.Size_Of_Image = Size_Of_Image def get_Size_Of_Headers(self): return self.Size_Of_Headers def set_Size_Of_Headers(self, Size_Of_Headers): self.Size_Of_Headers = Size_Of_Headers def get_Checksum(self): return self.Checksum def set_Checksum(self, Checksum): self.Checksum = Checksum def get_Subsystem(self): return self.Subsystem def set_Subsystem(self, Subsystem): self.Subsystem = Subsystem def get_DLL_Characteristics(self): return self.DLL_Characteristics def set_DLL_Characteristics(self, DLL_Characteristics): self.DLL_Characteristics = DLL_Characteristics def get_Size_Of_Stack_Reserve(self): return self.Size_Of_Stack_Reserve def set_Size_Of_Stack_Reserve(self, Size_Of_Stack_Reserve): self.Size_Of_Stack_Reserve = Size_Of_Stack_Reserve def get_Size_Of_Stack_Commit(self): return self.Size_Of_Stack_Commit def set_Size_Of_Stack_Commit(self, Size_Of_Stack_Commit): self.Size_Of_Stack_Commit = Size_Of_Stack_Commit def get_Size_Of_Heap_Reserve(self): return self.Size_Of_Heap_Reserve def set_Size_Of_Heap_Reserve(self, Size_Of_Heap_Reserve): self.Size_Of_Heap_Reserve = Size_Of_Heap_Reserve def get_Size_Of_Heap_Commit(self): return self.Size_Of_Heap_Commit def set_Size_Of_Heap_Commit(self, Size_Of_Heap_Commit): self.Size_Of_Heap_Commit = Size_Of_Heap_Commit def get_Loader_Flags(self): return self.Loader_Flags def set_Loader_Flags(self, Loader_Flags): self.Loader_Flags = Loader_Flags def get_Number_Of_Rva_And_Sizes(self): return self.Number_Of_Rva_And_Sizes def set_Number_Of_Rva_And_Sizes(self, Number_Of_Rva_And_Sizes): self.Number_Of_Rva_And_Sizes = Number_Of_Rva_And_Sizes def get_Data_Directory(self): return self.Data_Directory def set_Data_Directory(self, Data_Directory): self.Data_Directory = Data_Directory def get_Hashes(self): return self.Hashes def set_Hashes(self, Hashes): self.Hashes = Hashes def hasContent_(self): if ( self.Magic is not None or self.Major_Linker_Version is not None or self.Minor_Linker_Version is not None or self.Size_Of_Code is not None or self.Size_Of_Initialized_Data is not None or self.Size_Of_Uninitialized_Data is not None or self.Address_Of_Entry_Point is not None or self.Base_Of_Code is not None or self.Base_Of_Data is not None or self.Image_Base is not None or self.Section_Alignment is not None or self.File_Alignment is not None or self.Major_OS_Version is not None or self.Minor_OS_Version is not None or self.Major_Image_Version is not None or self.Minor_Image_Version is not None or self.Major_Subsystem_Version is not None or self.Minor_Subsystem_Version is not None or self.Win32_Version_Value is not None or self.Size_Of_Image is not None or self.Size_Of_Headers is not None or self.Checksum is not None or self.Subsystem is not None or self.DLL_Characteristics is not None or self.Size_Of_Stack_Reserve is not None or self.Size_Of_Stack_Commit is not None or self.Size_Of_Heap_Reserve is not None or self.Size_Of_Heap_Commit is not None or self.Loader_Flags is not None or self.Number_Of_Rva_And_Sizes is not None or self.Data_Directory is not None or self.Hashes is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEOptionalHeaderType', namespacedef_='', pretty_print=True): if 
pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEOptionalHeaderType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEOptionalHeaderType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEOptionalHeaderType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Magic is not None: self.Magic.export(lwrite, level, 'WinExecutableFileObj:', name_='Magic', pretty_print=pretty_print) if self.Major_Linker_Version is not None: self.Major_Linker_Version.export(lwrite, level, 'WinExecutableFileObj:', name_='Major_Linker_Version', pretty_print=pretty_print) if self.Minor_Linker_Version is not None: self.Minor_Linker_Version.export(lwrite, level, 'WinExecutableFileObj:', name_='Minor_Linker_Version', pretty_print=pretty_print) if self.Size_Of_Code is not None: self.Size_Of_Code.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Code', pretty_print=pretty_print) if self.Size_Of_Initialized_Data is not None: self.Size_Of_Initialized_Data.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Initialized_Data', pretty_print=pretty_print) if self.Size_Of_Uninitialized_Data is not None: self.Size_Of_Uninitialized_Data.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Uninitialized_Data', pretty_print=pretty_print) if self.Address_Of_Entry_Point is not None: self.Address_Of_Entry_Point.export(lwrite, level, 'WinExecutableFileObj:', name_='Address_Of_Entry_Point', pretty_print=pretty_print) if self.Base_Of_Code is not None: self.Base_Of_Code.export(lwrite, level, 'WinExecutableFileObj:', name_='Base_Of_Code', pretty_print=pretty_print) if self.Base_Of_Data is not None: self.Base_Of_Data.export(lwrite, level, 'WinExecutableFileObj:', name_='Base_Of_Data', pretty_print=pretty_print) if self.Image_Base is not None: self.Image_Base.export(lwrite, level, 'WinExecutableFileObj:', name_='Image_Base', pretty_print=pretty_print) if self.Section_Alignment is not None: self.Section_Alignment.export(lwrite, level, 'WinExecutableFileObj:', name_='Section_Alignment', pretty_print=pretty_print) if self.File_Alignment is not None: self.File_Alignment.export(lwrite, level, 'WinExecutableFileObj:', name_='File_Alignment', pretty_print=pretty_print) if self.Major_OS_Version is not None: self.Major_OS_Version.export(lwrite, level, 'WinExecutableFileObj:', name_='Major_OS_Version', pretty_print=pretty_print) if self.Minor_OS_Version is not None: self.Minor_OS_Version.export(lwrite, level, 'WinExecutableFileObj:', name_='Minor_OS_Version', pretty_print=pretty_print) if self.Major_Image_Version is not None: self.Major_Image_Version.export(lwrite, level, 'WinExecutableFileObj:', name_='Major_Image_Version', pretty_print=pretty_print) if self.Minor_Image_Version is not None: self.Minor_Image_Version.export(lwrite, level, 'WinExecutableFileObj:', name_='Minor_Image_Version', pretty_print=pretty_print) if self.Major_Subsystem_Version is not None: self.Major_Subsystem_Version.export(lwrite, level, 
'WinExecutableFileObj:', name_='Major_Subsystem_Version', pretty_print=pretty_print) if self.Minor_Subsystem_Version is not None: self.Minor_Subsystem_Version.export(lwrite, level, 'WinExecutableFileObj:', name_='Minor_Subsystem_Version', pretty_print=pretty_print) if self.Win32_Version_Value is not None: self.Win32_Version_Value.export(lwrite, level, 'WinExecutableFileObj:', name_='Win32_Version_Value', pretty_print=pretty_print) if self.Size_Of_Image is not None: self.Size_Of_Image.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Image', pretty_print=pretty_print) if self.Size_Of_Headers is not None: self.Size_Of_Headers.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Headers', pretty_print=pretty_print) if self.Checksum is not None: self.Checksum.export(lwrite, level, 'WinExecutableFileObj:', name_='Checksum', pretty_print=pretty_print) if self.Subsystem is not None: self.Subsystem.export(lwrite, level, 'WinExecutableFileObj:', name_='Subsystem', pretty_print=pretty_print) if self.DLL_Characteristics is not None: self.DLL_Characteristics.export(lwrite, level, 'WinExecutableFileObj:', name_='DLL_Characteristics', pretty_print=pretty_print) if self.Size_Of_Stack_Reserve is not None: self.Size_Of_Stack_Reserve.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Stack_Reserve', pretty_print=pretty_print) if self.Size_Of_Stack_Commit is not None: self.Size_Of_Stack_Commit.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Stack_Commit', pretty_print=pretty_print) if self.Size_Of_Heap_Reserve is not None: self.Size_Of_Heap_Reserve.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Heap_Reserve', pretty_print=pretty_print) if self.Size_Of_Heap_Commit is not None: self.Size_Of_Heap_Commit.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Heap_Commit', pretty_print=pretty_print) if self.Loader_Flags is not None: self.Loader_Flags.export(lwrite, level, 'WinExecutableFileObj:', name_='Loader_Flags', pretty_print=pretty_print) if self.Number_Of_Rva_And_Sizes is not None: self.Number_Of_Rva_And_Sizes.export(lwrite, level, 'WinExecutableFileObj:', name_='Number_Of_Rva_And_Sizes', pretty_print=pretty_print) if self.Data_Directory is not None: self.Data_Directory.export(lwrite, level, 'WinExecutableFileObj:', name_='Data_Directory', pretty_print=pretty_print) if self.Hashes is not None: self.Hashes.export(lwrite, level, 'WinExecutableFileObj:', name_='Hashes', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Magic': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Magic(obj_) elif nodeName_ == 'Major_Linker_Version': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Major_Linker_Version(obj_) elif nodeName_ == 'Minor_Linker_Version': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Minor_Linker_Version(obj_) elif nodeName_ == 'Size_Of_Code': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Code(obj_) elif nodeName_ == 'Size_Of_Initialized_Data': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() 
obj_.build(child_) self.set_Size_Of_Initialized_Data(obj_) elif nodeName_ == 'Size_Of_Uninitialized_Data': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Uninitialized_Data(obj_) elif nodeName_ == 'Address_Of_Entry_Point': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Address_Of_Entry_Point(obj_) elif nodeName_ == 'Base_Of_Code': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Base_Of_Code(obj_) elif nodeName_ == 'Base_Of_Data': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Base_Of_Data(obj_) elif nodeName_ == 'Image_Base': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Image_Base(obj_) elif nodeName_ == 'Section_Alignment': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Section_Alignment(obj_) elif nodeName_ == 'File_Alignment': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_File_Alignment(obj_) elif nodeName_ == 'Major_OS_Version': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Major_OS_Version(obj_) elif nodeName_ == 'Minor_OS_Version': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Minor_OS_Version(obj_) elif nodeName_ == 'Major_Image_Version': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Major_Image_Version(obj_) elif nodeName_ == 'Minor_Image_Version': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Minor_Image_Version(obj_) elif nodeName_ == 'Major_Subsystem_Version': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Major_Subsystem_Version(obj_) elif nodeName_ == 'Minor_Subsystem_Version': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Minor_Subsystem_Version(obj_) elif nodeName_ == 'Win32_Version_Value': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Win32_Version_Value(obj_) elif nodeName_ == 'Size_Of_Image': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Image(obj_) elif nodeName_ == 'Size_Of_Headers': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Headers(obj_) elif nodeName_ == 'Checksum': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Checksum(obj_) elif nodeName_ == 'Subsystem': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Subsystem(obj_) elif nodeName_ == 'DLL_Characteristics': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_DLL_Characteristics(obj_) elif nodeName_ == 'Size_Of_Stack_Reserve': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Stack_Reserve(obj_) elif nodeName_ == 'Size_Of_Stack_Commit': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Stack_Commit(obj_) elif nodeName_ == 'Size_Of_Heap_Reserve': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Heap_Reserve(obj_) elif nodeName_ == 'Size_Of_Heap_Commit': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Heap_Commit(obj_) elif nodeName_ == 'Loader_Flags': obj_ = 
cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Loader_Flags(obj_) elif nodeName_ == 'Number_Of_Rva_And_Sizes': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Number_Of_Rva_And_Sizes(obj_) elif nodeName_ == 'Data_Directory': obj_ = DataDirectoryType.factory() obj_.build(child_) self.set_Data_Directory(obj_) elif nodeName_ == 'Hashes': obj_ = cybox_common.HashListType.factory() obj_.build(child_) self.set_Hashes(obj_) # end class PEOptionalHeaderType class DataDirectoryType(GeneratedsSuper): """The DataDirectoryType specifies the data directories that can appear in the PE file's optional header. The data directories, except the Certificate Table, are loaded into memory so they can be used at runtime.""" subclass = None superclass = None def __init__(self, Export_Table=None, Import_Table=None, Resource_Table=None, Exception_Table=None, Certificate_Table=None, Base_Relocation_Table=None, Debug=None, Architecture=None, Global_Ptr=None, TLS_Table=None, Load_Config_Table=None, Bound_Import=None, Import_Address_Table=None, Delay_Import_Descriptor=None, CLR_Runtime_Header=None, Reserved=None): self.Export_Table = Export_Table self.Import_Table = Import_Table self.Resource_Table = Resource_Table self.Exception_Table = Exception_Table self.Certificate_Table = Certificate_Table self.Base_Relocation_Table = Base_Relocation_Table self.Debug = Debug self.Architecture = Architecture self.Global_Ptr = Global_Ptr self.TLS_Table = TLS_Table self.Load_Config_Table = Load_Config_Table self.Bound_Import = Bound_Import self.Import_Address_Table = Import_Address_Table self.Delay_Import_Descriptor = Delay_Import_Descriptor self.CLR_Runtime_Header = CLR_Runtime_Header self.Reserved = Reserved def factory(*args_, **kwargs_): if DataDirectoryType.subclass: return DataDirectoryType.subclass(*args_, **kwargs_) else: return DataDirectoryType(*args_, **kwargs_) factory = staticmethod(factory) def get_Export_Table(self): return self.Export_Table def set_Export_Table(self, Export_Table): self.Export_Table = Export_Table def get_Import_Table(self): return self.Import_Table def set_Import_Table(self, Import_Table): self.Import_Table = Import_Table def get_Resource_Table(self): return self.Resource_Table def set_Resource_Table(self, Resource_Table): self.Resource_Table = Resource_Table def get_Exception_Table(self): return self.Exception_Table def set_Exception_Table(self, Exception_Table): self.Exception_Table = Exception_Table def get_Certificate_Table(self): return self.Certificate_Table def set_Certificate_Table(self, Certificate_Table): self.Certificate_Table = Certificate_Table def get_Base_Relocation_Table(self): return self.Base_Relocation_Table def set_Base_Relocation_Table(self, Base_Relocation_Table): self.Base_Relocation_Table = Base_Relocation_Table def get_Debug(self): return self.Debug def set_Debug(self, Debug): self.Debug = Debug def get_Architecture(self): return self.Architecture def set_Architecture(self, Architecture): self.Architecture = Architecture def get_Global_Ptr(self): return self.Global_Ptr def set_Global_Ptr(self, Global_Ptr): self.Global_Ptr = Global_Ptr def get_TLS_Table(self): return self.TLS_Table def set_TLS_Table(self, TLS_Table): self.TLS_Table = TLS_Table def get_Load_Config_Table(self): return self.Load_Config_Table def set_Load_Config_Table(self, Load_Config_Table): self.Load_Config_Table = Load_Config_Table def get_Bound_Import(self): return self.Bound_Import def set_Bound_Import(self, Bound_Import): 
self.Bound_Import = Bound_Import def get_Import_Address_Table(self): return self.Import_Address_Table def set_Import_Address_Table(self, Import_Address_Table): self.Import_Address_Table = Import_Address_Table def get_Delay_Import_Descriptor(self): return self.Delay_Import_Descriptor def set_Delay_Import_Descriptor(self, Delay_Import_Descriptor): self.Delay_Import_Descriptor = Delay_Import_Descriptor def get_CLR_Runtime_Header(self): return self.CLR_Runtime_Header def set_CLR_Runtime_Header(self, CLR_Runtime_Header): self.CLR_Runtime_Header = CLR_Runtime_Header def get_Reserved(self): return self.Reserved def set_Reserved(self, Reserved): self.Reserved = Reserved def hasContent_(self): if ( self.Export_Table is not None or self.Import_Table is not None or self.Resource_Table is not None or self.Exception_Table is not None or self.Certificate_Table is not None or self.Base_Relocation_Table is not None or self.Debug is not None or self.Architecture is not None or self.Global_Ptr is not None or self.TLS_Table is not None or self.Load_Config_Table is not None or self.Bound_Import is not None or self.Import_Address_Table is not None or self.Delay_Import_Descriptor is not None or self.CLR_Runtime_Header is not None or self.Reserved is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='DataDirectoryType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='DataDirectoryType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='DataDirectoryType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='DataDirectoryType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Export_Table is not None: self.Export_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='Export_Table', pretty_print=pretty_print) if self.Import_Table is not None: self.Import_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='Import_Table', pretty_print=pretty_print) if self.Resource_Table is not None: self.Resource_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='Resource_Table', pretty_print=pretty_print) if self.Exception_Table is not None: self.Exception_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='Exception_Table', pretty_print=pretty_print) if self.Certificate_Table is not None: self.Certificate_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='Certificate_Table', pretty_print=pretty_print) if self.Base_Relocation_Table is not None: self.Base_Relocation_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='Base_Relocation_Table', pretty_print=pretty_print) if self.Debug is not None: self.Debug.export(lwrite, level, 'WinExecutableFileObj:', name_='Debug', pretty_print=pretty_print) if self.Architecture is not None: self.Architecture.export(lwrite, level, 'WinExecutableFileObj:', name_='Architecture', pretty_print=pretty_print) if self.Global_Ptr is not None: 
self.Global_Ptr.export(lwrite, level, 'WinExecutableFileObj:', name_='Global_Ptr', pretty_print=pretty_print) if self.TLS_Table is not None: self.TLS_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='TLS_Table', pretty_print=pretty_print) if self.Load_Config_Table is not None: self.Load_Config_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='Load_Config_Table', pretty_print=pretty_print) if self.Bound_Import is not None: self.Bound_Import.export(lwrite, level, 'WinExecutableFileObj:', name_='Bound_Import', pretty_print=pretty_print) if self.Import_Address_Table is not None: self.Import_Address_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='Import_Address_Table', pretty_print=pretty_print) if self.Delay_Import_Descriptor is not None: self.Delay_Import_Descriptor.export(lwrite, level, 'WinExecutableFileObj:', name_='Delay_Import_Descriptor', pretty_print=pretty_print) if self.CLR_Runtime_Header is not None: self.CLR_Runtime_Header.export(lwrite, level, 'WinExecutableFileObj:', name_='CLR_Runtime_Header', pretty_print=pretty_print) if self.Reserved is not None: self.Reserved.export(lwrite, level, 'WinExecutableFileObj:', name_='Reserved', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Export_Table': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Export_Table(obj_) elif nodeName_ == 'Import_Table': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Import_Table(obj_) elif nodeName_ == 'Resource_Table': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Resource_Table(obj_) elif nodeName_ == 'Exception_Table': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Exception_Table(obj_) elif nodeName_ == 'Certificate_Table': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Certificate_Table(obj_) elif nodeName_ == 'Base_Relocation_Table': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Base_Relocation_Table(obj_) elif nodeName_ == 'Debug': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Debug(obj_) elif nodeName_ == 'Architecture': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Architecture(obj_) elif nodeName_ == 'Global_Ptr': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Global_Ptr(obj_) elif nodeName_ == 'TLS_Table': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_TLS_Table(obj_) elif nodeName_ == 'Load_Config_Table': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Load_Config_Table(obj_) elif nodeName_ == 'Bound_Import': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Bound_Import(obj_) elif nodeName_ == 'Import_Address_Table': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Import_Address_Table(obj_) elif nodeName_ == 'Delay_Import_Descriptor': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Delay_Import_Descriptor(obj_) elif nodeName_ == 'CLR_Runtime_Header': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_CLR_Runtime_Header(obj_) elif nodeName_ == 
'Reserved': obj_ = PEDataDirectoryStructType.factory() obj_.build(child_) self.set_Reserved(obj_) # end class DataDirectoryType class PEBuildInformationType(GeneratedsSuper): """The PEBuildInformationType captures information about the tools used to build the PE binary, including the compiler and linker.""" subclass = None superclass = None def __init__(self, Linker_Name=None, Linker_Version=None, Compiler_Name=None, Compiler_Version=None): self.Linker_Name = Linker_Name self.Linker_Version = Linker_Version self.Compiler_Name = Compiler_Name self.Compiler_Version = Compiler_Version def factory(*args_, **kwargs_): if PEBuildInformationType.subclass: return PEBuildInformationType.subclass(*args_, **kwargs_) else: return PEBuildInformationType(*args_, **kwargs_) factory = staticmethod(factory) def get_Linker_Name(self): return self.Linker_Name def set_Linker_Name(self, Linker_Name): self.Linker_Name = Linker_Name def validate_StringObjectPropertyType(self, value): # Validate type cybox_common.StringObjectPropertyType, a restriction on None. pass def get_Linker_Version(self): return self.Linker_Version def set_Linker_Version(self, Linker_Version): self.Linker_Version = Linker_Version def get_Compiler_Name(self): return self.Compiler_Name def set_Compiler_Name(self, Compiler_Name): self.Compiler_Name = Compiler_Name def get_Compiler_Version(self): return self.Compiler_Version def set_Compiler_Version(self, Compiler_Version): self.Compiler_Version = Compiler_Version def hasContent_(self): if ( self.Linker_Name is not None or self.Linker_Version is not None or self.Compiler_Name is not None or self.Compiler_Version is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEBuildInformationType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEBuildInformationType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEBuildInformationType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEBuildInformationType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Linker_Name is not None: self.Linker_Name.export(lwrite, level, 'WinExecutableFileObj:', name_='Linker_Name', pretty_print=pretty_print) if self.Linker_Version is not None: self.Linker_Version.export(lwrite, level, 'WinExecutableFileObj:', name_='Linker_Version', pretty_print=pretty_print) if self.Compiler_Name is not None: self.Compiler_Name.export(lwrite, level, 'WinExecutableFileObj:', name_='Compiler_Name', pretty_print=pretty_print) if self.Compiler_Version is not None: self.Compiler_Version.export(lwrite, level, 'WinExecutableFileObj:', name_='Compiler_Version', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, 
nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Linker_Name': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Linker_Name(obj_) elif nodeName_ == 'Linker_Version': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Linker_Version(obj_) elif nodeName_ == 'Compiler_Name': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Compiler_Name(obj_) elif nodeName_ == 'Compiler_Version': obj_ = cybox_common.StringObjectPropertyType.factory() obj_.build(child_) self.set_Compiler_Version(obj_) # end class PEBuildInformationType class PEType(cybox_common.BaseObjectPropertyType): """PEType specifies PE file types via a union of the PETypeEnum type and the atomic xs:string type. Its base type is the CybOX Core cybox_common.BaseObjectPropertyType, for permitting complex (i.e. regular- expression based) specifications.This attribute is optional and specifies the expected type for the value of the specified property.""" subclass = None superclass = cybox_common.BaseObjectPropertyType def __init__(self, obfuscation_algorithm_ref=None, refanging_transform_type=None, has_changed=None, delimiter='##comma##', pattern_type=None, datatype='string', refanging_transform=None, is_case_sensitive=True, bit_mask=None, appears_random=None, observed_encoding=None, defanging_algorithm_ref=None, is_obfuscated=None, regex_syntax=None, apply_condition='ANY', trend=None, idref=None, is_defanged=None, id=None, condition=None, valueOf_=None): super(PEType, self).__init__(obfuscation_algorithm_ref, refanging_transform_type, has_changed, delimiter, pattern_type, datatype, refanging_transform, is_case_sensitive, bit_mask, appears_random, observed_encoding, defanging_algorithm_ref, is_obfuscated, regex_syntax, apply_condition, trend, idref, is_defanged, id, condition, valueOf_) self.datatype = _cast(None, datatype) self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if PEType.subclass: return PEType.subclass(*args_, **kwargs_) else: return PEType(*args_, **kwargs_) factory = staticmethod(factory) def get_datatype(self): return self.datatype def set_datatype(self, datatype): self.datatype = datatype def get_valueOf_(self): return self.valueOf_ def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_ def hasContent_(self): if ( self.valueOf_ or super(PEType, self).hasContent_() ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEType') if self.hasContent_(): lwrite('>') lwrite(quote_xml(self.valueOf_)) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEType'): super(PEType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='PEType') if self.datatype is not None: lwrite(' datatype=%s' % (quote_attrib(self.datatype), )) def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', 
name_='PEType', fromsubclass_=False, pretty_print=True): super(PEType, self).exportChildren(lwrite, level, 'WinExecutableFileObj:', name_, True, pretty_print=pretty_print) pass def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) self.valueOf_ = get_all_text_(node) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('datatype', node) if value is not None: self.datatype = value super(PEType, self).buildAttributes(node, attrs, already_processed) def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class PEType class SubsystemType(cybox_common.BaseObjectPropertyType): """SubsystemTypes specifies subsystem types via a union of the SubsystemTypeEnum type and the atomic xs:string type. Its base type is the CybOX Core cybox_common.BaseObjectPropertyType, for permitting complex (i.e. regular-expression based) specifications.This attribute is optional and specifies the expected type for the value of the specified property.""" subclass = None superclass = cybox_common.BaseObjectPropertyType def __init__(self, obfuscation_algorithm_ref=None, refanging_transform_type=None, has_changed=None, delimiter='##comma##', pattern_type=None, datatype='string', refanging_transform=None, is_case_sensitive=True, bit_mask=None, appears_random=None, observed_encoding=None, defanging_algorithm_ref=None, is_obfuscated=None, regex_syntax=None, apply_condition='ANY', trend=None, idref=None, is_defanged=None, id=None, condition=None, valueOf_=None): super(SubsystemType, self).__init__(obfuscation_algorithm_ref, refanging_transform_type, has_changed, delimiter, pattern_type, datatype, refanging_transform, is_case_sensitive, bit_mask, appears_random, observed_encoding, defanging_algorithm_ref, is_obfuscated, regex_syntax, apply_condition, trend, idref, is_defanged, id, condition, valueOf_) self.datatype = _cast(None, datatype) self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if SubsystemType.subclass: return SubsystemType.subclass(*args_, **kwargs_) else: return SubsystemType(*args_, **kwargs_) factory = staticmethod(factory) def get_datatype(self): return self.datatype def set_datatype(self, datatype): self.datatype = datatype def get_valueOf_(self): return self.valueOf_ def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_ def hasContent_(self): if ( self.valueOf_ or super(SubsystemType, self).hasContent_() ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='SubsystemType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='SubsystemType') if self.hasContent_(): lwrite('>') lwrite(quote_xml(self.valueOf_)) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='SubsystemType'): super(SubsystemType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='SubsystemType') if self.datatype is not None: lwrite(' 
datatype=%s' % (quote_attrib(self.datatype), )) def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='SubsystemType', fromsubclass_=False, pretty_print=True): super(SubsystemType, self).exportChildren(lwrite, level, 'WinExecutableFileObj:', name_, True, pretty_print=pretty_print) pass def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) self.valueOf_ = get_all_text_(node) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('datatype', node) if value is not None: self.datatype = value super(SubsystemType, self).buildAttributes(node, attrs, already_processed) def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class SubsystemType class WindowsExecutableFileObjectType(win_file_object.WindowsFileObjectType): """The WindowsExecutableFileObjectType type is intended to characterize Windows PE (Portable Executable) files.""" subclass = None superclass = win_file_object.WindowsFileObjectType def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, is_packed=None, File_Name=None, File_Path=None, Device_Path=None, Full_Path=None, File_Extension=None, Size_In_Bytes=None, Magic_Number=None, File_Format=None, Hashes=None, Digital_Signatures=None, Modified_Time=None, Accessed_Time=None, Created_Time=None, File_Attributes_List=None, Permissions=None, User_Owner=None, Packer_List=None, Peak_Entropy=None, Sym_Links=None, Byte_Runs=None, Extracted_Features=None, Filename_Accessed_Time=None, Filename_Created_Time=None, Filename_Modified_Time=None, Drive=None, Security_ID=None, Security_Type=None, Stream_List=None, Build_Information=None, Digital_Signature=None, Exports=None, Extraneous_Bytes=None, Headers=None, Imports=None, PE_Checksum=None, Resources=None, Sections=None, Type=None): super(WindowsExecutableFileObjectType, self).__init__(object_reference, Custom_Properties, is_packed, File_Name, File_Path, Device_Path, Full_Path, File_Extension, Size_In_Bytes, Magic_Number, File_Format, Hashes, Digital_Signatures, Modified_Time, Accessed_Time, Created_Time, File_Attributes_List, Permissions, User_Owner, Packer_List, Peak_Entropy, Sym_Links, Byte_Runs, Extracted_Features, Filename_Accessed_Time, Filename_Created_Time, Filename_Modified_Time, Drive, Security_ID, Security_Type, Stream_List, ) self.Build_Information = Build_Information self.Digital_Signature = Digital_Signature self.Exports = Exports self.Extraneous_Bytes = Extraneous_Bytes self.Headers = Headers self.Imports = Imports self.PE_Checksum = PE_Checksum self.Resources = Resources self.Sections = Sections self.Type = Type def factory(*args_, **kwargs_): if WindowsExecutableFileObjectType.subclass: return WindowsExecutableFileObjectType.subclass(*args_, **kwargs_) else: return WindowsExecutableFileObjectType(*args_, **kwargs_) factory = staticmethod(factory) def get_Build_Information(self): return self.Build_Information def set_Build_Information(self, Build_Information): self.Build_Information = Build_Information def get_Digital_Signature(self): return self.Digital_Signature def set_Digital_Signature(self, Digital_Signature): self.Digital_Signature = Digital_Signature def get_Exports(self): return self.Exports def set_Exports(self, Exports): self.Exports = Exports def get_Extraneous_Bytes(self): return self.Extraneous_Bytes def 
set_Extraneous_Bytes(self, Extraneous_Bytes): self.Extraneous_Bytes = Extraneous_Bytes def validate_IntegerObjectPropertyType(self, value): # Validate type cybox_common.IntegerObjectPropertyType, a restriction on None. pass def get_Headers(self): return self.Headers def set_Headers(self, Headers): self.Headers = Headers def get_Imports(self): return self.Imports def set_Imports(self, Imports): self.Imports = Imports def get_PE_Checksum(self): return self.PE_Checksum def set_PE_Checksum(self, PE_Checksum): self.PE_Checksum = PE_Checksum def get_Resources(self): return self.Resources def set_Resources(self, Resources): self.Resources = Resources def get_Sections(self): return self.Sections def set_Sections(self, Sections): self.Sections = Sections def get_Type(self): return self.Type def set_Type(self, Type): self.Type = Type def validate_PEType(self, value): # Validate type PEType, a restriction on None. pass def hasContent_(self): if ( self.Build_Information is not None or self.Digital_Signature is not None or self.Exports is not None or self.Extraneous_Bytes is not None or self.Headers is not None or self.Imports is not None or self.PE_Checksum is not None or self.Resources is not None or self.Sections is not None or self.Type is not None or super(WindowsExecutableFileObjectType, self).hasContent_() ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='WindowsExecutableFileObjectType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsExecutableFileObjectType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='WindowsExecutableFileObjectType'): super(WindowsExecutableFileObjectType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsExecutableFileObjectType') def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='WindowsExecutableFileObjectType', fromsubclass_=False, pretty_print=True): super(WindowsExecutableFileObjectType, self).exportChildren(lwrite, level, 'WinExecutableFileObj:', name_, True, pretty_print=pretty_print) if pretty_print: eol_ = '\n' else: eol_ = '' if self.Build_Information is not None: self.Build_Information.export(lwrite, level, 'WinExecutableFileObj:', name_='Build_Information', pretty_print=pretty_print) if self.Digital_Signature is not None: self.Digital_Signature.export(lwrite, level, 'WinExecutableFileObj:', name_='Digital_Signature', pretty_print=pretty_print) if self.Exports is not None: self.Exports.export(lwrite, level, 'WinExecutableFileObj:', name_='Exports', pretty_print=pretty_print) if self.Extraneous_Bytes is not None: self.Extraneous_Bytes.export(lwrite, level, 'WinExecutableFileObj:', name_='Extraneous_Bytes', pretty_print=pretty_print) if self.Headers is not None: self.Headers.export(lwrite, level, 'WinExecutableFileObj:', name_='Headers', pretty_print=pretty_print) if self.Imports is not None: self.Imports.export(lwrite, level, 'WinExecutableFileObj:', name_='Imports', 
pretty_print=pretty_print) if self.PE_Checksum is not None: self.PE_Checksum.export(lwrite, level, 'WinExecutableFileObj:', name_='PE_Checksum', pretty_print=pretty_print) if self.Resources is not None: self.Resources.export(lwrite, level, 'WinExecutableFileObj:', name_='Resources', pretty_print=pretty_print) if self.Sections is not None: self.Sections.export(lwrite, level, 'WinExecutableFileObj:', name_='Sections', pretty_print=pretty_print) if self.Type is not None: self.Type.export(lwrite, level, 'WinExecutableFileObj:', name_='Type', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): super(WindowsExecutableFileObjectType, self).buildAttributes(node, attrs, already_processed) def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Build_Information': obj_ = PEBuildInformationType.factory() obj_.build(child_) self.set_Build_Information(obj_) elif nodeName_ == 'Digital_Signature': obj_ = cybox_common.DigitalSignatureInfoType.factory() obj_.build(child_) self.set_Digital_Signature(obj_) elif nodeName_ == 'Exports': obj_ = PEExportsType.factory() obj_.build(child_) self.set_Exports(obj_) elif nodeName_ == 'Extraneous_Bytes': obj_ = cybox_common.IntegerObjectPropertyType.factory() obj_.build(child_) self.set_Extraneous_Bytes(obj_) elif nodeName_ == 'Headers': obj_ = PEHeadersType.factory() obj_.build(child_) self.set_Headers(obj_) elif nodeName_ == 'Imports': obj_ = PEImportListType.factory() obj_.build(child_) self.set_Imports(obj_) elif nodeName_ == 'PE_Checksum': obj_ = PEChecksumType.factory() obj_.build(child_) self.set_PE_Checksum(obj_) elif nodeName_ == 'Resources': obj_ = PEResourceListType.factory() obj_.build(child_) self.set_Resources(obj_) elif nodeName_ == 'Sections': obj_ = PESectionListType.factory() obj_.build(child_) self.set_Sections(obj_) elif nodeName_ == 'Type': obj_ = PEType.factory() obj_.build(child_) self.set_Type(obj_) super(WindowsExecutableFileObjectType, self).buildChildren(child_, node, nodeName_, True) # end class WindowsExecutableFileObjectType GDSClassesMapping = { 'Extraneous_Bytes': cybox_common.IntegerObjectPropertyType, 'Dependency_Description': cybox_common.StructuredTextType, 'Linker_Version': cybox_common.StringObjectPropertyType, 'Errors': cybox_common.ErrorsType, 'Major_Linker_Version': cybox_common.HexBinaryObjectPropertyType, 'Size_Of_Stack_Commit': cybox_common.HexBinaryObjectPropertyType, 'Filename_Accessed_Time': cybox_common.DateTimeObjectPropertyType, 'Opcodes': cybox_common.StringObjectPropertyType, 'Comments': cybox_common.StringObjectPropertyType, 'Contributors': cybox_common.PersonnelType, 'e_lfanew': cybox_common.HexBinaryObjectPropertyType, 'Loader_Flags': cybox_common.HexBinaryObjectPropertyType, 'Size_Of_Code': cybox_common.HexBinaryObjectPropertyType, 'Metadata': cybox_common.MetadataType, 'e_cblp': cybox_common.HexBinaryObjectPropertyType, 'Image_Base': cybox_common.HexBinaryObjectPropertyType, 'Base_Of_Data': cybox_common.HexBinaryObjectPropertyType, 'Fuzzy_Hash_Structure': cybox_common.FuzzyHashStructureType, 'Size_In_Bytes': cybox_common.UnsignedLongObjectPropertyType, 'e_lfarlc': cybox_common.HexBinaryObjectPropertyType, 'Pointer_To_Linenumbers': cybox_common.HexBinaryObjectPropertyType, 'SpecialBuild': 
cybox_common.StringObjectPropertyType, 'Information_Source_Type': cybox_common.ControlledVocabularyStringType, 'File_Extension': cybox_common.StringObjectPropertyType, 'Size_Of_Uninitialized_Data': cybox_common.HexBinaryObjectPropertyType, 'Segment_Hash': cybox_common.HashValueType, 'Internal_Strings': cybox_common.InternalStringsType, 'Address_Of_Entry_Point': cybox_common.HexBinaryObjectPropertyType, 'Byte_Runs': cybox_common.ByteRunsType, 'SubDatum': cybox_common.MetadataType, 'Magic': cybox_common.HexBinaryObjectPropertyType, 'Digital_Signature': cybox_common.DigitalSignatureInfoType, 'Checksum': cybox_common.HexBinaryObjectPropertyType, 'e_csum': cybox_common.HexBinaryObjectPropertyType, 'Address': cybox_common.HexBinaryObjectPropertyType, 'Value': cybox_common.StringObjectPropertyType, 'Number_Of_Rva_And_Sizes': cybox_common.HexBinaryObjectPropertyType, 'e_oeminfo': cybox_common.HexBinaryObjectPropertyType, 'Length': cybox_common.IntegerObjectPropertyType, 'Hint': cybox_common.HexBinaryObjectPropertyType, 'Pointer_To_Symbol_Table': cybox_common.HexBinaryObjectPropertyType, 'LegalCopyright': cybox_common.StringObjectPropertyType, 'e_minalloc': cybox_common.HexBinaryObjectPropertyType, 'Encoding': cybox_common.ControlledVocabularyStringType, 'Characteristics': cybox_common.HexBinaryObjectPropertyType, 'PE_Computed_API': cybox_common.LongObjectPropertyType, 'e_cp': cybox_common.HexBinaryObjectPropertyType, 'e_cs': cybox_common.HexBinaryObjectPropertyType, 'File_System_Offset': cybox_common.IntegerObjectPropertyType, 'File_Alignment': cybox_common.HexBinaryObjectPropertyType, 'Certificate_Issuer': cybox_common.StringObjectPropertyType, 'Full_Path': cybox_common.StringObjectPropertyType, 'Attribute': win_file_object.WindowsFileAttributeType, 'Code_Snippet': cybox_common.ObjectPropertiesType, 'Base_Of_Code': cybox_common.HexBinaryObjectPropertyType, 'Number_Of_Linenumbers': cybox_common.NonNegativeIntegerObjectPropertyType, 'Segments': cybox_common.HashSegmentsType, 'Filename_Created_Time': cybox_common.DateTimeObjectPropertyType, 'Functions': cybox_common.FunctionsType, 'Virtual_Address': cybox_common.HexBinaryObjectPropertyType, 'String_Value': cybox_common.StringObjectPropertyType, 'Build_Utility_Platform_Specification': cybox_common.PlatformSpecificationType, 'Number_Of_Relocations': cybox_common.NonNegativeIntegerObjectPropertyType, 'LangID': cybox_common.StringObjectPropertyType, 'e_maxalloc': cybox_common.HexBinaryObjectPropertyType, 'Platform': cybox_common.PlatformSpecificationType, 'Version': cybox_common.StringObjectPropertyType, 'Size_Of_Raw_Data': cybox_common.HexBinaryObjectPropertyType, 'Created_Time': cybox_common.DateTimeObjectPropertyType, 'Name': cybox_common.StringObjectPropertyType, 'Ordinal': cybox_common.NonNegativeIntegerObjectPropertyType, 'Tool_Configuration': cybox_common.ToolConfigurationType, 'Tool_Type': cybox_common.ControlledVocabularyStringType, 'String': cybox_common.ExtractedStringType, 'Size_Of_Heap_Reserve': cybox_common.HexBinaryObjectPropertyType, 'Tool': cybox_common.ToolInformationType, 'Size_Of_Initialized_Data': cybox_common.HexBinaryObjectPropertyType, 'Build_Information': cybox_common.BuildInformationType, 'Size_Of_Stack_Reserve': cybox_common.HexBinaryObjectPropertyType, 'Tool_Hashes': cybox_common.HashListType, 'Subsystem': cybox_common.HexBinaryObjectPropertyType, 'Major_Image_Version': cybox_common.HexBinaryObjectPropertyType, 'Size_Of_Optional_Header': cybox_common.HexBinaryObjectPropertyType, 'Device_Path': 
cybox_common.StringObjectPropertyType, 'Number_Of_Names': cybox_common.LongObjectPropertyType, 'Error_Instances': cybox_common.ErrorInstancesType, 'Digital_Signatures': cybox_common.DigitalSignaturesType, 'Filename_Modified_Time': cybox_common.DateTimeObjectPropertyType, 'InternalName': cybox_common.StringObjectPropertyType, 'Data_Segment': cybox_common.StringObjectPropertyType, 'Sym_Link': cybox_common.StringObjectPropertyType, 'Compiler_Name': cybox_common.StringObjectPropertyType, 'Win32_Version_Value': cybox_common.HexBinaryObjectPropertyType, 'Signature': cybox_common.StringObjectPropertyType, 'Time_Date_Stamp': cybox_common.HexBinaryObjectPropertyType, 'Property': cybox_common.PropertyType, 'Windows_File': win_file_object.WindowsFileObjectType, 'Strings': cybox_common.ExtractedStringsType, 'e_crlc': cybox_common.HexBinaryObjectPropertyType, 'User_Owner': cybox_common.StringObjectPropertyType, 'Tool_Specific_Data': cybox_common.ToolSpecificDataType, 'Minor_OS_Version': cybox_common.HexBinaryObjectPropertyType, 'Number_Of_Sections': cybox_common.NonNegativeIntegerObjectPropertyType, 'LegalTrademarks': cybox_common.StringObjectPropertyType, 'Reference_Description': cybox_common.StructuredTextType, 'ProductName': cybox_common.StringObjectPropertyType, 'DLL_Characteristics': cybox_common.HexBinaryObjectPropertyType, 'Image_Offset': cybox_common.IntegerObjectPropertyType, 'Size_Of_Headers': cybox_common.HexBinaryObjectPropertyType, 'Configuration_Settings': cybox_common.ConfigurationSettingsType, 'Data_Hashes': cybox_common.HashListType, 'Compiler_Platform_Specification': cybox_common.PlatformSpecificationType, 'Byte_String_Value': cybox_common.HexBinaryObjectPropertyType, 'e_cparhdr': cybox_common.HexBinaryObjectPropertyType, 'Security_Type': cybox_common.SIDType, 'Instance': cybox_common.ObjectPropertiesType, 'PE_File_API': cybox_common.LongObjectPropertyType, 'Import': cybox_common.StringObjectPropertyType, 'Accessed_Time': cybox_common.StringObjectPropertyType, 'Usage_Context_Assumptions': cybox_common.UsageContextAssumptionsType, 'Identifier': cybox_common.PlatformIdentifierType, 'Compiler_Version': cybox_common.StringObjectPropertyType, 'Extracted_Features': cybox_common.ExtractedFeaturesType, 'Execution_Environment': cybox_common.ExecutionEnvironmentType, 'Header_Hashes': cybox_common.HashListType, 'Search_Distance': cybox_common.IntegerObjectPropertyType, 'Block_Hash': cybox_common.FuzzyHashBlockType, 'Compiler_Informal_Description': cybox_common.CompilerInformalDescriptionType, 'Dependencies': cybox_common.DependenciesType, 'Segment_Count': cybox_common.IntegerObjectPropertyType, 'Size_Of_Image': cybox_common.HexBinaryObjectPropertyType, 'PrivateBuild': cybox_common.StringObjectPropertyType, 'Date': cybox_common.DateRangeType, 'Hashes': cybox_common.HashListType, 'Minor_Subsystem_Version': cybox_common.HexBinaryObjectPropertyType, 'Language': cybox_common.StringObjectPropertyType, 'Certificate_Subject': cybox_common.StringObjectPropertyType, 'Offset': cybox_common.IntegerObjectPropertyType, 'System': cybox_common.ObjectPropertiesType, 'e_ovro': cybox_common.HexBinaryObjectPropertyType, 'Dependency': cybox_common.DependencyType, 'PE_File_Raw': cybox_common.LongObjectPropertyType, 'Build_Utility': cybox_common.BuildUtilityType, 'Minor_Image_Version': cybox_common.HexBinaryObjectPropertyType, 'Virtual_Size': cybox_common.HexBinaryObjectPropertyType, 'Trigger_Point': cybox_common.HexBinaryObjectPropertyType, 'Environment_Variable': cybox_common.EnvironmentVariableType, 'Byte_Run': 
cybox_common.ByteRunType, 'Libraries': cybox_common.LibrariesType, 'Stream': win_file_object.StreamObjectType, 'CompanyName': cybox_common.StringObjectPropertyType, 'Stream_List': win_file_object.StreamListType, 'Imports': cybox_common.ImportsType, 'Number_Of_Symbols': cybox_common.NonNegativeIntegerObjectPropertyType, 'Search_Within': cybox_common.IntegerObjectPropertyType, 'Library': cybox_common.LibraryType, 'Size_Of_Heap_Commit': cybox_common.HexBinaryObjectPropertyType, 'English_Translation': cybox_common.StringObjectPropertyType, 'References': cybox_common.ToolReferencesType, 'Pointer_To_Relocations': cybox_common.HexBinaryObjectPropertyType, 'Size': cybox_common.NonNegativeIntegerObjectPropertyType, 'Block_Hash_Value': cybox_common.HashValueType, 'Time': cybox_common.TimeType, 'Min': cybox_common.FloatObjectPropertyType, 'File_Name': cybox_common.StringObjectPropertyType, 'Configuration_Setting': cybox_common.ConfigurationSettingType, 'Modified_Time': cybox_common.StringObjectPropertyType, 'Fuzzy_Hash_Value': cybox_common.FuzzyHashValueType, 'Machine': cybox_common.HexBinaryObjectPropertyType, 'Security_ID': cybox_common.StringObjectPropertyType, 'Internationalization_Settings': cybox_common.InternationalizationSettingsType, 'reserved1': cybox_common.HexBinaryObjectPropertyType, 'e_sp': cybox_common.HexBinaryObjectPropertyType, 'e_ss': cybox_common.HexBinaryObjectPropertyType, 'reserved2': cybox_common.HexBinaryObjectPropertyType, 'Function': cybox_common.StringObjectPropertyType, 'Usage_Context_Assumption': cybox_common.StructuredTextType, 'Build_Configuration': cybox_common.BuildConfigurationType, 'Magic_Number': cybox_common.HexBinaryObjectPropertyType, 'Linker_Name': cybox_common.StringObjectPropertyType, 'Error': cybox_common.ErrorType, 'Compilers': cybox_common.CompilersType, 'Segment': cybox_common.HashSegmentType, 'Depth': cybox_common.IntegerObjectPropertyType, 'Section_Alignment': cybox_common.HexBinaryObjectPropertyType, 'Compiler': cybox_common.CompilerType, 'FileVersion': cybox_common.StringObjectPropertyType, 'Data_Size': cybox_common.DataSizeType, 'Number_Of_Addresses': cybox_common.LongObjectPropertyType, 'Drive': cybox_common.StringObjectPropertyType, 'Hash': cybox_common.HashType, 'Exports_Time_Stamp': cybox_common.DateTimeObjectPropertyType, 'Minor_Linker_Version': cybox_common.HexBinaryObjectPropertyType, 'Entry_Point': cybox_common.HexBinaryObjectPropertyType, 'ProductVersion': cybox_common.StringObjectPropertyType, 'Code_Snippets': cybox_common.CodeSnippetsType, 'FileDescription': cybox_common.StringObjectPropertyType, 'Signature_Description': cybox_common.StringObjectPropertyType, 'Block_Size': cybox_common.IntegerObjectPropertyType, 'Simple_Hash_Value': cybox_common.SimpleHashValueType, 'OriginalFilename': cybox_common.StringObjectPropertyType, 'e_ip': cybox_common.HexBinaryObjectPropertyType, 'Peak_Entropy': cybox_common.DoubleObjectPropertyType, 'Major_Subsystem_Version': cybox_common.HexBinaryObjectPropertyType, 'File_Format': cybox_common.StringObjectPropertyType, 'Bound': cybox_common.HexBinaryObjectPropertyType, 'Description': cybox_common.StructuredTextType, 'e_oemid': cybox_common.HexBinaryObjectPropertyType, 'Pointer_To_Raw_Data': cybox_common.HexBinaryObjectPropertyType, 'e_magic': cybox_common.HexBinaryObjectPropertyType, 'Max': cybox_common.FloatObjectPropertyType, 'Contributor': cybox_common.ContributorType, 'User_Account_Info': cybox_common.ObjectPropertiesType, 'Tools': cybox_common.ToolsInformationType, 'Custom_Properties': 
    cybox_common.CustomPropertiesType,
    'Major_OS_Version': cybox_common.HexBinaryObjectPropertyType,
    'Function_Name': cybox_common.StringObjectPropertyType,
}

USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""

def usage():
    print(USAGE_TEXT)
    sys.exit(1)

def get_root_tag(node):
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    rootClass = GDSClassesMapping.get(tag)
    if rootClass is None:
        rootClass = globals().get(tag)
    return tag, rootClass

def parse(inFileName):
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'Windows_Executable_File'
        rootClass = WindowsExecutableFileObjectType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    # sys.stdout.write('<?xml version="1.0" ?>\n')
    # rootObj.export(sys.stdout.write, 0, name_=rootTag,
    #     namespacedef_='',
    #     pretty_print=True)
    return rootObj

def parseEtree(inFileName):
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'Windows_Executable_File'
        rootClass = WindowsExecutableFileObjectType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    rootElement = rootObj.to_etree(None, name_=rootTag)
    content = etree_.tostring(
        rootElement, pretty_print=True,
        xml_declaration=True, encoding="utf-8")
    sys.stdout.write(content)
    sys.stdout.write('\n')
    return rootObj, rootElement

def parseString(inString):
    from mixbox.vendor.six import StringIO
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'Windows_Executable_File'
        rootClass = WindowsExecutableFileObjectType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    # sys.stdout.write('<?xml version="1.0" ?>\n')
    # rootObj.export(sys.stdout.write, 0, name_="Windows_Executable_File",
    #     namespacedef_='')
    return rootObj

def main():
    args = sys.argv[1:]
    if len(args) == 1:
        parse(args[0])
    else:
        usage()

if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()

__all__ = [
    "WindowsExecutableFileObjectType",
    "PEChecksumType",
    "PEExportsType",
    "PEExportedFunctionsType",
    "PESectionListType",
    "EntropyType",
    "PEImportType",
    "PEImportedFunctionsType",
    "PEResourceType",
    "PEVersionInfoResourceType",
    "PEExportedFunctionType",
    "PEResourceListType",
    "PEImportedFunctionType",
    "PEImportListType",
    "PESectionType",
    "PEDataDirectoryStructType",
    "PESectionHeaderStructType",
    "DOSHeaderType",
    "PEHeadersType",
    "PEFileHeaderType",
    "SubsystemType",
    "PEType",
    "PEOptionalHeaderType",
    "DataDirectoryType",
    "PEBuildInformationType",
]
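

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated bindings).
#
# The helper below shows how these bindings are typically driven: parse()
# builds an object tree from an XML instance document, the generated
# get_*/set_* accessors walk it, and export() serializes it back to XML,
# mirroring the commented-out export call inside parse() above. Assumptions:
# the file name 'win_executable_file.xml' and the helper name
# _example_roundtrip are examples only; the root element is assumed to map to
# WindowsExecutableFileObjectType; get_File_Header()/get_Machine() and
# get_valueOf_() are assumed from the PEHeadersType, PEFileHeaderType, and
# cybox_common property classes defined elsewhere in this module and its
# imports. Treat this as a sketch, not a definitive API reference.
# ---------------------------------------------------------------------------
def _example_roundtrip(inFileName='win_executable_file.xml'):
    # Build the WindowsExecutableFileObjectType tree from an XML instance.
    pe_obj = parse(inFileName)

    # Read a header field, guarding against elements absent from the input.
    headers = pe_obj.get_Headers()
    if headers is not None and headers.get_File_Header() is not None:
        machine = headers.get_File_Header().get_Machine()
        if machine is not None:
            # Property types wrap their text content in valueOf_ (assumed
            # accessor from cybox_common.HexBinaryObjectPropertyType).
            print('Machine: %s' % machine.get_valueOf_())

    # Serialize the (possibly modified) tree back to XML on stdout.
    sys.stdout.write('<?xml version="1.0" ?>\n')
    pe_obj.export(sys.stdout.write, 0, name_='Windows_Executable_File',
                  pretty_print=True)
    return pe_obj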