Dataset columns (name, dtype, observed range):

  max_stars_repo_path    stringlengths    4 .. 286
  max_stars_repo_name    stringlengths    5 .. 119
  max_stars_count        int64            0 .. 191k
  id                     stringlengths    1 .. 7
  content                stringlengths    6 .. 1.03M
  content_cleaned        stringlengths    6 .. 1.03M
  language               stringclasses    111 values
  language_score         float64          0.03 .. 1
  comments               stringlengths    0 .. 556k
  edu_score              float64          0.32 .. 5.03
  edu_int_score          int64            0 .. 5
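Each record below pairs repository metadata (path, name, star count) with the raw and cleaned file content, the detected language and its confidence, the extracted comments, and an educational-quality score. As a minimal sketch of how records with this schema might be loaded and filtered, assuming the data is published as a Hugging Face dataset (the dataset id and the filter thresholds below are placeholders, not taken from this document):

from datasets import load_dataset

# Hypothetical dataset id; substitute the real one.
ds = load_dataset("example-org/python-edu-code", split="train")

# Keep confidently detected English files with a high educational score.
subset = ds.filter(
    lambda row: row["language"] == "en"
    and row["language_score"] >= 0.7
    and row["edu_int_score"] >= 3
)

# Inspect a few of the surviving records.
for row in subset.select(range(min(3, len(subset)))):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["edu_score"])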
assets/puppeteer.py
KTibow/scorecard
2
6631451
import pytest from pyppeteer import launch, errors as pyppeteer_errors from os import remove, removedirs, mkdir from os.path import exists from glob import glob from json import load import asyncio # Test every endpoint try: mkdir("pytest_screenshots") except FileExistsError: screenshots = glob(r"pytest_screenshots\*") for screenshot in screenshots: remove(screenshot) removedirs("pytest_screenshots") mkdir("pytest_screenshots") @pytest.fixture(scope="module") def event_loop(): loop = asyncio.get_event_loop() yield loop loop.close() @pytest.fixture(scope="function") async def browser(): browser_config = {"autoClose": False, "slowMo": 2} if exists(r"C:\Users\Kende\AppData\Local\Google\Chrome\Application\chrome.exe"): browser_config[ "executablePath" ] = r"C:\Users\Kende\AppData\Local\Google\Chrome\Application\chrome.exe" browser_config["headless"] = False browser = await launch(browser_config) yield browser try: await browser.close() except Exception: pass @pytest.fixture(scope="function") async def page(browser): page = await browser.newPage() page.setDefaultNavigationTimeout(6000) databases = glob("*.db") for database in databases: try: remove(database) except Exception: pass yield page await page.evaluate("localStorage.clear()") await page.close() @pytest.fixture(scope="function") async def tabs_2(browser): databases = glob("*.db") for database in databases: try: remove(database) except Exception: pass page1 = await browser.newPage() page1.setDefaultNavigationTimeout(5000) page2 = await browser.newPage() page2.setDefaultNavigationTimeout(5000) yield [page1, page2] try: await page1.evaluate("localStorage.clear()") except pyppeteer_errors.ElementHandleError: pass await page1.close() try: await page2.evaluate("localStorage.clear()") except pyppeteer_errors.ElementHandleError: pass await page2.close() class TestGeneral(object): @pytest.mark.asyncio @pytest.mark.parametrize("method", ["keyboard", "button", "localStorage"]) async def test_login(self, page, method): await page.goto("http://127.0.0.1:5000") assert "ClueCard | E-ScoreCard for game clues" == await page.title() if method == "localStorage": await page.evaluate("localStorage.setItem('username', 'Kendell')") await page.reload() await page.screenshot({"path": "pytest_screenshots/logging_you_in.png"}) else: await page.keyboard.type("Kendell") if method == "keyboard": await page.keyboard.type("\n") else: await page.click("button") await page.waitForNavigation({"waitUntil": "networkidle2"}) await page.screenshot({"path": f"pytest_screenshots/login_{method}.png"}) assert "http://127.0.0.1:5000/cluecard/Kendell" == page.url assert "Play ClueCard | E-ScoreCard for game clues" == await page.title() @pytest.mark.asyncio async def test_update_dbs(self, tabs_2): # Do initial add for page_index, this_page in enumerate(tabs_2): await this_page.goto("http://127.0.0.1:5000") await this_page.bringToFront() await this_page.keyboard.type(f"Kendell{page_index}") await this_page.evaluate("document.querySelector('button').click()") await this_page.waitForNavigation({"waitUntil": "networkidle2"}) await this_page.evaluate("localStorage.clear()") main_tab = tabs_2[1] await main_tab.bringToFront() await main_tab.focus("#userId") await main_tab.keyboard.type(await tabs_2[0].evaluate("userIdString")) await main_tab.click("#addToGroup") # New id await this_page.goto("http://127.0.0.1:5000") await this_page.keyboard.type("Kendell1") await this_page.evaluate("document.querySelector('button').click()") await this_page.waitForNavigation({"waitUntil": "networkidle2"}) 
await this_page.evaluate("localStorage.clear()") await asyncio.sleep(0.5) await main_tab.screenshot({"path": "pytest_screenshots/after_update_dbs.png"}) people_in_group = await main_tab.querySelectorEval( "#groupStat", "(el) => {return el.innerHTML}" ) assert people_in_group == "You're in a group with Kendell0 and Kendell1." class TestAddToGroup(object): @pytest.mark.asyncio @pytest.mark.parametrize("use_keyboard", [True, False]) async def test_add_to_group_makenew(self, tabs_2, use_keyboard): for page_index, this_page in enumerate(tabs_2): await this_page.goto("http://127.0.0.1:5000") await this_page.bringToFront() assert "ClueCard | E-ScoreCard for game clues" == await this_page.title() await this_page.keyboard.type(f"Kendell{page_index}") await this_page.evaluate("document.querySelector('button').click()") await this_page.waitForNavigation({"waitUntil": "networkidle2"}) await this_page.evaluate("localStorage.clear()") main_tab = tabs_2[0] await main_tab.bringToFront() await main_tab.focus("#userId") await main_tab.keyboard.type(await tabs_2[1].evaluate("userIdString")) if use_keyboard: await main_tab.keyboard.type("\n") else: await main_tab.click("#addToGroup") # After adding image_path = "pytest_screenshots/add_to_group_" image_path += "with_keyboard" if use_keyboard else "with_button" await main_tab.bringToFront() await asyncio.sleep(0.5) await main_tab.screenshot({"path": image_path + ".png"}) people_in_group = await main_tab.querySelectorEval( "#groupStat", "(el) => {return el.innerHTML}" ) assert people_in_group == "You're in a group with Kendell0 and Kendell1." @pytest.mark.asyncio @pytest.mark.parametrize("tab_to_add", [[0, 2], [1, 2], [2, 0], [2, 1]]) async def test_add_to_existing_group(self, page, tabs_2, tab_to_add): all_tabs = tabs_2 + [page] for page_index, this_page in enumerate(all_tabs): await this_page.goto("http://127.0.0.1:5000") await this_page.bringToFront() await this_page.keyboard.type(f"Kendell{page_index}") await this_page.evaluate("document.querySelector('button').click()") await this_page.waitForNavigation({"waitUntil": "networkidle2"}) await this_page.evaluate("localStorage.clear()") await this_page.evaluate("document.hasFocus = () => {return true;}") # First add main_tab = all_tabs[0] await main_tab.bringToFront() user_id = await main_tab.querySelector("#userId") await main_tab.type("#userId", await all_tabs[1].evaluate("userIdString")) await main_tab.click("#addToGroup") await user_id.click(clickCount=3) # Second add main_tab = all_tabs[tab_to_add[0]] await main_tab.bringToFront() await main_tab.type( "#userId", await all_tabs[tab_to_add[1]].evaluate("userIdString") ) await main_tab.click("#addToGroup") # After adding image_path = "pytest_screenshots/add_to_existing_group" image_path += f"_{tab_to_add}" await main_tab.bringToFront() await asyncio.sleep(0.5) await main_tab.screenshot({"path": image_path + ".png"}) people_in_group = await main_tab.querySelectorEval( "#groupStat", "(el) => {return el.innerHTML}" ) assert ( people_in_group == "You're in a group with Kendell0, Kendell1 and Kendell2." 
) class TestMetadata(object): @pytest.mark.asyncio async def test_ready_to_go(self, tabs_2): for index, tab in enumerate(tabs_2): await tab.goto("http://127.0.0.1:5000") await tab.evaluate("localStorage.clear()") await tab.goto("http://127.0.0.1:5000") await tab.bringToFront() await tab.keyboard.type(f"Kendell{index}") await tab.click("button") await tab.waitForNavigation({"waitUntil": "networkidle2"}) tab = tabs_2[0] await tab.bringToFront() user_id = await tab.querySelector("#userId") await tab.type("#userId", await tabs_2[1].evaluate("userIdString")) await tab.click("#addToGroup") await asyncio.sleep(0.5) await tab.click('label[for="imReady"]') await asyncio.sleep(0.5) await tab.screenshot({"path": "pytest_screenshots/ready_to_go_meta.png"}) people_in_group = await tab.querySelectorEval( "#groupStat", "(e) => {return e.innerHTML}" ) assert ( people_in_group == "You're in a group with Kendell0 (✅ is ready) and Kendell1." ) # TODO: Check whether the card buttons are accessible @pytest.mark.asyncio async def test_finished(self, tabs_2): # Getting the tabs ready for index, tab in enumerate(tabs_2): await tab.goto("http://127.0.0.1:5000") await tab.evaluate("localStorage.clear()") await tab.goto("http://127.0.0.1:5000") await tab.bringToFront() await tab.keyboard.type(f"Kendell{index}") await tab.click("button") await tab.waitForNavigation({"waitUntil": "networkidle2"}) # Adding them to group tab = tabs_2[0] await tab.bringToFront() user_id = await tab.querySelector("#userId") await tab.type("#userId", await tabs_2[1].evaluate("userIdString")) await tab.click("#addToGroup") await asyncio.sleep(0.5) # Marking them as ready await tab.click('label[for="imReady"]') await asyncio.sleep(0.5) await tabs_2[1].bringToFront() await tabs_2[1].click('label[for="imReady"]') await asyncio.sleep(0.5) await tab.bringToFront() await asyncio.sleep(10) # Doing the thing with open("groups.db") as group_file: group_database = load(group_file) for clue, is_correct in group_database[0][0].items(): if is_correct == "correct": correct_clue = clue for i in range(2): await tab.click(f'label[for="toggle-{correct_clue[0].lower()}"]') await tab.click(f'label[for="toggle-{correct_clue[1]}"]') await asyncio.sleep(0.2) await tab.click("#addClue") await asyncio.sleep(5) people_in_group = await tab.querySelectorEval( "#groupStat", "(e) => {return e.innerHTML}" ) assert ( people_in_group == "You're in a group with Kendell0 (🏁 finished) and Kendell1 (✅ is ready)." ) # TODO: Test backend seperately # TODO: Check every single line of code
en
0.730902
# Test every endpoint # Do initial add # New id # After adding # First add # Second add # After adding # TODO: Check whether the card buttons are accessible # Getting the tabs ready # Adding them to group # Marking them as ready # Doing the thing # TODO: Test backend seperately # TODO: Check every single line of code
2.179875
2
project_crudname/core/admin.py
omher1105/example-django-tests
0
6631452
from django.contrib import admin


class AbstractChoiceAdmin(admin.ModelAdmin):
    """
    Admin options for AbstractChoice abstract model.
    """
    list_display = ['id', 'name', 'code']


class AuditAdminMixin:
    ordering = ['pk']

    def get_merge_fields(self, origin_fields, fields):
        fields = list(fields)
        for field in origin_fields:
            if field not in fields:
                fields.append(field)
        return fields

    def get_all_fields(self, fields):
        audit_fields = ['is_active', 'creation_date', 'created_by', 'update_date', 'update_by']
        return self.get_merge_fields(origin_fields=audit_fields, fields=fields)

    def get_all_readonly_fields(self, fields):
        audit_readonly_fields = ['creation_date', 'created_by', 'update_date', 'update_by']
        return self.get_merge_fields(origin_fields=audit_readonly_fields, fields=fields)

    def get_fields(self, request, obj=None):
        fields = super(AuditAdminMixin, self).get_fields(request, obj)
        return self.get_all_fields(fields=fields)

    def get_readonly_fields(self, request, obj=None):
        readonly_fields = super(AuditAdminMixin, self).get_readonly_fields(request, obj=None)
        return self.get_all_readonly_fields(fields=readonly_fields)
en
0.397551
Admin options for AbstractChoice abstract model.
2.160959
2
_python/recursive_print_dict.py
luizeleno/pyjupiter
0
6631453
import re
import unidecode


def RecursivePrintDict(dictio, of, indent=0, start='- '):
    for k, v in dictio.items():
        key = re.sub(r'\s+', '_', f'{k}')
        key = unidecode.unidecode(key)
        if isinstance(v, dict):
            of.write(' ' * indent + f'{start}{key}:\n')
            if k == 'requisitos' or k == 'oferecimento' or k == 'vagas' or k == 'aulas':
                RecursivePrintDict(v, of, indent+1)
            else:
                RecursivePrintDict(v, of, indent+1, start='')
        else:
            val = f'{v}'.replace('"', '')
            if 'docente' in k:
                of.write(' ' * indent + f'{start}{key}: {val}\n')
            else:
                of.write(' ' * indent + f'{start}{key}: "{val}"\n')
none
1
3.549197
4
librex/_symsets.py
matpuk/testjb
0
6631454
#
# Symbol sets implementation
#
from typing import Text, Callable


def sym_is_any(sym: Text) -> bool:
    return True


def sym_is_digit(sym: Text) -> bool:
    return sym.isdigit()


def sym_is_not_digit(sym: Text) -> bool:
    return not sym_is_digit(sym)


def sym_is_space(sym: Text) -> bool:
    return sym.isspace()


def sym_is_not_space(sym: Text) -> bool:
    return not sym_is_space(sym)


def sym_is_alnum(sym: Text) -> bool:
    return sym.isalnum() or sym == '_'


def sym_is_not_alnum(sym: Text) -> bool:
    return not sym_is_alnum(sym)


_sets_map = {
    '.': sym_is_any,
    'd': sym_is_digit,
    'D': sym_is_not_digit,
    's': sym_is_space,
    'S': sym_is_not_space,
    'w': sym_is_alnum,
    'W': sym_is_not_alnum,
}


def get_symbol_set(sym: Text) -> Callable[[Text], bool]:
    if sym in _sets_map:
        return _sets_map[sym]
    raise ValueError(f'unknown symbol set type: {sym}')
en
0.741542
# # Symbol sets implementation #
3.417306
3
tests/fixtures/dashboard.py
us88/LF_Flask-MonitoringDashboard
0
6631455
<filename>tests/fixtures/dashboard.py
import pytest
import pytz
from flask import Flask

import lemonadefashion_flask_monitoringdashboard


@pytest.fixture
def config(colors=None, group_by=None):
    lemonadefashion_flask_monitoringdashboard.config.colors = colors or {'endpoint': '[0, 1, 2]'}
    lemonadefashion_flask_monitoringdashboard.config.group_by = group_by
    lemonadefashion_flask_monitoringdashboard.config.timezone = pytz.timezone('UTC')
    return lemonadefashion_flask_monitoringdashboard.config


@pytest.fixture
def view_func():
    return 'test'


@pytest.fixture
def dashboard(config, endpoint, view_func, rule='/'):
    app = Flask(__name__)
    app.add_url_rule(rule, endpoint=endpoint.name, view_func=lambda: view_func)
    lemonadefashion_flask_monitoringdashboard.bind(app, schedule=False)
    app.config['DEBUG'] = True
    app.config['TESTING'] = True
    with app.test_client() as client:
        yield client


@pytest.fixture
def dashboard_user(dashboard, user, config):
    """
    Returns a testing application that can be used for testing the endpoints.
    """
    dashboard.post('dashboard/login', data={'name': user.username, 'password': <PASSWORD>})
    yield dashboard
    dashboard.post('dashboard/logout')


@pytest.fixture
def request_context(dashboard):
    with dashboard.application.test_request_context():
        yield
en
0.863306
Returns a testing application that can be used for testing the endpoints.
2.146729
2
test/other/generate_pairwise_relative_data.py
xuebingwu/xtools
0
6631456
<filename>test/other/generate_pairwise_relative_data.py
l2n = {'A':1.0, 'C':2.0, 'G':3.0, 'T':4.0}
f=open('')
for line in f:
    flds = line.strip().split()
    line2 = f.readline()
    flds2 = line2.strip().split()
    print flds[0]+"\t"+str(l2n[flds[0][29]]/l2n[flds2[0][29]])
    print flds[0]+"\t1.0"
f.close()
none
1
2.606705
3
core/cooggerapp/views/home.py
bisguzar/coogger
0
6631457
<reponame>bisguzar/coogger from django.conf import settings from django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.auth.models import User from django.db.models import Q from django.http import Http404 from django.shortcuts import get_object_or_404, redirect, render from django.urls import resolve, reverse from django.views import View from django.views.generic import TemplateView from ..forms import ReportsForm from ..models import Content, Issue, ReportModel, SearchedWords, Topic from .utils import paginator class Home(TemplateView): template_name = "card/blogs.html" introduction_template_name = "home/introduction.html" def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) self.url_name = resolve(self.request.path_info).url_name self.is_authenticated = self.request.user.is_authenticated if not self.is_authenticated and self.url_name == "home": self.template_name = self.introduction_template_name context["introduction"] = True how_many = 3 * 8 queryset = User.objects.all().order_by("-date_joined")[:how_many] context["queryset"] = paginator(self.request, queryset, how_many) else: queryset = Content.objects.filter(status="ready") context["queryset"] = paginator(self.request, queryset) context["sort_topics"] = self.sort_topics() # TODO just pc context["issues"] = Issue.objects.filter(status="open")[: settings.PAGE_SIZE] context["insection_left"] = True context["insection_right"] = True return context @staticmethod def sort_topics(): topics = list() for topic in Topic.objects.all(): if (topic not in topics) and (len(topics) <= 30) and (not topic.editable): topics.append(topic) return topics class Report(LoginRequiredMixin, View): form_class = ReportsForm template_name = "home/report.html" def get(self, request, content_id, *args, **kwargs): if request.is_ajax(): report_form = self.form_class() context = dict(report_form=report_form, content_id=content_id) return render(request, self.template_name, context) raise Http404 def post(self, request, content_id, *args, **kwargs): report_form = self.form_class(request.POST) if report_form.is_valid(): content = Content.objects.get(id=content_id) if ReportModel.objects.filter(user=request.user, content=content).exists(): messages.error(request, "Your complaint is in the evaluation process.") return redirect(reverse("home")) report_form = report_form.save(commit=False) report_form.user = request.user report_form.content = content report_form.save() messages.error(request, "Your complaint has been received.") return redirect(reverse("home")) return HttpResponse(self.get(request, *args, **kwargs)) class Search(Home): content_search_template_name = "home/search/content.html" user_search_template_name = "home/search/user.html" def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context["queryset"] = paginator( self.request, self.get_queryset(), self.get_how_many() ) return context def get_queryset(self): name = self.request.GET["query"].lower() SearchedWords(word=name).save() if name.startswith("@"): name = name[1:] return User.objects.filter( Q(username__contains=name) | Q(first_name__contains=name) | Q(last_name__contains=name) ) return Content.objects.filter(Q(title__contains=name) & Q(status="ready")) def get_template_names(self): name = self.request.GET["query"].lower() if name.startswith("@"): return [self.user_search_template_name] return [self.content_search_template_name] def get_how_many(self): name = 
self.request.GET["query"].lower() if name.startswith("@"): return 30 return settings.PAGE_SIZE class Feed(Home): # TODO this class must be improved # make a new model for this op template_name = "card/blogs.html" def get_context_data(self, username, **kwargs): self.username = username context = super().get_context_data(**kwargs) return context def get_queryset(self): following = list( get_object_or_404(User, username=self.username).follow.following.all() ) queryset = list() contents = Content.objects.filter(status="ready") for user in following: queryset += contents.filter(user=user) queryset = sorted(queryset, reverse=True, key=lambda instance: instance.created) return queryset
en
0.81002
# TODO just pc # TODO this class must be improved # make a new model for this op
1.998434
2
sync_dl_ytapi/commands.py
PrinceOfPuppers/sync-dl-ytapi
0
6631458
import os
import sys
import shelve

import sync_dl.config as cfg

from sync_dl_ytapi.helpers import getPlId,pushOrderMoves
from sync_dl_ytapi.credentials import getCredentials,revokeTokens
from sync_dl_ytapi.ytapiWrappers import getItemIds,moveSong


# actual commands
def pushLocalOrder(plPath):
    credJson = getCredentials()
    if not credJson:
        return

    cfg.logger.info("Pushing Local Order to Remote...")

    with shelve.open(f"{plPath}/{cfg.metaDataName}", 'c',writeback=True) as metaData:
        url = metaData["url"]
        localIds = metaData["ids"]

        plId = getPlId(url)
        remoteIdPairs = getItemIds(credJson,plId)
        remoteIds,remoteItemIds = zip(*remoteIdPairs)

        cfg.logger.debug(f'Order Before Push: \n'+'\n'.join( [f'{i}: {str(remoteId)}' for i,remoteId in enumerate(remoteIds) ] ))

        moves = pushOrderMoves(remoteIds,remoteItemIds,localIds)
        for move in moves:
            newIndex, songId,itemId = move
            moveSong(credJson,plId,songId,itemId,newIndex)


def logout():
    revokeTokens()
es
0.774279
# actual commands
2.115407
2
tests/unit/utils/test_base_api_client.py
primitybio/cellengine-python-toolk
4
6631459
<gh_stars>1-10
import pytest
import requests
import responses

from cellengine.utils.api_client.APIError import APIError

MOCK_DATA = {"data": "some fake data"}
BASE_URL = "http://fake/"
SESSION = requests.Session()


def test_api_client(client):
    assert client._API_NAME == "CellEngine Python Toolkit"


@responses.activate
def test_should_get_raw(client):
    responses.add(responses.GET, BASE_URL + "test", json=MOCK_DATA)
    res = client._get("http://fake/test")
    assert res == MOCK_DATA


@responses.activate
def test_should_raise_on_empty_response(client):
    responses.add(responses.GET, BASE_URL + "test")
    with pytest.raises(APIError, match=r"200.*JSONDecodeError"):
        assert "" == client._get("http://fake/test")


@responses.activate
def test_should_raise_custom_message(client):
    mock_data = {"error": {"message": "some error"}}
    responses.add(responses.GET, BASE_URL + "test", status=500, json=mock_data)
    with pytest.raises(APIError, match=r"500.*some error"):
        assert client._get("http://fake/test") == ""


@responses.activate
def test_should_get(client):
    responses.add(responses.GET, BASE_URL + "test", json=MOCK_DATA)
    assert client._get("http://fake/test") == {"data": "some fake data"}


@responses.activate
def test_should_post(client):
    body = {"some": "body"}
    responses.add(responses.POST, BASE_URL + "test", json=body)
    assert client._post("http://fake/test", body) == {"some": "body"}


@responses.activate
def test_should_patch(client):
    body = {"some": "body"}
    responses.add(responses.PATCH, BASE_URL + "test", json=body)
    assert client._patch("http://fake/test", body) == {"some": "body"}


@responses.activate
def test_should_delete(client):
    responses.add(responses.DELETE, BASE_URL + "test", json="deleted")
    assert client._delete("http://fake/test") == b'"deleted"'
none
1
2.328032
2
lightly/embedding/embedding.py
shruti-shyam/lightly
0
6631460
""" Embedding Strategies """ # Copyright (c) 2020. Lightly AG and its affiliates. # All Rights Reserved import time import torch import lightly from lightly.embedding._base import BaseEmbedding from tqdm import tqdm if lightly._is_prefetch_generator_available(): from prefetch_generator import BackgroundGenerator class SelfSupervisedEmbedding(BaseEmbedding): """Implementation of self-supervised embedding models. Implements an embedding strategy based on self-supervised learning. A model backbone, self-supervised criterion, optimizer, and dataloader are passed to the constructor. The embedding itself is a pytorch-lightning module which can be trained very easily: https://pytorch-lightning.readthedocs.io/en/stable/ The implementation is based on contrastive learning. SimCLR: https://arxiv.org/abs/2002.05709 MoCo: https://arxiv.org/abs/1911.05722 Attributes: model: A backbone convolutional network with a projection head. criterion: A contrastive loss function. optimizer: A PyTorch optimizer. dataloader: A torchvision dataloader. scheduler: A PyTorch learning rate scheduler. Examples: >>> # define a model, criterion, optimizer, and dataloader above >>> import lightly.embedding as embedding >>> encoder = SelfSupervisedEmbedding( >>> model, >>> criterion, >>> optimizer, >>> dataloader, >>> ) >>> # train the self-supervised embedding with default settings >>> encoder.train_embedding() >>> # pass pytorch-lightning trainer arguments as kwargs >>> encoder.train_embedding(max_epochs=10) """ def __init__(self, model: torch.nn.Module, criterion: torch.nn.Module, optimizer: torch.optim.Optimizer, dataloader: torch.utils.data.DataLoader, scheduler=None): super(SelfSupervisedEmbedding, self).__init__( model, criterion, optimizer, dataloader, scheduler) def embed(self, dataloader: torch.utils.data.DataLoader, device: torch.device = None, to_numpy: bool = True): """Embeds images in a vector space. Args: dataloader: A torchvision dataloader. device: Selected device (see PyTorch documentation) to_numpy: Whether to return the embeddings as numpy array. Returns: A tuple consisting of a tensor or ndarray of embeddings with shape n_images x num_ftrs and labels, fnames Examples: >>> # embed images in vector space >>> embeddings, labels, fnames = encoder.embed(dataloader) """ self.model.eval() embeddings, labels, fnames = None, None, [] if lightly._is_prefetch_generator_available(): pbar = tqdm(BackgroundGenerator(dataloader, max_prefetch=3), total=len(dataloader)) else: pbar = tqdm(dataloader, total=len(dataloader)) efficiency = 0. embeddings = [] labels = [] with torch.no_grad(): start_time = time.time() for (img, label, fname) in pbar: img = img.to(device) label = label.to(device) fnames += [*fname] batch_size = img.shape[0] prepare_time = time.time() emb = self.model.backbone(img) emb = emb.detach().reshape(batch_size, -1) embeddings.append(emb) labels.append(label) process_time = time.time() efficiency = \ (process_time - prepare_time) / (process_time - start_time) pbar.set_description( "Compute efficiency: {:.2f}".format(efficiency)) start_time = time.time() embeddings = torch.cat(embeddings, 0) labels = torch.cat(labels, 0) if to_numpy: embeddings = embeddings.cpu().numpy() labels = labels.cpu().numpy() return embeddings, labels, fnames
""" Embedding Strategies """ # Copyright (c) 2020. Lightly AG and its affiliates. # All Rights Reserved import time import torch import lightly from lightly.embedding._base import BaseEmbedding from tqdm import tqdm if lightly._is_prefetch_generator_available(): from prefetch_generator import BackgroundGenerator class SelfSupervisedEmbedding(BaseEmbedding): """Implementation of self-supervised embedding models. Implements an embedding strategy based on self-supervised learning. A model backbone, self-supervised criterion, optimizer, and dataloader are passed to the constructor. The embedding itself is a pytorch-lightning module which can be trained very easily: https://pytorch-lightning.readthedocs.io/en/stable/ The implementation is based on contrastive learning. SimCLR: https://arxiv.org/abs/2002.05709 MoCo: https://arxiv.org/abs/1911.05722 Attributes: model: A backbone convolutional network with a projection head. criterion: A contrastive loss function. optimizer: A PyTorch optimizer. dataloader: A torchvision dataloader. scheduler: A PyTorch learning rate scheduler. Examples: >>> # define a model, criterion, optimizer, and dataloader above >>> import lightly.embedding as embedding >>> encoder = SelfSupervisedEmbedding( >>> model, >>> criterion, >>> optimizer, >>> dataloader, >>> ) >>> # train the self-supervised embedding with default settings >>> encoder.train_embedding() >>> # pass pytorch-lightning trainer arguments as kwargs >>> encoder.train_embedding(max_epochs=10) """ def __init__(self, model: torch.nn.Module, criterion: torch.nn.Module, optimizer: torch.optim.Optimizer, dataloader: torch.utils.data.DataLoader, scheduler=None): super(SelfSupervisedEmbedding, self).__init__( model, criterion, optimizer, dataloader, scheduler) def embed(self, dataloader: torch.utils.data.DataLoader, device: torch.device = None, to_numpy: bool = True): """Embeds images in a vector space. Args: dataloader: A torchvision dataloader. device: Selected device (see PyTorch documentation) to_numpy: Whether to return the embeddings as numpy array. Returns: A tuple consisting of a tensor or ndarray of embeddings with shape n_images x num_ftrs and labels, fnames Examples: >>> # embed images in vector space >>> embeddings, labels, fnames = encoder.embed(dataloader) """ self.model.eval() embeddings, labels, fnames = None, None, [] if lightly._is_prefetch_generator_available(): pbar = tqdm(BackgroundGenerator(dataloader, max_prefetch=3), total=len(dataloader)) else: pbar = tqdm(dataloader, total=len(dataloader)) efficiency = 0. embeddings = [] labels = [] with torch.no_grad(): start_time = time.time() for (img, label, fname) in pbar: img = img.to(device) label = label.to(device) fnames += [*fname] batch_size = img.shape[0] prepare_time = time.time() emb = self.model.backbone(img) emb = emb.detach().reshape(batch_size, -1) embeddings.append(emb) labels.append(label) process_time = time.time() efficiency = \ (process_time - prepare_time) / (process_time - start_time) pbar.set_description( "Compute efficiency: {:.2f}".format(efficiency)) start_time = time.time() embeddings = torch.cat(embeddings, 0) labels = torch.cat(labels, 0) if to_numpy: embeddings = embeddings.cpu().numpy() labels = labels.cpu().numpy() return embeddings, labels, fnames
en
0.701483
Embedding Strategies # Copyright (c) 2020. Lightly AG and its affiliates. # All Rights Reserved Implementation of self-supervised embedding models. Implements an embedding strategy based on self-supervised learning. A model backbone, self-supervised criterion, optimizer, and dataloader are passed to the constructor. The embedding itself is a pytorch-lightning module which can be trained very easily: https://pytorch-lightning.readthedocs.io/en/stable/ The implementation is based on contrastive learning. SimCLR: https://arxiv.org/abs/2002.05709 MoCo: https://arxiv.org/abs/1911.05722 Attributes: model: A backbone convolutional network with a projection head. criterion: A contrastive loss function. optimizer: A PyTorch optimizer. dataloader: A torchvision dataloader. scheduler: A PyTorch learning rate scheduler. Examples: >>> # define a model, criterion, optimizer, and dataloader above >>> import lightly.embedding as embedding >>> encoder = SelfSupervisedEmbedding( >>> model, >>> criterion, >>> optimizer, >>> dataloader, >>> ) >>> # train the self-supervised embedding with default settings >>> encoder.train_embedding() >>> # pass pytorch-lightning trainer arguments as kwargs >>> encoder.train_embedding(max_epochs=10) Embeds images in a vector space. Args: dataloader: A torchvision dataloader. device: Selected device (see PyTorch documentation) to_numpy: Whether to return the embeddings as numpy array. Returns: A tuple consisting of a tensor or ndarray of embeddings with shape n_images x num_ftrs and labels, fnames Examples: >>> # embed images in vector space >>> embeddings, labels, fnames = encoder.embed(dataloader)
2.515184
3
hmlvaraus/migrations/0001_initial.py
haltu/hmlvaraus-backend
1
6631461
<filename>hmlvaraus/migrations/0001_initial.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-05-09 05:10
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('resources', '0051_auto_20170509_0758'),
    ]

    operations = [
        migrations.CreateModel(
            name='Berth',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('width_cm', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Berth width')),
                ('depth_cm', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Berth depth')),
                ('length_cm', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Berth length')),
                ('type', models.CharField(choices=[('dock', 'dock'), ('ground', 'ground'), ('number', 'number')], default='dock', max_length=20, verbose_name='Berth type')),
                ('resource', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='resources.Resource', verbose_name='Resource')),
            ],
        ),
    ]
en
0.768267
# -*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2017-05-09 05:10
1.589183
2
src/pyaides/functools/__init__.py
okomestudio/pyaides
0
6631462
from .cache import cachetofile # noqa
none
1
1.01291
1
modules/rltrain.py
SaeedNajafi/tagger
9
6631463
<filename>modules/rltrain.py from itertools import * import torch import numpy as np import torch.nn as nn from torch.nn import init from torch.autograd import Variable hasCuda = torch.cuda.is_available() class RLTrain(nn.Module): """ This module applies biased actor-critic training to the decoder RNN. """ def __init__(self, cfg): super(RLTrain, self).__init__() self.cfg = cfg #Critic: self.cr_size = cfg.w_rnn_units + cfg.dec_rnn_units self.layer1 = nn.Linear( self.cr_size, self.cr_size, bias=True ) self.layer2 = nn.Linear( self.cr_size, self.cr_size, bias=True ) self.layer3 = nn.Linear( self.cr_size, 1, bias=True ) self.param_init() return def param_init(self): for name, param in self.named_parameters(): if 'bias' in name: init.constant(param, 0.0) if 'weight' in name: init.xavier_uniform(param) return #Critic approximates the state-value function of a state. def V(self, S): #Do not back propagate through S! in_S = Variable(S.data.cuda(), requires_grad=False) if hasCuda else Variable(S.data, requires_grad=False) cfg = self.cfg l = cfg.gamma if l>=1 or l<0: print "INFO: 0 <= discount factor < 1 !" exit() #We do not apply any dropout layer as this is a regression model #and the optimizer will apply L2 regularization on the weights. H1 = nn.functional.leaky_relu(self.layer1(in_S)) H2 = nn.functional.leaky_relu(self.layer2(H1)) H3 = nn.functional.sigmoid(self.layer3(H2)) #H3 is now scaler between 0 and 1 v = torch.div(H3, 1.0-l) #v is now scaler between 0 and 1.0-l which are the boundries for returns w.r.t. l and 0/1 rewards. return v.view(cfg.d_batch_size, cfg.max_s_len) #least square loss for V. #L2 regularization will be done by optimizer. def V_loss(self, Returns, prev_V): """ Returns are the temporal difference returns calculated for each step. They are the target regression values for the Critic V. prev_V is the previous estimates of the Critic V for the returns. We want to minimize the Mean Squared Error between Returns and prev_V. """ cfg = self.cfg #Do not back propagate through Returns! in_Returns = Variable(Returns.data.cuda(), requires_grad=False) if hasCuda else Variable(Returns.data, requires_grad=False) #mask pads w_mask = Variable(cfg.B['w_mask'].cuda()) if hasCuda else Variable(cfg.B['w_mask']) #No negative, this is MSE loss MSEloss = torch.mean(torch.mean(torch.pow(prev_V-in_Returns, 2.0) * w_mask, dim=1), dim=0) #MSEloss will be plugged in a separate optimizer. return MSEloss def forward(self, H, mldecoder): cfg = self.cfg dec_rnn = mldecoder.dec_rnn affine = mldecoder.affine tag_em = mldecoder.tag_em #zero the pad vector tag_em.weight.data[cfg.tag_pad_id].fill_(0.0) #Create a variable for initial hidden vector of RNN. zeros = torch.zeros(cfg.d_batch_size, cfg.dec_rnn_units) h0 = Variable(zeros.cuda()) if hasCuda else Variable(zeros) #Create a variable for the initial previous tag. 
zeros = torch.zeros(cfg.d_batch_size, cfg.tag_em_size) Go_symbol = Variable(zeros.cuda()) if hasCuda else Variable(zeros) #critic V estimates states = [] taken_actions = [] action_log_policies = [] for i in range(cfg.max_s_len): H_i = H[:,i,:] if i==0: prev_output = Go_symbol h = h0 c = h0 input = torch.cat((prev_output, H_i), dim=1) output, c = dec_rnn(input, (h, c)) output_H = torch.cat((output, H_i), dim=1) states.append(output_H) score = affine(output_H) #For the next step h = output log_p, gen_idx = nn.functional.log_softmax(score, dim=1).max(dim=1) prev_output = tag_em(gen_idx) taken_actions.append(gen_idx) action_log_policies.append(log_p) S = torch.stack(states, dim=1) V_es = self.V(S) taken_actions = torch.stack(taken_actions, dim=1) action_log_policies = torch.stack(action_log_policies, dim=1) type = cfg.rltrain_type if type=='AC': return self.Actor_Critic(V_es, taken_actions, action_log_policies) else: print "INFO: RLTrain type error!" exit() return None, None def Actor_Critic(self, V_es, taken_actions, action_log_policies): cfg = self.cfg l = cfg.gamma n = cfg.n_step if n<0: print "INFO: 1 <= n step !" exit() #Building gamma matrix to calculate return for each step. powers = np.arange(cfg.max_s_len) bases = np.full((1,cfg.max_s_len), l) rows = np.power(bases, powers) inverse_rows = 1.0/rows inverse_cols = inverse_rows.reshape((cfg.max_s_len,1)) gammaM = np.tril(np.triu(np.multiply(inverse_cols, rows)), k=n-1) gM_tensor = torch.from_numpy(gammaM.T).float() """ for n = 3, gamma=0.9 gM_tensor: array( [[1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0.9 , 1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0.81, 0.9 , 1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0.81, 0.9 , 1. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0.81, 0.9 , 1. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0.81, 0.9 , 1. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0.81, 0.9 , 1. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0.81, 0.9 , 1. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0.81, 0.9 , 1. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.81, 0.9 , 1. ] ]) """ if hasCuda: gM = Variable(gM_tensor.cuda(), requires_grad=False) else: gM = Variable(gM_tensor, requires_grad=False) tag = Variable(cfg.B['tag'].cuda()) if hasCuda else Variable(cfg.B['tag']) w_mask = Variable(cfg.B['w_mask'].cuda()) if hasCuda else Variable(cfg.B['w_mask']) is_true_tag = torch.eq(taken_actions, tag) #0/1 reward (hamming loss) for each prediction. rewards = is_true_tag.float() * w_mask V_es = V_es * w_mask Returns = torch.matmul(rewards, gM) for i in range(cfg.max_s_len-n): Returns[:,i].data = Returns[:,i].data + (l ** n) * V_es[:, i + n].data advantages = Returns - V_es pos_neq = torch.ge(advantages, 0.0).float() signs = torch.eq(pos_neq, rewards).float() #Do not back propagate through Returns and V_es! biased_advantages = signs * advantages if hasCuda: deltas = Variable(biased_advantages.data.cuda(), requires_grad=False) else: deltas = Variable(biased_advantages.data, requires_grad=False) rlloss = -torch.mean(torch.mean(action_log_policies * deltas * w_mask, dim=1), dim=0) vloss = self.V_loss(Returns, V_es) return rlloss, vloss
<filename>modules/rltrain.py from itertools import * import torch import numpy as np import torch.nn as nn from torch.nn import init from torch.autograd import Variable hasCuda = torch.cuda.is_available() class RLTrain(nn.Module): """ This module applies biased actor-critic training to the decoder RNN. """ def __init__(self, cfg): super(RLTrain, self).__init__() self.cfg = cfg #Critic: self.cr_size = cfg.w_rnn_units + cfg.dec_rnn_units self.layer1 = nn.Linear( self.cr_size, self.cr_size, bias=True ) self.layer2 = nn.Linear( self.cr_size, self.cr_size, bias=True ) self.layer3 = nn.Linear( self.cr_size, 1, bias=True ) self.param_init() return def param_init(self): for name, param in self.named_parameters(): if 'bias' in name: init.constant(param, 0.0) if 'weight' in name: init.xavier_uniform(param) return #Critic approximates the state-value function of a state. def V(self, S): #Do not back propagate through S! in_S = Variable(S.data.cuda(), requires_grad=False) if hasCuda else Variable(S.data, requires_grad=False) cfg = self.cfg l = cfg.gamma if l>=1 or l<0: print "INFO: 0 <= discount factor < 1 !" exit() #We do not apply any dropout layer as this is a regression model #and the optimizer will apply L2 regularization on the weights. H1 = nn.functional.leaky_relu(self.layer1(in_S)) H2 = nn.functional.leaky_relu(self.layer2(H1)) H3 = nn.functional.sigmoid(self.layer3(H2)) #H3 is now scaler between 0 and 1 v = torch.div(H3, 1.0-l) #v is now scaler between 0 and 1.0-l which are the boundries for returns w.r.t. l and 0/1 rewards. return v.view(cfg.d_batch_size, cfg.max_s_len) #least square loss for V. #L2 regularization will be done by optimizer. def V_loss(self, Returns, prev_V): """ Returns are the temporal difference returns calculated for each step. They are the target regression values for the Critic V. prev_V is the previous estimates of the Critic V for the returns. We want to minimize the Mean Squared Error between Returns and prev_V. """ cfg = self.cfg #Do not back propagate through Returns! in_Returns = Variable(Returns.data.cuda(), requires_grad=False) if hasCuda else Variable(Returns.data, requires_grad=False) #mask pads w_mask = Variable(cfg.B['w_mask'].cuda()) if hasCuda else Variable(cfg.B['w_mask']) #No negative, this is MSE loss MSEloss = torch.mean(torch.mean(torch.pow(prev_V-in_Returns, 2.0) * w_mask, dim=1), dim=0) #MSEloss will be plugged in a separate optimizer. return MSEloss def forward(self, H, mldecoder): cfg = self.cfg dec_rnn = mldecoder.dec_rnn affine = mldecoder.affine tag_em = mldecoder.tag_em #zero the pad vector tag_em.weight.data[cfg.tag_pad_id].fill_(0.0) #Create a variable for initial hidden vector of RNN. zeros = torch.zeros(cfg.d_batch_size, cfg.dec_rnn_units) h0 = Variable(zeros.cuda()) if hasCuda else Variable(zeros) #Create a variable for the initial previous tag. 
zeros = torch.zeros(cfg.d_batch_size, cfg.tag_em_size) Go_symbol = Variable(zeros.cuda()) if hasCuda else Variable(zeros) #critic V estimates states = [] taken_actions = [] action_log_policies = [] for i in range(cfg.max_s_len): H_i = H[:,i,:] if i==0: prev_output = Go_symbol h = h0 c = h0 input = torch.cat((prev_output, H_i), dim=1) output, c = dec_rnn(input, (h, c)) output_H = torch.cat((output, H_i), dim=1) states.append(output_H) score = affine(output_H) #For the next step h = output log_p, gen_idx = nn.functional.log_softmax(score, dim=1).max(dim=1) prev_output = tag_em(gen_idx) taken_actions.append(gen_idx) action_log_policies.append(log_p) S = torch.stack(states, dim=1) V_es = self.V(S) taken_actions = torch.stack(taken_actions, dim=1) action_log_policies = torch.stack(action_log_policies, dim=1) type = cfg.rltrain_type if type=='AC': return self.Actor_Critic(V_es, taken_actions, action_log_policies) else: print "INFO: RLTrain type error!" exit() return None, None def Actor_Critic(self, V_es, taken_actions, action_log_policies): cfg = self.cfg l = cfg.gamma n = cfg.n_step if n<0: print "INFO: 1 <= n step !" exit() #Building gamma matrix to calculate return for each step. powers = np.arange(cfg.max_s_len) bases = np.full((1,cfg.max_s_len), l) rows = np.power(bases, powers) inverse_rows = 1.0/rows inverse_cols = inverse_rows.reshape((cfg.max_s_len,1)) gammaM = np.tril(np.triu(np.multiply(inverse_cols, rows)), k=n-1) gM_tensor = torch.from_numpy(gammaM.T).float() """ for n = 3, gamma=0.9 gM_tensor: array( [[1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0.9 , 1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0.81, 0.9 , 1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0.81, 0.9 , 1. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0.81, 0.9 , 1. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0.81, 0.9 , 1. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0.81, 0.9 , 1. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0.81, 0.9 , 1. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0.81, 0.9 , 1. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.81, 0.9 , 1. ] ]) """ if hasCuda: gM = Variable(gM_tensor.cuda(), requires_grad=False) else: gM = Variable(gM_tensor, requires_grad=False) tag = Variable(cfg.B['tag'].cuda()) if hasCuda else Variable(cfg.B['tag']) w_mask = Variable(cfg.B['w_mask'].cuda()) if hasCuda else Variable(cfg.B['w_mask']) is_true_tag = torch.eq(taken_actions, tag) #0/1 reward (hamming loss) for each prediction. rewards = is_true_tag.float() * w_mask V_es = V_es * w_mask Returns = torch.matmul(rewards, gM) for i in range(cfg.max_s_len-n): Returns[:,i].data = Returns[:,i].data + (l ** n) * V_es[:, i + n].data advantages = Returns - V_es pos_neq = torch.ge(advantages, 0.0).float() signs = torch.eq(pos_neq, rewards).float() #Do not back propagate through Returns and V_es! biased_advantages = signs * advantages if hasCuda: deltas = Variable(biased_advantages.data.cuda(), requires_grad=False) else: deltas = Variable(biased_advantages.data, requires_grad=False) rlloss = -torch.mean(torch.mean(action_log_policies * deltas * w_mask, dim=1), dim=0) vloss = self.V_loss(Returns, V_es) return rlloss, vloss
en
0.738731
This module applies biased actor-critic training to the decoder RNN. #Critic: #Critic approximates the state-value function of a state. #Do not back propagate through S! #We do not apply any dropout layer as this is a regression model #and the optimizer will apply L2 regularization on the weights. #H3 is now scaler between 0 and 1 #v is now scaler between 0 and 1.0-l which are the boundries for returns w.r.t. l and 0/1 rewards. #least square loss for V. #L2 regularization will be done by optimizer. Returns are the temporal difference returns calculated for each step. They are the target regression values for the Critic V. prev_V is the previous estimates of the Critic V for the returns. We want to minimize the Mean Squared Error between Returns and prev_V. #Do not back propagate through Returns! #mask pads #No negative, this is MSE loss #MSEloss will be plugged in a separate optimizer. #zero the pad vector #Create a variable for initial hidden vector of RNN. #Create a variable for the initial previous tag. #critic V estimates #For the next step #Building gamma matrix to calculate return for each step. for n = 3, gamma=0.9 gM_tensor: array( [[1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0.9 , 1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0.81, 0.9 , 1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0.81, 0.9 , 1. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0.81, 0.9 , 1. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0.81, 0.9 , 1. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0.81, 0.9 , 1. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0.81, 0.9 , 1. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0.81, 0.9 , 1. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.81, 0.9 , 1. ] ]) #0/1 reward (hamming loss) for each prediction. #Do not back propagate through Returns and V_es!
3.081838
3
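The n-step return computation in the record above hinges on the banded gamma matrix built in Actor_Critic. A minimal NumPy sketch of just that step (the names n_step_gamma_matrix and max_len are illustrative, not from the source), reproducing the n=3, gamma=0.9 example printed in the record's docstring:

import numpy as np

def n_step_gamma_matrix(max_len, gamma, n):
    # gammaM[i, j] = gamma**(j - i) for 0 <= j - i < n, else 0.
    powers = np.arange(max_len)
    rows = np.power(np.full((1, max_len), gamma), powers)   # gamma**j along columns
    inverse_cols = (1.0 / rows).reshape((max_len, 1))       # gamma**(-i) along rows
    return np.tril(np.triu(inverse_cols * rows), k=n - 1)

gammaM = n_step_gamma_matrix(10, 0.9, 3)
gM = gammaM.T  # used as Returns = rewards @ gM, i.e. R_i = sum_{k<n} gamma**k * r_{i+k}
print(np.round(gM, 2))  # matches the matrix shown in the record's docstring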
07_gashlycrumb/solution1.py
sidsid14/tiny_python_projects
742
6631464
<gh_stars>100-1000 #!/usr/bin/env python3 """Lookup tables""" import argparse # -------------------------------------------------- def get_args(): """get command-line arguments""" parser = argparse.ArgumentParser( description='Gashlycrumb', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('letter', help='Letter(s)', metavar='letter', nargs='+', type=str) parser.add_argument('-f', '--file', help='Input file', metavar='FILE', type=argparse.FileType('r'), default='gashlycrumb.txt') return parser.parse_args() # -------------------------------------------------- def main(): """Make a jazz noise here""" args = get_args() lookup = {} for line in args.file: lookup[line[0].upper()] = line.rstrip() for letter in args.letter: if letter.upper() in lookup: print(lookup[letter.upper()]) else: print(f'I do not know "{letter}".') # -------------------------------------------------- if __name__ == '__main__': main()
#!/usr/bin/env python3 """Lookup tables""" import argparse # -------------------------------------------------- def get_args(): """get command-line arguments""" parser = argparse.ArgumentParser( description='Gashlycrumb', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('letter', help='Letter(s)', metavar='letter', nargs='+', type=str) parser.add_argument('-f', '--file', help='Input file', metavar='FILE', type=argparse.FileType('r'), default='gashlycrumb.txt') return parser.parse_args() # -------------------------------------------------- def main(): """Make a jazz noise here""" args = get_args() lookup = {} for line in args.file: lookup[line[0].upper()] = line.rstrip() for letter in args.letter: if letter.upper() in lookup: print(lookup[letter.upper()]) else: print(f'I do not know "{letter}".') # -------------------------------------------------- if __name__ == '__main__': main()
en
0.154716
#!/usr/bin/env python3 Lookup tables # -------------------------------------------------- get command-line arguments # -------------------------------------------------- Make a jazz noise here # --------------------------------------------------
3.818656
4
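The lookup-table pattern in the record above can also be written with dict.get and a default, which folds the membership test into the lookup itself. A small sketch assuming the same gashlycrumb.txt format (one line per letter, e.g. "A is for Amy who fell down the stairs"):

lookup = {}
with open('gashlycrumb.txt') as fh:
    for line in fh:
        lookup[line[0].upper()] = line.rstrip()

for letter in ['a', '2']:
    # dict.get returns the fallback string when the key is missing
    print(lookup.get(letter.upper(), f'I do not know "{letter}".'))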
example3.py
MWedl/python-logstash
1
6631465
<reponame>MWedl/python-logstash import logging import logstash import sys host = '127.0.0.1' test_logger = logging.getLogger('python-logstash-logger') test_logger.setLevel(logging.INFO) test_logger.addHandler(logstash.HTTPLogstashHandler(host, 1337, ssl=True, verify=True, username="user", password="pw")) # test_logger.addHandler(logstash.TCPLogstashHandler(host, 5959, version=1)) test_logger.error('python-logstash: test logstash error message.') test_logger.info('python-logstash: test logstash info message.') test_logger.warning('python-logstash: test logstash warning message.') # add extra field to logstash message extra = { 'test_string': 'python version: ' + repr(sys.version_info), 'test_boolean': True, 'test_dict': {'a': 1, 'b': set(['a'])}, 'test_float': 1.23, 'test_integer': 123, 'test_list': [1, 2, 3], } test_logger.info('python-logstash: test extra fields', extra=extra)
import logging import logstash import sys host = '127.0.0.1' test_logger = logging.getLogger('python-logstash-logger') test_logger.setLevel(logging.INFO) test_logger.addHandler(logstash.HTTPLogstashHandler(host, 1337, ssl=True, verify=True, username="user", password="pw")) # test_logger.addHandler(logstash.TCPLogstashHandler(host, 5959, version=1)) test_logger.error('python-logstash: test logstash error message.') test_logger.info('python-logstash: test logstash info message.') test_logger.warning('python-logstash: test logstash warning message.') # add extra field to logstash message extra = { 'test_string': 'python version: ' + repr(sys.version_info), 'test_boolean': True, 'test_dict': {'a': 1, 'b': set(['a'])}, 'test_float': 1.23, 'test_integer': 123, 'test_list': [1, 2, 3], } test_logger.info('python-logstash: test extra fields', extra=extra)
en
0.213432
# test_logger.addHandler(logstash.TCPLogstashHandler(host, 5959, version=1)) # add extra field to logstash message
2.645701
3
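The handlers in the record above are attached in code; the TCP handler referenced in its commented-out line can also be wired up declaratively with the standard-library dictConfig. A sketch only: the constructor arguments mirror that commented line and may differ in the MWedl fork.

import logging
import logging.config

logging.config.dictConfig({
    "version": 1,
    "handlers": {
        "logstash": {
            "class": "logstash.TCPLogstashHandler",  # same handler as the commented line above
            "host": "127.0.0.1",
            "port": 5959,
            "version": 1,
        },
    },
    "loggers": {
        "python-logstash-logger": {"handlers": ["logstash"], "level": "INFO"},
    },
})

logging.getLogger("python-logstash-logger").info("python-logstash: configured via dictConfig")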
My Phrases/chrome_firefox_view_history.py
yasapurnama/autokey-osx-ify
0
6631466
import re window = window.get_active_class() is_chrome = re.search('chrome', window, re.IGNORECASE) is_firefox = re.search('firefox', window, re.IGNORECASE) if is_chrome or is_firefox: keys = "<ctrl>+h" keyboard.send_keys(keys)
import re window = window.get_active_class() is_chrome = re.search('chrome', window, re.IGNORECASE) is_firefox = re.search('firefox', window, re.IGNORECASE) if is_chrome or is_firefox: keys = "<ctrl>+h" keyboard.send_keys(keys)
none
1
2.764446
3
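The record above is an AutoKey phrase script and is not runnable on its own: window and keyboard are objects injected by AutoKey's scripting API, and the original reuses the name window for the class string, shadowing the API object. An annotated sketch of the same logic, assuming those objects are supplied by the host:

import re

# `window` and `keyboard` are assumed to be provided by AutoKey at run time.
active_class = window.get_active_class()           # e.g. "google-chrome.Google-chrome"
is_chrome = re.search('chrome', active_class, re.IGNORECASE)
is_firefox = re.search('firefox', active_class, re.IGNORECASE)

if is_chrome or is_firefox:
    keyboard.send_keys("<ctrl>+h")                 # Ctrl+H opens history in both browsers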
programs/beep.py
ckumpe/robot-inventor-tools
11
6631467
<filename>programs/beep.py from mindstorms import MSHub, Motor, MotorPair, ColorSensor, DistanceSensor, App from mindstorms.control import wait_for_seconds, wait_until, Timer from mindstorms.operator import greater_than, greater_than_or_equal_to, less_than, less_than_or_equal_to, equal_to, not_equal_to import math hub = MSHub() hub.speaker.beep() print('This text will be displayed in the console.')
<filename>programs/beep.py from mindstorms import MSHub, Motor, MotorPair, ColorSensor, DistanceSensor, App from mindstorms.control import wait_for_seconds, wait_until, Timer from mindstorms.operator import greater_than, greater_than_or_equal_to, less_than, less_than_or_equal_to, equal_to, not_equal_to import math hub = MSHub() hub.speaker.beep() print('This text will be displayed in the console.')
none
1
2.660846
3
packages/pytest-simcore/src/pytest_simcore/docker_compose.py
colinRawlings/osparc-simcore
25
6631468
<reponame>colinRawlings/osparc-simcore<filename>packages/pytest-simcore/src/pytest_simcore/docker_compose.py<gh_stars>10-100 # pylint:disable=unused-variable # pylint:disable=unused-argument # pylint:disable=redefined-outer-name """ Fixtures to create docker-compose.yaml configururation files (as in Makefile) Basically runs `docker-compose config """ import os import shutil import sys from copy import deepcopy from pathlib import Path from pprint import pformat from typing import Any, Dict, List, Union import pytest import yaml from _pytest.config import ExitCode from dotenv import dotenv_values from .helpers import ( FIXTURE_CONFIG_CORE_SERVICES_SELECTION, FIXTURE_CONFIG_OPS_SERVICES_SELECTION, ) from .helpers.utils_docker import get_ip, run_docker_compose_config, save_docker_infos @pytest.fixture(scope="session") def testing_environ_vars(env_devel_file: Path) -> Dict[str, Union[str, None]]: """ Loads and extends .env-devel returning all environment variables key=value """ env_devel_unresolved = dotenv_values(env_devel_file, verbose=True, interpolate=True) # get from environ if applicable env_devel = { key: os.environ.get(key, value) for key, value in env_devel_unresolved.items() } # These are overrides to .env-devel or an extension to them env_devel["LOG_LEVEL"] = "DEBUG" env_devel["REGISTRY_SSL"] = "False" env_devel["REGISTRY_URL"] = "{}:5000".format(get_ip()) env_devel["REGISTRY_PATH"] = "127.0.0.1:5000" env_devel["REGISTRY_USER"] = "simcore" env_devel["REGISTRY_PW"] = "" env_devel["REGISTRY_AUTH"] = "False" # CAREFUL! FIXME: monkeypatch autouse ?? env_devel["SWARM_STACK_NAME"] = "pytest-simcore" env_devel.setdefault( "SWARM_STACK_NAME_NO_HYPHEN", env_devel["SWARM_STACK_NAME"].replace("-", "_") ) env_devel["DIRECTOR_REGISTRY_CACHING"] = "False" env_devel.setdefault("DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS", "") env_devel.setdefault("DIRECTOR_SELF_SIGNED_SSL_SECRET_ID", "") env_devel.setdefault("DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME", "") env_devel.setdefault("DIRECTOR_SELF_SIGNED_SSL_FILENAME", "") env_devel["API_SERVER_DEV_FEATURES_ENABLED"] = "1" if not "DOCKER_REGISTRY" in os.environ: env_devel["DOCKER_REGISTRY"] = "local" if not "DOCKER_IMAGE_TAG" in os.environ: env_devel["DOCKER_IMAGE_TAG"] = "production" return env_devel @pytest.fixture(scope="module") def env_file_for_testing( testing_environ_vars: Dict[str, str], temp_folder: Path, osparc_simcore_root_dir: Path, ) -> Path: """Dumps all the environment variables into an $(temp_folder)/.env.test file Pass path as argument in 'docker-compose --env-file ... 
' """ # SEE: # https://docs.docker.com/compose/env-file/ # https://docs.docker.com/compose/environment-variables/#the-env-file env_test_path = temp_folder / ".env.test" with env_test_path.open("wt") as fh: print( f"# Auto-generated from env_file_for_testing in {__file__}", file=fh, ) for key in sorted(testing_environ_vars.keys()): print(f"{key}={testing_environ_vars[key]}", file=fh) # # WARNING: since compose files have references to ../.env we MUST create .env # backup_path = osparc_simcore_root_dir / ".env.bak" env_path = osparc_simcore_root_dir / ".env" if env_path.exists(): shutil.copy(env_path, backup_path) shutil.copy(env_test_path, env_path) yield env_path if backup_path.exists(): backup_path.replace(env_path) @pytest.fixture(scope="module") def simcore_docker_compose( osparc_simcore_root_dir: Path, env_file_for_testing: Path, temp_folder: Path, ) -> Dict[str, Any]: """Resolves docker-compose for simcore stack in local host Produces same as `make .stack-simcore-version.yml` in a temporary folder """ COMPOSE_FILENAMES = ["docker-compose.yml", "docker-compose.local.yml"] # ensures .env at git_root_dir assert env_file_for_testing.exists() # target docker-compose path docker_compose_paths = [ osparc_simcore_root_dir / "services" / filename for filename in COMPOSE_FILENAMES ] assert all( docker_compose_path.exists() for docker_compose_path in docker_compose_paths ) config = run_docker_compose_config( project_dir=osparc_simcore_root_dir / "services", docker_compose_paths=docker_compose_paths, env_file_path=env_file_for_testing, destination_path=temp_folder / "simcore_docker_compose.yml", ) print("simcore docker-compose:\n%s", pformat(config)) return config @pytest.fixture(scope="module") def ops_docker_compose( osparc_simcore_root_dir: Path, env_file_for_testing: Path, temp_folder: Path ) -> Dict[str, Any]: """Filters only services in docker-compose-ops.yml and returns yaml data Produces same as `make .stack-ops.yml` in a temporary folder """ # ensures .env at git_root_dir, which will be used as current directory assert env_file_for_testing.exists() # target docker-compose path docker_compose_path = ( osparc_simcore_root_dir / "services" / "docker-compose-ops.yml" ) assert docker_compose_path.exists() config = run_docker_compose_config( project_dir=osparc_simcore_root_dir / "services", docker_compose_paths=docker_compose_path, env_file_path=env_file_for_testing, destination_path=temp_folder / "ops_docker_compose.yml", ) print("ops docker-compose:\n%s", pformat(config)) return config @pytest.fixture(scope="module") def core_services_selection(request) -> List[str]: """Selection of services from the simcore stack""" core_services = getattr(request.module, FIXTURE_CONFIG_CORE_SERVICES_SELECTION, []) assert ( core_services ), f"Expected at least one service in '{FIXTURE_CONFIG_CORE_SERVICES_SELECTION}' within '{request.module.__name__}'" return core_services @pytest.fixture(scope="module") def core_docker_compose_file( core_services_selection: List[str], temp_folder: Path, simcore_docker_compose: Dict ) -> Path: """A compose with a selection of services from simcore_docker_compose Creates a docker-compose config file for every stack of services in 'core_services_selection' module variable File is created in a temp folder """ docker_compose_path = Path(temp_folder / "simcore_docker_compose.filtered.yml") _filter_services_and_dump( core_services_selection, simcore_docker_compose, docker_compose_path ) return docker_compose_path @pytest.fixture(scope="module") def ops_services_selection(request) 
-> List[str]: """Selection of services from the ops stack""" ops_services = getattr(request.module, FIXTURE_CONFIG_OPS_SERVICES_SELECTION, []) return ops_services @pytest.fixture(scope="module") def ops_docker_compose_file( ops_services_selection: List[str], temp_folder: Path, ops_docker_compose: Dict ) -> Path: """A compose with a selection of services from ops_docker_compose Creates a docker-compose config file for every stack of services in 'ops_services_selection' module variable File is created in a temp folder """ docker_compose_path = Path(temp_folder / "ops_docker_compose.filtered.yml") _filter_services_and_dump( ops_services_selection, ops_docker_compose, docker_compose_path ) return docker_compose_path @pytest.hookimpl() def pytest_exception_interact(node, call, report): # get the node root dir (guaranteed to exist) root_directory: Path = Path(node.config.rootdir) failed_test_directory = root_directory / "test_failures" / node.name save_docker_infos(failed_test_directory) @pytest.hookimpl() def pytest_sessionfinish(session: pytest.Session, exitstatus: ExitCode) -> None: if exitstatus == ExitCode.TESTS_FAILED: # get the node root dir (guaranteed to exist) root_directory: Path = Path(session.fspath) failed_test_directory = root_directory / "test_failures" / session.name save_docker_infos(failed_test_directory) # HELPERS --------------------------------------------- def _minio_fix(service_environs: Dict) -> Dict: """this hack ensures that S3 is accessed from the host at all time, thus pre-signed links work.""" if "S3_ENDPOINT" in service_environs: service_environs["S3_ENDPOINT"] = f"{get_ip()}:9001" return service_environs def _filter_services_and_dump( include: List, services_compose: Dict, docker_compose_path: Path ): content = deepcopy(services_compose) # filters services remove = [name for name in content["services"] if name not in include] for name in remove: content["services"].pop(name, None) for name in include: service = content["services"][name] # removes builds (No more) if "build" in service: service.pop("build", None) if "environment" in service: service["environment"] = _minio_fix(service["environment"]) # updates current docker-compose (also versioned ... do not change by hand) with docker_compose_path.open("wt") as fh: if "TRAVIS" in os.environ: # in travis we do not have access to file print("{:-^100}".format(str(docker_compose_path))) yaml.dump(content, sys.stdout, default_flow_style=False) print("-" * 100) else: # locally we have access to file print(f"Saving config to '{docker_compose_path}'") yaml.dump(content, fh, default_flow_style=False)
# pylint:disable=unused-variable # pylint:disable=unused-argument # pylint:disable=redefined-outer-name """ Fixtures to create docker-compose.yaml configururation files (as in Makefile) Basically runs `docker-compose config """ import os import shutil import sys from copy import deepcopy from pathlib import Path from pprint import pformat from typing import Any, Dict, List, Union import pytest import yaml from _pytest.config import ExitCode from dotenv import dotenv_values from .helpers import ( FIXTURE_CONFIG_CORE_SERVICES_SELECTION, FIXTURE_CONFIG_OPS_SERVICES_SELECTION, ) from .helpers.utils_docker import get_ip, run_docker_compose_config, save_docker_infos @pytest.fixture(scope="session") def testing_environ_vars(env_devel_file: Path) -> Dict[str, Union[str, None]]: """ Loads and extends .env-devel returning all environment variables key=value """ env_devel_unresolved = dotenv_values(env_devel_file, verbose=True, interpolate=True) # get from environ if applicable env_devel = { key: os.environ.get(key, value) for key, value in env_devel_unresolved.items() } # These are overrides to .env-devel or an extension to them env_devel["LOG_LEVEL"] = "DEBUG" env_devel["REGISTRY_SSL"] = "False" env_devel["REGISTRY_URL"] = "{}:5000".format(get_ip()) env_devel["REGISTRY_PATH"] = "127.0.0.1:5000" env_devel["REGISTRY_USER"] = "simcore" env_devel["REGISTRY_PW"] = "" env_devel["REGISTRY_AUTH"] = "False" # CAREFUL! FIXME: monkeypatch autouse ?? env_devel["SWARM_STACK_NAME"] = "pytest-simcore" env_devel.setdefault( "SWARM_STACK_NAME_NO_HYPHEN", env_devel["SWARM_STACK_NAME"].replace("-", "_") ) env_devel["DIRECTOR_REGISTRY_CACHING"] = "False" env_devel.setdefault("DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS", "") env_devel.setdefault("DIRECTOR_SELF_SIGNED_SSL_SECRET_ID", "") env_devel.setdefault("DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME", "") env_devel.setdefault("DIRECTOR_SELF_SIGNED_SSL_FILENAME", "") env_devel["API_SERVER_DEV_FEATURES_ENABLED"] = "1" if not "DOCKER_REGISTRY" in os.environ: env_devel["DOCKER_REGISTRY"] = "local" if not "DOCKER_IMAGE_TAG" in os.environ: env_devel["DOCKER_IMAGE_TAG"] = "production" return env_devel @pytest.fixture(scope="module") def env_file_for_testing( testing_environ_vars: Dict[str, str], temp_folder: Path, osparc_simcore_root_dir: Path, ) -> Path: """Dumps all the environment variables into an $(temp_folder)/.env.test file Pass path as argument in 'docker-compose --env-file ... 
' """ # SEE: # https://docs.docker.com/compose/env-file/ # https://docs.docker.com/compose/environment-variables/#the-env-file env_test_path = temp_folder / ".env.test" with env_test_path.open("wt") as fh: print( f"# Auto-generated from env_file_for_testing in {__file__}", file=fh, ) for key in sorted(testing_environ_vars.keys()): print(f"{key}={testing_environ_vars[key]}", file=fh) # # WARNING: since compose files have references to ../.env we MUST create .env # backup_path = osparc_simcore_root_dir / ".env.bak" env_path = osparc_simcore_root_dir / ".env" if env_path.exists(): shutil.copy(env_path, backup_path) shutil.copy(env_test_path, env_path) yield env_path if backup_path.exists(): backup_path.replace(env_path) @pytest.fixture(scope="module") def simcore_docker_compose( osparc_simcore_root_dir: Path, env_file_for_testing: Path, temp_folder: Path, ) -> Dict[str, Any]: """Resolves docker-compose for simcore stack in local host Produces same as `make .stack-simcore-version.yml` in a temporary folder """ COMPOSE_FILENAMES = ["docker-compose.yml", "docker-compose.local.yml"] # ensures .env at git_root_dir assert env_file_for_testing.exists() # target docker-compose path docker_compose_paths = [ osparc_simcore_root_dir / "services" / filename for filename in COMPOSE_FILENAMES ] assert all( docker_compose_path.exists() for docker_compose_path in docker_compose_paths ) config = run_docker_compose_config( project_dir=osparc_simcore_root_dir / "services", docker_compose_paths=docker_compose_paths, env_file_path=env_file_for_testing, destination_path=temp_folder / "simcore_docker_compose.yml", ) print("simcore docker-compose:\n%s", pformat(config)) return config @pytest.fixture(scope="module") def ops_docker_compose( osparc_simcore_root_dir: Path, env_file_for_testing: Path, temp_folder: Path ) -> Dict[str, Any]: """Filters only services in docker-compose-ops.yml and returns yaml data Produces same as `make .stack-ops.yml` in a temporary folder """ # ensures .env at git_root_dir, which will be used as current directory assert env_file_for_testing.exists() # target docker-compose path docker_compose_path = ( osparc_simcore_root_dir / "services" / "docker-compose-ops.yml" ) assert docker_compose_path.exists() config = run_docker_compose_config( project_dir=osparc_simcore_root_dir / "services", docker_compose_paths=docker_compose_path, env_file_path=env_file_for_testing, destination_path=temp_folder / "ops_docker_compose.yml", ) print("ops docker-compose:\n%s", pformat(config)) return config @pytest.fixture(scope="module") def core_services_selection(request) -> List[str]: """Selection of services from the simcore stack""" core_services = getattr(request.module, FIXTURE_CONFIG_CORE_SERVICES_SELECTION, []) assert ( core_services ), f"Expected at least one service in '{FIXTURE_CONFIG_CORE_SERVICES_SELECTION}' within '{request.module.__name__}'" return core_services @pytest.fixture(scope="module") def core_docker_compose_file( core_services_selection: List[str], temp_folder: Path, simcore_docker_compose: Dict ) -> Path: """A compose with a selection of services from simcore_docker_compose Creates a docker-compose config file for every stack of services in 'core_services_selection' module variable File is created in a temp folder """ docker_compose_path = Path(temp_folder / "simcore_docker_compose.filtered.yml") _filter_services_and_dump( core_services_selection, simcore_docker_compose, docker_compose_path ) return docker_compose_path @pytest.fixture(scope="module") def ops_services_selection(request) 
-> List[str]: """Selection of services from the ops stack""" ops_services = getattr(request.module, FIXTURE_CONFIG_OPS_SERVICES_SELECTION, []) return ops_services @pytest.fixture(scope="module") def ops_docker_compose_file( ops_services_selection: List[str], temp_folder: Path, ops_docker_compose: Dict ) -> Path: """A compose with a selection of services from ops_docker_compose Creates a docker-compose config file for every stack of services in 'ops_services_selection' module variable File is created in a temp folder """ docker_compose_path = Path(temp_folder / "ops_docker_compose.filtered.yml") _filter_services_and_dump( ops_services_selection, ops_docker_compose, docker_compose_path ) return docker_compose_path @pytest.hookimpl() def pytest_exception_interact(node, call, report): # get the node root dir (guaranteed to exist) root_directory: Path = Path(node.config.rootdir) failed_test_directory = root_directory / "test_failures" / node.name save_docker_infos(failed_test_directory) @pytest.hookimpl() def pytest_sessionfinish(session: pytest.Session, exitstatus: ExitCode) -> None: if exitstatus == ExitCode.TESTS_FAILED: # get the node root dir (guaranteed to exist) root_directory: Path = Path(session.fspath) failed_test_directory = root_directory / "test_failures" / session.name save_docker_infos(failed_test_directory) # HELPERS --------------------------------------------- def _minio_fix(service_environs: Dict) -> Dict: """this hack ensures that S3 is accessed from the host at all time, thus pre-signed links work.""" if "S3_ENDPOINT" in service_environs: service_environs["S3_ENDPOINT"] = f"{get_ip()}:9001" return service_environs def _filter_services_and_dump( include: List, services_compose: Dict, docker_compose_path: Path ): content = deepcopy(services_compose) # filters services remove = [name for name in content["services"] if name not in include] for name in remove: content["services"].pop(name, None) for name in include: service = content["services"][name] # removes builds (No more) if "build" in service: service.pop("build", None) if "environment" in service: service["environment"] = _minio_fix(service["environment"]) # updates current docker-compose (also versioned ... do not change by hand) with docker_compose_path.open("wt") as fh: if "TRAVIS" in os.environ: # in travis we do not have access to file print("{:-^100}".format(str(docker_compose_path))) yaml.dump(content, sys.stdout, default_flow_style=False) print("-" * 100) else: # locally we have access to file print(f"Saving config to '{docker_compose_path}'") yaml.dump(content, fh, default_flow_style=False)
en
0.805252
# pylint:disable=unused-variable # pylint:disable=unused-argument # pylint:disable=redefined-outer-name Fixtures to create docker-compose.yaml configururation files (as in Makefile) Basically runs `docker-compose config Loads and extends .env-devel returning all environment variables key=value # get from environ if applicable # These are overrides to .env-devel or an extension to them # CAREFUL! FIXME: monkeypatch autouse ?? Dumps all the environment variables into an $(temp_folder)/.env.test file Pass path as argument in 'docker-compose --env-file ... ' # SEE: # https://docs.docker.com/compose/env-file/ # https://docs.docker.com/compose/environment-variables/#the-env-file # # WARNING: since compose files have references to ../.env we MUST create .env # Resolves docker-compose for simcore stack in local host Produces same as `make .stack-simcore-version.yml` in a temporary folder # ensures .env at git_root_dir # target docker-compose path Filters only services in docker-compose-ops.yml and returns yaml data Produces same as `make .stack-ops.yml` in a temporary folder # ensures .env at git_root_dir, which will be used as current directory # target docker-compose path Selection of services from the simcore stack A compose with a selection of services from simcore_docker_compose Creates a docker-compose config file for every stack of services in 'core_services_selection' module variable File is created in a temp folder Selection of services from the ops stack A compose with a selection of services from ops_docker_compose Creates a docker-compose config file for every stack of services in 'ops_services_selection' module variable File is created in a temp folder # get the node root dir (guaranteed to exist) # get the node root dir (guaranteed to exist) # HELPERS --------------------------------------------- this hack ensures that S3 is accessed from the host at all time, thus pre-signed links work. # filters services # removes builds (No more) # updates current docker-compose (also versioned ... do not change by hand) # in travis we do not have access to file # locally we have access to file
1.992369
2
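_filter_services_and_dump in the record above is the piece that turns a full compose config into a per-test stack. A toy, self-contained sketch of that filtering step, using plain dicts instead of the real fixtures (filter_services and the service names are illustrative):

from copy import deepcopy

import yaml

def filter_services(compose: dict, include: list) -> dict:
    """Keep only the selected services and drop their build sections."""
    content = deepcopy(compose)
    for name in list(content["services"]):
        if name not in include:
            content["services"].pop(name)
        else:
            content["services"][name].pop("build", None)
    return content

compose = {
    "services": {
        "postgres": {"image": "postgres:14"},
        "webserver": {"build": ".", "image": "local/webserver:production"},
    }
}
print(yaml.dump(filter_services(compose, ["postgres"]), default_flow_style=False))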
api/urls.py
catveloper/dynamic_form_generator
0
6631469
from django.urls import path, include from drf_spectacular.views import SpectacularJSONAPIView, SpectacularAPIView, SpectacularSwaggerView, \ SpectacularRedocView from rest_framework import routers from api.viewset import * app_name = 'api' router = routers.DefaultRouter() router.register(r'users', viewset=UserViewSet) router.register(r'groups', viewset=GroupViewSet) router.register(r'projects', viewset=ProjectViewSet) router.register(r'tasks', viewset=TaskViewSet) # Auto Generate API urlpatterns = [ path('form_generate/static/', StaticFormGeneratorAPI.as_view(), name='static_form_schema'), path('', include(router.urls)), ] # Custom API urlpatterns += [ path('', include('form_schema_generator.urls')), ] # Spectacular Document API urlpatterns += [ path("docs/json/", SpectacularJSONAPIView.as_view(), name="schema-json"), path('schema/', SpectacularAPIView.as_view(), name='schema'), path('schema/swagger-ui/', SpectacularSwaggerView.as_view(url_name='api:schema'), name='swagger-ui'), path('schema/redoc/', SpectacularRedocView.as_view(url_name='api:schema'), name='redoc'), ]
from django.urls import path, include from drf_spectacular.views import SpectacularJSONAPIView, SpectacularAPIView, SpectacularSwaggerView, \ SpectacularRedocView from rest_framework import routers from api.viewset import * app_name = 'api' router = routers.DefaultRouter() router.register(r'users', viewset=UserViewSet) router.register(r'groups', viewset=GroupViewSet) router.register(r'projects', viewset=ProjectViewSet) router.register(r'tasks', viewset=TaskViewSet) # Auto Generate API urlpatterns = [ path('form_generate/static/', StaticFormGeneratorAPI.as_view(), name='static_form_schema'), path('', include(router.urls)), ] # Custom API urlpatterns += [ path('', include('form_schema_generator.urls')), ] # Spectacular Document API urlpatterns += [ path("docs/json/", SpectacularJSONAPIView.as_view(), name="schema-json"), path('schema/', SpectacularAPIView.as_view(), name='schema'), path('schema/swagger-ui/', SpectacularSwaggerView.as_view(url_name='api:schema'), name='swagger-ui'), path('schema/redoc/', SpectacularRedocView.as_view(url_name='api:schema'), name='redoc'), ]
en
0.431594
# Auto Generate API # Custom API # Spectacular Document API
2.026158
2
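The spectacular views registered in the record above assume the project is configured to use drf-spectacular's schema class. A sketch of the settings those URL patterns typically rely on, per the drf-spectacular documentation (values here are illustrative, not taken from the repository):

# settings.py (sketch)
INSTALLED_APPS = [
    # ...
    "rest_framework",
    "drf_spectacular",
]

REST_FRAMEWORK = {
    "DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
}

SPECTACULAR_SETTINGS = {
    "TITLE": "Dynamic Form Generator API",  # illustrative title
    "VERSION": "1.0.0",
}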
home/admin.py
georgiawang5332/meatFoodManager
0
6631470
<reponame>georgiawang5332/meatFoodManager from django.contrib import admin from .models import * # # # Register your models here. # admin.site.register(UserProfile) # Store (restaurant) class UserProfileAdmin(admin.ModelAdmin): list_display = ('user', 'user_info', 'phone', 'email', 'city', 'website') def user_info(self, obj): return obj.description def get_queryset(self, request): queryset = super(UserProfileAdmin, self).get_queryset(request) queryset = queryset.order_by('-phone') return queryset user_info.short_description = "Info 2" # list_display = ('user', 'image', 'phone', 'email', 'city', 'website', 'description') # list_filter = ('phone',) # search_fields = ('phone',) # fields = ('user', 'image', 'phone', 'email', 'city', 'website', # 'description') # This will make the price field appear before the restaurant field, and the name, is_spicy and comment fields will not appear and cannot be edited. # ordering = ('-city',) # list_display_link = ('image', 'phone', 'city', 'website', 'description') # # def user_info(self, obj): # return obj.description # # def get_queryset(self, request): # queryset = super(UserProfileAdmin, self).get_queryset(request) # queryset = queryset.order_by('-phone') # return queryset # # class Meta: # modle = UserProfile # # admin.site.register(UserProfile, UserProfileAdmin)
from django.contrib import admin from .models import * # # # Register your models here. # admin.site.register(UserProfile) # Store (restaurant) class UserProfileAdmin(admin.ModelAdmin): list_display = ('user', 'user_info', 'phone', 'email', 'city', 'website') def user_info(self, obj): return obj.description def get_queryset(self, request): queryset = super(UserProfileAdmin, self).get_queryset(request) queryset = queryset.order_by('-phone') return queryset user_info.short_description = "Info 2" # list_display = ('user', 'image', 'phone', 'email', 'city', 'website', 'description') # list_filter = ('phone',) # search_fields = ('phone',) # fields = ('user', 'image', 'phone', 'email', 'city', 'website', # 'description') # This will make the price field appear before the restaurant field, and the name, is_spicy and comment fields will not appear and cannot be edited. # ordering = ('-city',) # list_display_link = ('image', 'phone', 'city', 'website', 'description') # # def user_info(self, obj): # return obj.description # # def get_queryset(self, request): # queryset = super(UserProfileAdmin, self).get_queryset(request) # queryset = queryset.order_by('-phone') # return queryset # # class Meta: # modle = UserProfile # # admin.site.register(UserProfile, UserProfileAdmin)
en
0.215187
# # # Register your models here. # admin.site.register(UserProfile) # Store (restaurant) # list_display = ('user', 'image', 'phone', 'email', 'city', 'website', 'description') # list_filter = ('phone',) # search_fields = ('phone',) # fields = ('user', 'image', 'phone', 'email', 'city', 'website', # 'description') # This will make the price field appear before the restaurant field, and the name, is_spicy and comment fields will not appear and cannot be edited. # ordering = ('-city',) # list_display_link = ('image', 'phone', 'city', 'website', 'description') # # def user_info(self, obj): # return obj.description # # def get_queryset(self, request): # queryset = super(UserProfileAdmin, self).get_queryset(request) # queryset = queryset.order_by('-phone') # return queryset # # class Meta: # modle = UserProfile # #
2.08734
2
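The computed user_info column in the record above uses the classic short_description attribute; on Django 3.2+ the same thing is usually written with the admin.display decorator. A sketch against the same (assumed) UserProfile model:

from django.contrib import admin

from .models import UserProfile

@admin.register(UserProfile)
class UserProfileAdmin(admin.ModelAdmin):
    list_display = ('user', 'user_info', 'phone', 'email', 'city', 'website')

    @admin.display(description="Info 2")
    def user_info(self, obj):
        return obj.description

    def get_queryset(self, request):
        # Highest phone values first, as in the record above.
        return super().get_queryset(request).order_by('-phone')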
train_pointnet.py
BowenRaymone/KaggleLyftCompetition
2
6631471
<gh_stars>1-10 from abc import ABC from pathlib import Path from numcodecs import blosc import pandas as pd, numpy as np import os import bisect import itertools as it from tqdm import tqdm import logzero import torch from torch import nn, optim import torch.nn.functional as F from torch.autograd import Variable from pytorch_lightning import Trainer from pytorch_lightning import LightningModule from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.loggers import TensorBoardLogger from pytorch_lightning.loggers.neptune import NeptuneLogger import pickle, copy, re, time, datetime, random, warnings, gc import zarr from poinet_model import * with open('parameters.json') as json_file: JSON_PARAMETERS = json.load(json_file) DATA_ROOT = Path("/data/lyft-motion-prediction-autonomous-vehicles") TRAIN_ZARR = JSON_PARAMETERS["TRAIN_ZARR"] VALID_ZARR = JSON_PARAMETERS["VALID_ZARR"] HBACKWARD = JSON_PARAMETERS["HBACKWARD"] HFORWARD = JSON_PARAMETERS["HFORWARD"] NFRAMES = JSON_PARAMETERS["NFRAMES"] FRAME_STRIDE = JSON_PARAMETERS["FRAME_STRIDE"] AGENT_FEATURE_DIM = JSON_PARAMETERS["AGENT_FEATURE_DIM"] MAX_AGENTS = JSON_PARAMETERS["MAX_AGENTS"] device = torch.device("cuda" if torch.cuda.is_available() else "cpu") NUM_WORKERS = JSON_PARAMETERS["NUM_WORKERS"] BATCH_SIZE = JSON_PARAMETERS["BATCH_SIZE"] EPOCHS = JSON_PARAMETERS["EPOCHS"] LEARNING_RATE = JSON_PARAMETERS["LEARNING_RATE"] WEIGHT_DECAY = JSON_PARAMETERS["WEIGHT_DECAY"] GRADIENT_CLIP_VAL = JSON_PARAMETERS["GRADIENT_CLIP_VAL"] LIMIT_VAL_BATCHES = JSON_PARAMETERS["LIMIT_VAL_BATCHES"] torch.backends.cudnn.benchmark = True # last_checkpoint = get_last_checkpoint(ROOT) last_checkpoint = None if last_checkpoint is not None: print(f'\n***** RESUMING FROM CHECKPOINT `{last_checkpoint.as_posix()}`***********\n') model = LyftNet.load_from_checkpoint(Path(last_checkpoint).as_posix(), map_location=device, num_workers = NUM_WORKERS, batch_size = BATCH_SIZE) else: print('\n***** NEW MODEL ***********\n') model = LyftNet(batch_size=BATCH_SIZE, lr= LEARNING_RATE, weight_decay=WEIGHT_DECAY, num_workers=NUM_WORKERS) checkpoint_callback = ModelCheckpoint( filepath=ROOT, save_top_k=5, verbose=0, monitor='val_loss', mode='min', prefix='lyfnet_', ) API_KEY = os.environ.get('NEPTUNE_API_KEY') neptune_logger = NeptuneLogger( api_key=API_KEY, project_name='hvergnes/KagglePointNet', params={'epoch_nr': f'{EPOCHS}', 'bs': f'{BATCH_SIZE}', 'LEARNING_RATE': f'{LEARNING_RATE}', 'WEIGHT_DECAY': f'{WEIGHT_DECAY}', 'HBACKWARD': f'{HBACKWARD}', 'HFORWARD': f'{HFORWARD}', 'NFRAMES': f'{NFRAMES}', "FRAME_STRIDE": f"{FRAME_STRIDE}", "AGENT_FEATURE_DIM": f"{AGENT_FEATURE_DIM}", "MAX_AGENTS": f"{MAX_AGENTS}"}, tags=['baseline'], ) # print(model) trainer = Trainer( max_epochs=EPOCHS, gradient_clip_val=GRADIENT_CLIP_VAL, logger=neptune_logger, checkpoint_callback=checkpoint_callback, limit_val_batches=LIMIT_VAL_BATCHES, gpus=1 ) trainer.fit(model) torch.save(model.state_dict(), f'save/PointNetE:{EPOCHS}LR:{LEARNING_RATE}.pt')
from abc import ABC from pathlib import Path from numcodecs import blosc import pandas as pd, numpy as np import os import bisect import itertools as it from tqdm import tqdm import logzero import torch from torch import nn, optim import torch.nn.functional as F from torch.autograd import Variable from pytorch_lightning import Trainer from pytorch_lightning import LightningModule from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.loggers import TensorBoardLogger from pytorch_lightning.loggers.neptune import NeptuneLogger import pickle, copy, re, time, datetime, random, warnings, gc import zarr from poinet_model import * with open('parameters.json') as json_file: JSON_PARAMETERS = json.load(json_file) DATA_ROOT = Path("/data/lyft-motion-prediction-autonomous-vehicles") TRAIN_ZARR = JSON_PARAMETERS["TRAIN_ZARR"] VALID_ZARR = JSON_PARAMETERS["VALID_ZARR"] HBACKWARD = JSON_PARAMETERS["HBACKWARD"] HFORWARD = JSON_PARAMETERS["HFORWARD"] NFRAMES = JSON_PARAMETERS["NFRAMES"] FRAME_STRIDE = JSON_PARAMETERS["FRAME_STRIDE"] AGENT_FEATURE_DIM = JSON_PARAMETERS["AGENT_FEATURE_DIM"] MAX_AGENTS = JSON_PARAMETERS["MAX_AGENTS"] device = torch.device("cuda" if torch.cuda.is_available() else "cpu") NUM_WORKERS = JSON_PARAMETERS["NUM_WORKERS"] BATCH_SIZE = JSON_PARAMETERS["BATCH_SIZE"] EPOCHS = JSON_PARAMETERS["EPOCHS"] LEARNING_RATE = JSON_PARAMETERS["LEARNING_RATE"] WEIGHT_DECAY = JSON_PARAMETERS["WEIGHT_DECAY"] GRADIENT_CLIP_VAL = JSON_PARAMETERS["GRADIENT_CLIP_VAL"] LIMIT_VAL_BATCHES = JSON_PARAMETERS["LIMIT_VAL_BATCHES"] torch.backends.cudnn.benchmark = True # last_checkpoint = get_last_checkpoint(ROOT) last_checkpoint = None if last_checkpoint is not None: print(f'\n***** RESUMING FROM CHECKPOINT `{last_checkpoint.as_posix()}`***********\n') model = LyftNet.load_from_checkpoint(Path(last_checkpoint).as_posix(), map_location=device, num_workers = NUM_WORKERS, batch_size = BATCH_SIZE) else: print('\n***** NEW MODEL ***********\n') model = LyftNet(batch_size=BATCH_SIZE, lr= LEARNING_RATE, weight_decay=WEIGHT_DECAY, num_workers=NUM_WORKERS) checkpoint_callback = ModelCheckpoint( filepath=ROOT, save_top_k=5, verbose=0, monitor='val_loss', mode='min', prefix='lyfnet_', ) API_KEY = os.environ.get('NEPTUNE_API_KEY') neptune_logger = NeptuneLogger( api_key=API_KEY, project_name='hvergnes/KagglePointNet', params={'epoch_nr': f'{EPOCHS}', 'bs': f'{BATCH_SIZE}', 'LEARNING_RATE': f'{LEARNING_RATE}', 'WEIGHT_DECAY': f'{WEIGHT_DECAY}', 'HBACKWARD': f'{HBACKWARD}', 'HFORWARD': f'{HFORWARD}', 'NFRAMES': f'{NFRAMES}', "FRAME_STRIDE": f"{FRAME_STRIDE}", "AGENT_FEATURE_DIM": f"{AGENT_FEATURE_DIM}", "MAX_AGENTS": f"{MAX_AGENTS}"}, tags=['baseline'], ) # print(model) trainer = Trainer( max_epochs=EPOCHS, gradient_clip_val=GRADIENT_CLIP_VAL, logger=neptune_logger, checkpoint_callback=checkpoint_callback, limit_val_batches=LIMIT_VAL_BATCHES, gpus=1 ) trainer.fit(model) torch.save(model.state_dict(), f'save/PointNetE:{EPOCHS}LR:{LEARNING_RATE}.pt')
en
0.695805
# last_checkpoint = get_last_checkpoint(ROOT) # print(model)
1.905018
2
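One fragile spot in the record above: json.load is called but json is never imported directly, so it is only in scope if the star import from poinet_model happens to re-export it. A self-contained version of that parameter-loading step:

import json

with open("parameters.json") as json_file:  # file name taken from the record above
    JSON_PARAMETERS = json.load(json_file)

BATCH_SIZE = JSON_PARAMETERS["BATCH_SIZE"]
EPOCHS = JSON_PARAMETERS["EPOCHS"]
LEARNING_RATE = JSON_PARAMETERS["LEARNING_RATE"]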
config.py
ominatechnologies/frozendict
0
6631472
# Single-sourced project configuration values: # The full version, including alpha/beta/rc tags: release = "2021.11.30" version = release # Distribution package name: name = "frozendict" # Capitalized label: project = "Frozendict" description = "A modern implementation of FrozenDict." author = "<NAME>" author_email = "<EMAIL>" copyright = "2021, Omina Technologies" repo = "https://github.com/ominatechnologies/frozendict"
# Single-sourced project configuration values: # The full version, including alpha/beta/rc tags: release = "2021.11.30" version = release # Distribution package name: name = "frozendict" # Capitalized label: project = "Frozendict" description = "A modern implementation of FrozenDict." author = "<NAME>" author_email = "<EMAIL>" copyright = "2021, Omina Technologies" repo = "https://github.com/ominatechnologies/frozendict"
en
0.70752
# Single-sourced project configuration values: # The full version, including alpha/beta/rc tags: # Distribution package name: # Capitalized label:
0.984577
1
mesh_tensorflow/auto_mtf/layout_optimizer.py
merrymercy/mesh
1,264
6631473
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Computes layouts for Mesh TensorFlow. Classes and methods to encode a Mesh TensorFlow computation as a series of Operations and then find a layout to minimize per-machine memory usage. Sample Usage: mtf_graph = mtf.Graph() mesh = mtf.Mesh(mtf_graph, "my_mesh") mesh_shape = mtf.convert_to_shape("m1:2;m2:2") # Add some operations to mesh using Mesh TensorFlow. estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape) optimizer = layout_optimizer.LayoutOptimizer(estimator) layout = optimizer.solve() """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools from absl import logging from mesh_tensorflow.auto_mtf import print_cp_model_solution from mesh_tensorflow.auto_mtf import scheduler import six from ortools.sat.python import cp_model class SolverError(Exception): pass class LayoutOptimizer(object): """Tries to compute a good layout for Mesh Tensorflow. Given a mesh shape (see Mesh TensorFlow) and several operations, computes a good layout (a mapping from TensorFlow dimensions to mesh dimensions) using integer programming. More formally, suppose T is the set of TensorFlow dimensions and M is the set of mesh dimensions. A layout L is a map from (T) to (M union {"unassigned"}), designating which tensor dimensions are split using which mesh dimensions. We wish to compute a layout that minimizes memory usage. Unfortunately, the memory usage doesn't just depend on the layout, but also on how the scheduler orders operations; whenever an operation is being performed, there are other tensors in memory besides that operation's input and output tensors. The layout, however, affects the size of the various tensors in memory, as well as *the amount of temporary memory the operation uses*. With this in mind, our (boolean) integer program to minimize memory is: Variables: x_{t->m}: "global" (boolean) variables; takes a value of 1 if t in T is assigned to m in M, and a value of 0 otherwise. y_{assignment}: "local" (boolean) variables; for every set of TensorFlow dimensions used in an operation or tensor and for every (valid) assignment from that set of TF dimensions to (M union {"unassigned"}), we have one of these which takes a value of 1 if the global variables completely agree with that assignment and 0 otherwise. z: memory (continuous) variable; the peak memory usage. Constraints: Operation Constraints: for every operation, no two dimensions used in that operation can be mapped to the same mesh dimension (since doing so would cause some of its computation to be skipped entirely). Global Constraints: we enforce that every TensorFlow dimension is assigned to at most one mesh dimension (it may be unassigned). (Optional) Divisibility Constraints: we enforce that a TensorFlow dimension can only be assigned to a mesh dimension if the latter's size evenly divides the former's size. 
Local Constraints: we enforce that out of all assignments that share a domain (i.e. set of TensorFlow dimensions), exactly one is chosen. Global-to-Local Constraints: we enforce that assignment(t) = m, then x_{t->m} must be 1 for y_{assignment} to be 1. We also enforce that if assignment(t) = "unassigned", then x_{t->m} must be 0 for all m in M. Memory Constraints: for every operation i, the peak memory usage z must be least the memory usage during that operation. The latter can be derived from memory_contents[i] and the local variables relevant to those tensors (their new sizes) and to the operation (temporary memory needed). Objective Function: We want to minimize the variable z. However, we want to tiebreak by the number of assigned dimensions (preferring more dimensions), so our modified objective is (#MTF Dimensions + 1) * z - sum x_{t->m}. Note that we prefer more splitting because in general splits result in smaller tensors and less duplicated work. """ def __init__(self, memory_estimator, scheduler_alg="LIST"): """Uses a auto_mtf.memory_estimator to set up the integer program. Args: memory_estimator: a memory_estimator.MemoryEstimator. scheduler_alg: an optional string, see scheduler.minimize_peak_memory. """ self._estimator = memory_estimator self._scheduler_alg = scheduler_alg self._layout_validator = self._estimator.get_layout_validator() self._graph = self._estimator.get_graph_interface() self._memory_contents = None # [frozenset(string)] # Initialize the model. self._model = cp_model.CpModel() self._preprocess_input() self._initialize_variables() self._add_constraints() self._build_objective_function() def _preprocess_input(self): """Computing useful input data structures to ease IP construction.""" # Compute the sets of MTF dimensions used in operations/tensors. # a {string: frozenset(string)}, mapping operation name to MTF dimension # names. self._operation_name_to_mtf_dimension_set = {} # a {string: frozenset(string)}, mapping tensor name to MTF dimension names. self._tensor_name_to_mtf_dimension_set = {} for operation_name in self._graph.get_all_operation_names(): self._operation_name_to_mtf_dimension_set[operation_name] = frozenset( set(self._graph.get_operation_mtf_dimension_names( operation_name)).intersection( self._layout_validator.splittable_mtf_dimension_names)) for tensor_name in self._graph.get_all_tensor_names(): self._tensor_name_to_mtf_dimension_set[tensor_name] = frozenset( set(self._graph.get_tensor_mtf_dimension_names(tensor_name)) .intersection(self._layout_validator.splittable_mtf_dimension_names)) self._operation_mtf_dimension_sets = set( self._operation_name_to_mtf_dimension_set.values()) self._mtf_dimension_sets = self._operation_mtf_dimension_sets | set( self._tensor_name_to_mtf_dimension_set.values()) # Compute possible assignments for each set of MTF dimensions. self._assignments = {} # indexed by MTF dimension set for mtf_dimension_set in self._mtf_dimension_sets: self._assignments[mtf_dimension_set] = _generate_assignments( mtf_dimension_set, self._layout_validator.mesh_dimension_name_to_size) def _initialize_variables(self): """Initializing the variables of the IP.""" # Initialize global variables. 
self._global_vars = {} # Indexed by (MTF dimension, mesh dimension) for mtf_dimension_name in ( self._layout_validator.splittable_mtf_dimension_names): for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): name = _global_var_name(mtf_dimension_name, mesh_dimension_name) self._global_vars[(mtf_dimension_name, mesh_dimension_name)] = ( self._model.NewBoolVar(name)) # Initialize local variables. self._local_vars = {} # Indexed by (tensorflow dimension set), then name of # assignment. for mtf_dimension_set in self._mtf_dimension_sets: self._local_vars[mtf_dimension_set] = {} for assignment in self._assignments[mtf_dimension_set]: # TODO(joshuawang): Avoid hash collision no matter what dimension names # are; don't hash by this local var name, swap to using a tuple encoding # of the full assignment instead. name = _local_var_name(mtf_dimension_set, assignment) self._local_vars[mtf_dimension_set][name] = ( self._model.NewBoolVar(name)) # Initialize memory variable. We need a crude upper bound on memory, so we # use the total size of all tensors under the empty assignment. # NOTE(joshuawang): This bound could be improved by factoring in the # schedule. memory_upper_bound = 0 for tensor_name in self._graph.get_all_tensor_names(): if self._graph.is_tensor_on_canonical_device(tensor_name): memory_upper_bound += int(self._graph.get_tensor_size(tensor_name)) self._memory_var = self._model.NewIntVar(0, memory_upper_bound, "z") def _add_constraints(self): """Adding constraints to the IP.""" # Add operation constraints. for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): for mtf_dimension_set in self._operation_mtf_dimension_sets: self._model.Add( sum(self._global_vars[(mtf_dimension_name, mesh_dimension_name)] for mtf_dimension_name in mtf_dimension_set) <= 1) # Add global constraints. for mtf_dimension_name in ( self._layout_validator.splittable_mtf_dimension_names): self._model.Add( sum(self._global_vars[(mtf_dimension_name, mesh_dimension_name)] for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size)) <= 1) # Add divisibility constraints. for mtf_dimension_name in ( self._layout_validator.splittable_mtf_dimension_names): for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): if not self._layout_validator.is_valid_assignment(mtf_dimension_name, mesh_dimension_name): self._model.Add(self._global_vars[(mtf_dimension_name, mesh_dimension_name)] == 0) # Add local constraints. for mtf_dimension_set in self._mtf_dimension_sets: self._model.Add( sum(self._local_vars[mtf_dimension_set][_local_var_name( mtf_dimension_set, assignment)] for assignment in self._assignments[mtf_dimension_set]) == 1) # Add local-to-global constraints. for mtf_dimension_set in self._mtf_dimension_sets: for assignment in self._assignments[mtf_dimension_set]: name = _local_var_name(mtf_dimension_set, assignment) for mtf_dimension_name in mtf_dimension_set: if mtf_dimension_name in assignment: mesh_dimension_name = assignment[mtf_dimension_name] self._model.AddImplication( self._local_vars[mtf_dimension_set][name], self._global_vars[(mtf_dimension_name, mesh_dimension_name)]) else: for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): self._model.AddImplication( self._global_vars[(mtf_dimension_name, mesh_dimension_name)], self._local_vars[mtf_dimension_set][name].Not()) # Add memory constraints. 
tensor_memory_sum = {} for tensor_name in self._graph.get_all_tensor_names(): tensor_memory_sum[tensor_name] = 0 mtf_dimension_set = self._tensor_name_to_mtf_dimension_set[tensor_name] if not self._graph.is_tensor_on_canonical_device(tensor_name): continue for assignment in self._assignments[mtf_dimension_set]: size_under_assignment = self._graph.get_tensor_size( tensor_name, assignment, self._layout_validator.mesh_dimension_name_to_size) name = _local_var_name(mtf_dimension_set, assignment) tensor_memory_sum[tensor_name] += ( size_under_assignment * self._local_vars[mtf_dimension_set][name]) for tensor_names in self._get_memory_contents(): self._model.Add( sum(tensor_memory_sum[tensor_name] for tensor_name in tensor_names) <= self._memory_var) def _build_objective_function(self): """Builds the objective function of the IP.""" # Break ties in favor of more assignments. scale = len(self._layout_validator.splittable_mtf_dimension_names) + 1 objective = scale * self._memory_var - sum(six.itervalues( self._global_vars)) self._model.Minimize(objective) def _get_memory_contents(self): """Runs the scheduler to determine memory contents at every point in time. Returns: a list of frozenset of strings, where the ith entry describes the tensors in memory when executing operation i (where schedule[i] is an index into GetAllOperationNames()). """ if self._memory_contents is not None: return self._memory_contents schedule = scheduler.minimize_peak_memory(self._graph, self._scheduler_alg) self._memory_contents = self._graph.compute_memory_contents_under_schedule( schedule) return self._memory_contents def solve(self, print_solution=False): """Solves the current integer program and returns the computed layout. Args: print_solution: An optional boolean indicating whether to print the full solution in human-readable format. Returns: The computed layout (as a string). Raises: SolverError: the internal solver could not find a solution, or the solution found is infeasible. """ # Solve and see how well the solver did. self._cp_solver = cp_model.CpSolver() status = self._cp_solver.Solve(self._model) if status != cp_model.OPTIMAL: if status == cp_model.FEASIBLE: logging.warning("A potentially suboptimal solution was found.") else: logging.error("Solver returned status %d.", status) raise SolverError("The solver could not solve the problem and returned " "status {}.".format(status)) # TODO(joshuawang): Verify the solver's solution. if print_solution: print_cp_model_solution.print_solution(self._model, self._cp_solver) # Reconstruct layout from solution. layout = [] for mtf_dimension_name in ( self._layout_validator.splittable_mtf_dimension_names): for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): value = self._cp_solver.Value(self._global_vars[(mtf_dimension_name, mesh_dimension_name)]) if value: # Value is integer. layout.append(mtf_dimension_name + ":" + mesh_dimension_name) layout.sort() return ";".join(layout) def evaluate_layout(self, layout): """The current objective value for the given layout. TODO(joshuawang): The current function does not check that the given layout is valid. Args: layout: a string, representing a layout to evaluate (e.g. "d_ff:m1;heads:m2"). Returns: A float, the objective value. 
""" layout_dict = {} if layout: for pair in layout.split(";"): mtf_dimension_name, mesh_dimension_name = pair.split(":", 1) if (mtf_dimension_name in self._layout_validator.splittable_mtf_dimension_names): layout_dict[mtf_dimension_name] = mesh_dimension_name else: logging.warning("Skipping unsplittable dimension %s.", mtf_dimension_name) tensor_memory = {} # {string: float}, size of each tensor under our layout for tensor_name in self._graph.get_all_tensor_names(): if self._graph.is_tensor_on_canonical_device(tensor_name): tensor_memory[tensor_name] = self._graph.get_tensor_size( tensor_name, layout_dict, self._layout_validator.mesh_dimension_name_to_size) else: tensor_memory[tensor_name] = 0.0 peak_memory_usage = 0.0 for tensor_names in self._get_memory_contents(): memory_usage = 0.0 for tensor_name in tensor_names: memory_usage += tensor_memory[tensor_name] peak_memory_usage = max(peak_memory_usage, memory_usage) return peak_memory_usage def _global_var_name(splittable_dimension, mesh_dimension): """Name for a global variable. Args: splittable_dimension: the name of a splittable dimension (string) mesh_dimension: the name of a mesh dimension (string) Returns: A string, the variable name. """ return "x_({}:{})".format(splittable_dimension, mesh_dimension) def _local_var_name(splittable_dimensions, assignment): """Name for a local variable. Args: splittable_dimensions: frozenset of names of splittable dimensions. assignment: dict from names of splittable dimensions to names of mesh dimensions. Returns: A string, the variable name. """ assignment_string = [] for splittable in sorted(splittable_dimensions): if splittable in assignment: assignment_string.append("{}:{}".format(splittable, assignment[splittable])) else: assignment_string.append("{}".format(splittable)) return "y_(" + ",".join(assignment_string) + ")" def _generate_assignments(splittable_dimensions, mesh_dimension_to_size): """Generates all ways to map splittable dimensions to mesh dimensions. Args: splittable_dimensions: a frozenset of the names of splittable dimensions. mesh_dimension_to_size: a dictionary from mesh dimension name to size. Returns: A list of the valid assignments. Each assignment is a dict keyed by every splittable dimension, whose value is either a mesh dimension or None. """ assignments = [] for assignment_size in six.moves.xrange( 1 + min(len(splittable_dimensions), len(mesh_dimension_to_size))): for s_dims_chosen in itertools.combinations(splittable_dimensions, assignment_size): for m_dims_chosen in itertools.permutations(mesh_dimension_to_size, assignment_size): assignments.append(dict(zip(s_dims_chosen, m_dims_chosen))) return assignments
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Computes layouts for Mesh TensorFlow. Classes and methods to encode a Mesh TensorFlow computation as a series of Operations and then find a layout to minimize per-machine memory usage. Sample Usage: mtf_graph = mtf.Graph() mesh = mtf.Mesh(mtf_graph, "my_mesh") mesh_shape = mtf.convert_to_shape("m1:2;m2:2") # Add some operations to mesh using Mesh TensorFlow. estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape) optimizer = layout_optimizer.LayoutOptimizer(estimator) layout = optimizer.solve() """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools from absl import logging from mesh_tensorflow.auto_mtf import print_cp_model_solution from mesh_tensorflow.auto_mtf import scheduler import six from ortools.sat.python import cp_model class SolverError(Exception): pass class LayoutOptimizer(object): """Tries to compute a good layout for Mesh Tensorflow. Given a mesh shape (see Mesh TensorFlow) and several operations, computes a good layout (a mapping from TensorFlow dimensions to mesh dimensions) using integer programming. More formally, suppose T is the set of TensorFlow dimensions and M is the set of mesh dimensions. A layout L is a map from (T) to (M union {"unassigned"}), designating which tensor dimensions are split using which mesh dimensions. We wish to compute a layout that minimizes memory usage. Unfortunately, the memory usage doesn't just depend on the layout, but also on how the scheduler orders operations; whenever an operation is being performed, there are other tensors in memory besides that operation's input and output tensors. The layout, however, affects the size of the various tensors in memory, as well as *the amount of temporary memory the operation uses*. With this in mind, our (boolean) integer program to minimize memory is: Variables: x_{t->m}: "global" (boolean) variables; takes a value of 1 if t in T is assigned to m in M, and a value of 0 otherwise. y_{assignment}: "local" (boolean) variables; for every set of TensorFlow dimensions used in an operation or tensor and for every (valid) assignment from that set of TF dimensions to (M union {"unassigned"}), we have one of these which takes a value of 1 if the global variables completely agree with that assignment and 0 otherwise. z: memory (continuous) variable; the peak memory usage. Constraints: Operation Constraints: for every operation, no two dimensions used in that operation can be mapped to the same mesh dimension (since doing so would cause some of its computation to be skipped entirely). Global Constraints: we enforce that every TensorFlow dimension is assigned to at most one mesh dimension (it may be unassigned). (Optional) Divisibility Constraints: we enforce that a TensorFlow dimension can only be assigned to a mesh dimension if the latter's size evenly divides the former's size. 
Local Constraints: we enforce that out of all assignments that share a domain (i.e. set of TensorFlow dimensions), exactly one is chosen. Global-to-Local Constraints: we enforce that assignment(t) = m, then x_{t->m} must be 1 for y_{assignment} to be 1. We also enforce that if assignment(t) = "unassigned", then x_{t->m} must be 0 for all m in M. Memory Constraints: for every operation i, the peak memory usage z must be least the memory usage during that operation. The latter can be derived from memory_contents[i] and the local variables relevant to those tensors (their new sizes) and to the operation (temporary memory needed). Objective Function: We want to minimize the variable z. However, we want to tiebreak by the number of assigned dimensions (preferring more dimensions), so our modified objective is (#MTF Dimensions + 1) * z - sum x_{t->m}. Note that we prefer more splitting because in general splits result in smaller tensors and less duplicated work. """ def __init__(self, memory_estimator, scheduler_alg="LIST"): """Uses a auto_mtf.memory_estimator to set up the integer program. Args: memory_estimator: a memory_estimator.MemoryEstimator. scheduler_alg: an optional string, see scheduler.minimize_peak_memory. """ self._estimator = memory_estimator self._scheduler_alg = scheduler_alg self._layout_validator = self._estimator.get_layout_validator() self._graph = self._estimator.get_graph_interface() self._memory_contents = None # [frozenset(string)] # Initialize the model. self._model = cp_model.CpModel() self._preprocess_input() self._initialize_variables() self._add_constraints() self._build_objective_function() def _preprocess_input(self): """Computing useful input data structures to ease IP construction.""" # Compute the sets of MTF dimensions used in operations/tensors. # a {string: frozenset(string)}, mapping operation name to MTF dimension # names. self._operation_name_to_mtf_dimension_set = {} # a {string: frozenset(string)}, mapping tensor name to MTF dimension names. self._tensor_name_to_mtf_dimension_set = {} for operation_name in self._graph.get_all_operation_names(): self._operation_name_to_mtf_dimension_set[operation_name] = frozenset( set(self._graph.get_operation_mtf_dimension_names( operation_name)).intersection( self._layout_validator.splittable_mtf_dimension_names)) for tensor_name in self._graph.get_all_tensor_names(): self._tensor_name_to_mtf_dimension_set[tensor_name] = frozenset( set(self._graph.get_tensor_mtf_dimension_names(tensor_name)) .intersection(self._layout_validator.splittable_mtf_dimension_names)) self._operation_mtf_dimension_sets = set( self._operation_name_to_mtf_dimension_set.values()) self._mtf_dimension_sets = self._operation_mtf_dimension_sets | set( self._tensor_name_to_mtf_dimension_set.values()) # Compute possible assignments for each set of MTF dimensions. self._assignments = {} # indexed by MTF dimension set for mtf_dimension_set in self._mtf_dimension_sets: self._assignments[mtf_dimension_set] = _generate_assignments( mtf_dimension_set, self._layout_validator.mesh_dimension_name_to_size) def _initialize_variables(self): """Initializing the variables of the IP.""" # Initialize global variables. 
self._global_vars = {} # Indexed by (MTF dimension, mesh dimension) for mtf_dimension_name in ( self._layout_validator.splittable_mtf_dimension_names): for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): name = _global_var_name(mtf_dimension_name, mesh_dimension_name) self._global_vars[(mtf_dimension_name, mesh_dimension_name)] = ( self._model.NewBoolVar(name)) # Initialize local variables. self._local_vars = {} # Indexed by (tensorflow dimension set), then name of # assignment. for mtf_dimension_set in self._mtf_dimension_sets: self._local_vars[mtf_dimension_set] = {} for assignment in self._assignments[mtf_dimension_set]: # TODO(joshuawang): Avoid hash collision no matter what dimension names # are; don't hash by this local var name, swap to using a tuple encoding # of the full assignment instead. name = _local_var_name(mtf_dimension_set, assignment) self._local_vars[mtf_dimension_set][name] = ( self._model.NewBoolVar(name)) # Initialize memory variable. We need a crude upper bound on memory, so we # use the total size of all tensors under the empty assignment. # NOTE(joshuawang): This bound could be improved by factoring in the # schedule. memory_upper_bound = 0 for tensor_name in self._graph.get_all_tensor_names(): if self._graph.is_tensor_on_canonical_device(tensor_name): memory_upper_bound += int(self._graph.get_tensor_size(tensor_name)) self._memory_var = self._model.NewIntVar(0, memory_upper_bound, "z") def _add_constraints(self): """Adding constraints to the IP.""" # Add operation constraints. for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): for mtf_dimension_set in self._operation_mtf_dimension_sets: self._model.Add( sum(self._global_vars[(mtf_dimension_name, mesh_dimension_name)] for mtf_dimension_name in mtf_dimension_set) <= 1) # Add global constraints. for mtf_dimension_name in ( self._layout_validator.splittable_mtf_dimension_names): self._model.Add( sum(self._global_vars[(mtf_dimension_name, mesh_dimension_name)] for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size)) <= 1) # Add divisibility constraints. for mtf_dimension_name in ( self._layout_validator.splittable_mtf_dimension_names): for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): if not self._layout_validator.is_valid_assignment(mtf_dimension_name, mesh_dimension_name): self._model.Add(self._global_vars[(mtf_dimension_name, mesh_dimension_name)] == 0) # Add local constraints. for mtf_dimension_set in self._mtf_dimension_sets: self._model.Add( sum(self._local_vars[mtf_dimension_set][_local_var_name( mtf_dimension_set, assignment)] for assignment in self._assignments[mtf_dimension_set]) == 1) # Add local-to-global constraints. for mtf_dimension_set in self._mtf_dimension_sets: for assignment in self._assignments[mtf_dimension_set]: name = _local_var_name(mtf_dimension_set, assignment) for mtf_dimension_name in mtf_dimension_set: if mtf_dimension_name in assignment: mesh_dimension_name = assignment[mtf_dimension_name] self._model.AddImplication( self._local_vars[mtf_dimension_set][name], self._global_vars[(mtf_dimension_name, mesh_dimension_name)]) else: for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): self._model.AddImplication( self._global_vars[(mtf_dimension_name, mesh_dimension_name)], self._local_vars[mtf_dimension_set][name].Not()) # Add memory constraints. 
tensor_memory_sum = {} for tensor_name in self._graph.get_all_tensor_names(): tensor_memory_sum[tensor_name] = 0 mtf_dimension_set = self._tensor_name_to_mtf_dimension_set[tensor_name] if not self._graph.is_tensor_on_canonical_device(tensor_name): continue for assignment in self._assignments[mtf_dimension_set]: size_under_assignment = self._graph.get_tensor_size( tensor_name, assignment, self._layout_validator.mesh_dimension_name_to_size) name = _local_var_name(mtf_dimension_set, assignment) tensor_memory_sum[tensor_name] += ( size_under_assignment * self._local_vars[mtf_dimension_set][name]) for tensor_names in self._get_memory_contents(): self._model.Add( sum(tensor_memory_sum[tensor_name] for tensor_name in tensor_names) <= self._memory_var) def _build_objective_function(self): """Builds the objective function of the IP.""" # Break ties in favor of more assignments. scale = len(self._layout_validator.splittable_mtf_dimension_names) + 1 objective = scale * self._memory_var - sum(six.itervalues( self._global_vars)) self._model.Minimize(objective) def _get_memory_contents(self): """Runs the scheduler to determine memory contents at every point in time. Returns: a list of frozenset of strings, where the ith entry describes the tensors in memory when executing operation i (where schedule[i] is an index into GetAllOperationNames()). """ if self._memory_contents is not None: return self._memory_contents schedule = scheduler.minimize_peak_memory(self._graph, self._scheduler_alg) self._memory_contents = self._graph.compute_memory_contents_under_schedule( schedule) return self._memory_contents def solve(self, print_solution=False): """Solves the current integer program and returns the computed layout. Args: print_solution: An optional boolean indicating whether to print the full solution in human-readable format. Returns: The computed layout (as a string). Raises: SolverError: the internal solver could not find a solution, or the solution found is infeasible. """ # Solve and see how well the solver did. self._cp_solver = cp_model.CpSolver() status = self._cp_solver.Solve(self._model) if status != cp_model.OPTIMAL: if status == cp_model.FEASIBLE: logging.warning("A potentially suboptimal solution was found.") else: logging.error("Solver returned status %d.", status) raise SolverError("The solver could not solve the problem and returned " "status {}.".format(status)) # TODO(joshuawang): Verify the solver's solution. if print_solution: print_cp_model_solution.print_solution(self._model, self._cp_solver) # Reconstruct layout from solution. layout = [] for mtf_dimension_name in ( self._layout_validator.splittable_mtf_dimension_names): for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): value = self._cp_solver.Value(self._global_vars[(mtf_dimension_name, mesh_dimension_name)]) if value: # Value is integer. layout.append(mtf_dimension_name + ":" + mesh_dimension_name) layout.sort() return ";".join(layout) def evaluate_layout(self, layout): """The current objective value for the given layout. TODO(joshuawang): The current function does not check that the given layout is valid. Args: layout: a string, representing a layout to evaluate (e.g. "d_ff:m1;heads:m2"). Returns: A float, the objective value. 
""" layout_dict = {} if layout: for pair in layout.split(";"): mtf_dimension_name, mesh_dimension_name = pair.split(":", 1) if (mtf_dimension_name in self._layout_validator.splittable_mtf_dimension_names): layout_dict[mtf_dimension_name] = mesh_dimension_name else: logging.warning("Skipping unsplittable dimension %s.", mtf_dimension_name) tensor_memory = {} # {string: float}, size of each tensor under our layout for tensor_name in self._graph.get_all_tensor_names(): if self._graph.is_tensor_on_canonical_device(tensor_name): tensor_memory[tensor_name] = self._graph.get_tensor_size( tensor_name, layout_dict, self._layout_validator.mesh_dimension_name_to_size) else: tensor_memory[tensor_name] = 0.0 peak_memory_usage = 0.0 for tensor_names in self._get_memory_contents(): memory_usage = 0.0 for tensor_name in tensor_names: memory_usage += tensor_memory[tensor_name] peak_memory_usage = max(peak_memory_usage, memory_usage) return peak_memory_usage def _global_var_name(splittable_dimension, mesh_dimension): """Name for a global variable. Args: splittable_dimension: the name of a splittable dimension (string) mesh_dimension: the name of a mesh dimension (string) Returns: A string, the variable name. """ return "x_({}:{})".format(splittable_dimension, mesh_dimension) def _local_var_name(splittable_dimensions, assignment): """Name for a local variable. Args: splittable_dimensions: frozenset of names of splittable dimensions. assignment: dict from names of splittable dimensions to names of mesh dimensions. Returns: A string, the variable name. """ assignment_string = [] for splittable in sorted(splittable_dimensions): if splittable in assignment: assignment_string.append("{}:{}".format(splittable, assignment[splittable])) else: assignment_string.append("{}".format(splittable)) return "y_(" + ",".join(assignment_string) + ")" def _generate_assignments(splittable_dimensions, mesh_dimension_to_size): """Generates all ways to map splittable dimensions to mesh dimensions. Args: splittable_dimensions: a frozenset of the names of splittable dimensions. mesh_dimension_to_size: a dictionary from mesh dimension name to size. Returns: A list of the valid assignments. Each assignment is a dict keyed by every splittable dimension, whose value is either a mesh dimension or None. """ assignments = [] for assignment_size in six.moves.xrange( 1 + min(len(splittable_dimensions), len(mesh_dimension_to_size))): for s_dims_chosen in itertools.combinations(splittable_dimensions, assignment_size): for m_dims_chosen in itertools.permutations(mesh_dimension_to_size, assignment_size): assignments.append(dict(zip(s_dims_chosen, m_dims_chosen))) return assignments
en
0.833885
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Computes layouts for Mesh TensorFlow. Classes and methods to encode a Mesh TensorFlow computation as a series of Operations and then find a layout to minimize per-machine memory usage. Sample Usage: mtf_graph = mtf.Graph() mesh = mtf.Mesh(mtf_graph, "my_mesh") mesh_shape = mtf.convert_to_shape("m1:2;m2:2") # Add some operations to mesh using Mesh TensorFlow. estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape) optimizer = layout_optimizer.LayoutOptimizer(estimator) layout = optimizer.solve() Tries to compute a good layout for Mesh Tensorflow. Given a mesh shape (see Mesh TensorFlow) and several operations, computes a good layout (a mapping from TensorFlow dimensions to mesh dimensions) using integer programming. More formally, suppose T is the set of TensorFlow dimensions and M is the set of mesh dimensions. A layout L is a map from (T) to (M union {"unassigned"}), designating which tensor dimensions are split using which mesh dimensions. We wish to compute a layout that minimizes memory usage. Unfortunately, the memory usage doesn't just depend on the layout, but also on how the scheduler orders operations; whenever an operation is being performed, there are other tensors in memory besides that operation's input and output tensors. The layout, however, affects the size of the various tensors in memory, as well as *the amount of temporary memory the operation uses*. With this in mind, our (boolean) integer program to minimize memory is: Variables: x_{t->m}: "global" (boolean) variables; takes a value of 1 if t in T is assigned to m in M, and a value of 0 otherwise. y_{assignment}: "local" (boolean) variables; for every set of TensorFlow dimensions used in an operation or tensor and for every (valid) assignment from that set of TF dimensions to (M union {"unassigned"}), we have one of these which takes a value of 1 if the global variables completely agree with that assignment and 0 otherwise. z: memory (continuous) variable; the peak memory usage. Constraints: Operation Constraints: for every operation, no two dimensions used in that operation can be mapped to the same mesh dimension (since doing so would cause some of its computation to be skipped entirely). Global Constraints: we enforce that every TensorFlow dimension is assigned to at most one mesh dimension (it may be unassigned). (Optional) Divisibility Constraints: we enforce that a TensorFlow dimension can only be assigned to a mesh dimension if the latter's size evenly divides the former's size. Local Constraints: we enforce that out of all assignments that share a domain (i.e. set of TensorFlow dimensions), exactly one is chosen. Global-to-Local Constraints: we enforce that assignment(t) = m, then x_{t->m} must be 1 for y_{assignment} to be 1. We also enforce that if assignment(t) = "unassigned", then x_{t->m} must be 0 for all m in M. 
Memory Constraints: for every operation i, the peak memory usage z must be least the memory usage during that operation. The latter can be derived from memory_contents[i] and the local variables relevant to those tensors (their new sizes) and to the operation (temporary memory needed). Objective Function: We want to minimize the variable z. However, we want to tiebreak by the number of assigned dimensions (preferring more dimensions), so our modified objective is (#MTF Dimensions + 1) * z - sum x_{t->m}. Note that we prefer more splitting because in general splits result in smaller tensors and less duplicated work. Uses a auto_mtf.memory_estimator to set up the integer program. Args: memory_estimator: a memory_estimator.MemoryEstimator. scheduler_alg: an optional string, see scheduler.minimize_peak_memory. # [frozenset(string)] # Initialize the model. Computing useful input data structures to ease IP construction. # Compute the sets of MTF dimensions used in operations/tensors. # a {string: frozenset(string)}, mapping operation name to MTF dimension # names. # a {string: frozenset(string)}, mapping tensor name to MTF dimension names. # Compute possible assignments for each set of MTF dimensions. # indexed by MTF dimension set Initializing the variables of the IP. # Initialize global variables. # Indexed by (MTF dimension, mesh dimension) # Initialize local variables. # Indexed by (tensorflow dimension set), then name of # assignment. # TODO(joshuawang): Avoid hash collision no matter what dimension names # are; don't hash by this local var name, swap to using a tuple encoding # of the full assignment instead. # Initialize memory variable. We need a crude upper bound on memory, so we # use the total size of all tensors under the empty assignment. # NOTE(joshuawang): This bound could be improved by factoring in the # schedule. Adding constraints to the IP. # Add operation constraints. # Add global constraints. # Add divisibility constraints. # Add local constraints. # Add local-to-global constraints. # Add memory constraints. Builds the objective function of the IP. # Break ties in favor of more assignments. Runs the scheduler to determine memory contents at every point in time. Returns: a list of frozenset of strings, where the ith entry describes the tensors in memory when executing operation i (where schedule[i] is an index into GetAllOperationNames()). Solves the current integer program and returns the computed layout. Args: print_solution: An optional boolean indicating whether to print the full solution in human-readable format. Returns: The computed layout (as a string). Raises: SolverError: the internal solver could not find a solution, or the solution found is infeasible. # Solve and see how well the solver did. # TODO(joshuawang): Verify the solver's solution. # Reconstruct layout from solution. # Value is integer. The current objective value for the given layout. TODO(joshuawang): The current function does not check that the given layout is valid. Args: layout: a string, representing a layout to evaluate (e.g. "d_ff:m1;heads:m2"). Returns: A float, the objective value. # {string: float}, size of each tensor under our layout Name for a global variable. Args: splittable_dimension: the name of a splittable dimension (string) mesh_dimension: the name of a mesh dimension (string) Returns: A string, the variable name. Name for a local variable. Args: splittable_dimensions: frozenset of names of splittable dimensions. 
assignment: dict from names of splittable dimensions to names of mesh dimensions. Returns: A string, the variable name. Generates all ways to map splittable dimensions to mesh dimensions. Args: splittable_dimensions: a frozenset of the names of splittable dimensions. mesh_dimension_to_size: a dictionary from mesh dimension name to size. Returns: A list of the valid assignments. Each assignment is a dict keyed by every splittable dimension, whose value is either a mesh dimension or None.
2.978304
3
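The layout-optimizer record above builds one local boolean variable for every partial assignment that _generate_assignments enumerates, so the size of that enumeration determines how large the integer program gets. Below is a standalone sketch of the same enumeration with a worked toy input; the dimension names and mesh sizes are hypothetical, chosen only to keep the output small enough to list.

import itertools


def generate_assignments(splittable_dimensions, mesh_dimension_to_size):
    # Same scheme as _generate_assignments in the record above: every way to
    # map a subset of the splittable dimensions onto distinct mesh dimensions,
    # including the empty assignment.
    assignments = []
    max_size = min(len(splittable_dimensions), len(mesh_dimension_to_size))
    for assignment_size in range(1 + max_size):
        for s_dims in itertools.combinations(splittable_dimensions,
                                             assignment_size):
            for m_dims in itertools.permutations(mesh_dimension_to_size,
                                                 assignment_size):
                assignments.append(dict(zip(s_dims, m_dims)))
    return assignments


# Hypothetical toy input: two splittable MTF dimensions, two mesh dimensions.
toy = generate_assignments(frozenset(["batch", "d_model"]), {"m1": 2, "m2": 2})
print(len(toy))  # 7: one empty, four single-dimension, two two-dimension
# The seven assignments (in some order):
#   {}, {'batch': 'm1'}, {'batch': 'm2'}, {'d_model': 'm1'}, {'d_model': 'm2'},
#   {'batch': 'm1', 'd_model': 'm2'}, {'batch': 'm2', 'd_model': 'm1'}

Using permutations rather than a plain product keeps every assignment injective, which matches the operation constraint above that no two dimensions of one operation may share a mesh dimension.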
Source/A_Star_Search.py
TranPhu1999/Search_algorithm_with_visuallization
0
6631474
<filename>Source/A_Star_Search.py<gh_stars>0 import numpy as np from Support import visualize_maze, heuristic_euclidean, heuristic_manhatan, heuristic_euclidean_2 def a_star_search(maze_map,start,end, bonus = None): visited = [] queue = [(start[0],start[1])] # (x, y, heuristic + path cost, path cost) maze_map[start[0]][start[1]] = (0,0,0,0) while True: if len(queue)==0: print("Can not find any path!") break # find the block with the lowest cost to go to the End in the queue min = 0 for i in range(len(queue)): if maze_map[queue[i][0]][queue[i][1]][2] < maze_map[queue[min][0]][queue[min][1]][2]: min = i if queue[min][0] == end[0] and queue[min][1] == end[1]: break current = queue.pop(min) row = current[0] column = current[1] if (row,column) not in visited: visited.append((row,column)) # store coordinate of visited block for visuallize neighbor = [(row-1,column),(row,column+1),(row+1,column),(row,column-1)] # ^ > v < for i in range(len(neighbor)): x = neighbor[i][0] y = neighbor[i][1] # if neighbor is not wall and not in visited list if maze_map[x][y] != 'x' and (x,y) not in visited: # if neighbor is emty or have shorter path from the start new_cost = maze_map[row][column][3]+ 1 if (x,y) not in queue or maze_map[x][y][3] > new_cost: queue.append((x,y)) # save the coordinate of the current block as previous value maze_map[x][y] = (row,column,heuristic_euclidean_2((x,y),end)+new_cost,new_cost) # maze_map[x][y] = (row,column,heuristic_manhatan((x,y),end)+new_cost,new_cost) # maze_map[x][y] = (row,column,heuristic_euclidean((x,y),end)+new_cost,new_cost) route = [end[:2]] # recontruct path while route[-1][0] != start[0] or route[-1][1] != start[1]: route.append(maze_map[route[-1][0]][route[-1][1]][:2]) # visualize progress for i in range(len(visited)): if i < len(visited) - 1: visualize_maze(maze_map,bonus,start,end,[],visited[:i]) else: visualize_maze(maze_map,bonus,start,end,route,visited, len(route))
<filename>Source/A_Star_Search.py<gh_stars>0 import numpy as np from Support import visualize_maze, heuristic_euclidean, heuristic_manhatan, heuristic_euclidean_2 def a_star_search(maze_map,start,end, bonus = None): visited = [] queue = [(start[0],start[1])] # (x, y, heuristic + path cost, path cost) maze_map[start[0]][start[1]] = (0,0,0,0) while True: if len(queue)==0: print("Can not find any path!") break # find the block with the lowest cost to go to the End in the queue min = 0 for i in range(len(queue)): if maze_map[queue[i][0]][queue[i][1]][2] < maze_map[queue[min][0]][queue[min][1]][2]: min = i if queue[min][0] == end[0] and queue[min][1] == end[1]: break current = queue.pop(min) row = current[0] column = current[1] if (row,column) not in visited: visited.append((row,column)) # store coordinate of visited block for visuallize neighbor = [(row-1,column),(row,column+1),(row+1,column),(row,column-1)] # ^ > v < for i in range(len(neighbor)): x = neighbor[i][0] y = neighbor[i][1] # if neighbor is not wall and not in visited list if maze_map[x][y] != 'x' and (x,y) not in visited: # if neighbor is emty or have shorter path from the start new_cost = maze_map[row][column][3]+ 1 if (x,y) not in queue or maze_map[x][y][3] > new_cost: queue.append((x,y)) # save the coordinate of the current block as previous value maze_map[x][y] = (row,column,heuristic_euclidean_2((x,y),end)+new_cost,new_cost) # maze_map[x][y] = (row,column,heuristic_manhatan((x,y),end)+new_cost,new_cost) # maze_map[x][y] = (row,column,heuristic_euclidean((x,y),end)+new_cost,new_cost) route = [end[:2]] # recontruct path while route[-1][0] != start[0] or route[-1][1] != start[1]: route.append(maze_map[route[-1][0]][route[-1][1]][:2]) # visualize progress for i in range(len(visited)): if i < len(visited) - 1: visualize_maze(maze_map,bonus,start,end,[],visited[:i]) else: visualize_maze(maze_map,bonus,start,end,route,visited, len(route))
en
0.755473
# (x, y, heuristic + path cost, path cost) # find the block with the lowest cost to go to the End in the queue # store coordinate of visited block for visuallize # ^ > v < # if neighbor is not wall and not in visited list # if neighbor is emty or have shorter path from the start # save the coordinate of the current block as previous value # maze_map[x][y] = (row,column,heuristic_manhatan((x,y),end)+new_cost,new_cost) # maze_map[x][y] = (row,column,heuristic_euclidean((x,y),end)+new_cost,new_cost) # recontruct path # visualize progress
3.468321
3
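The A* record above ranks queue entries by heuristic-plus-path-cost, but the Support module that supplies heuristic_manhatan, heuristic_euclidean and heuristic_euclidean_2 is not part of the record. The sketch below shows what such grid heuristics conventionally compute, using the same (point, end) call shape; it is an assumption for illustration, not the repository's actual code, and it does not guess what distinguishes heuristic_euclidean_2 from the plain Euclidean version.

import math


def heuristic_manhattan(point, end):
    # Manhattan distance: admissible for the 4-directional moves used in the
    # neighbor list above (up, right, down, left).
    return abs(point[0] - end[0]) + abs(point[1] - end[1])


def heuristic_euclidean(point, end):
    # Straight-line distance: also admissible, but never larger than the
    # Manhattan distance, so it guides the search less aggressively.
    return math.sqrt((point[0] - end[0]) ** 2 + (point[1] - end[1]) ** 2)


print(heuristic_manhattan((0, 0), (3, 4)))  # 7
print(heuristic_euclidean((0, 0), (3, 4)))  # 5.0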
torchwt/engine/utils/model_input.py
frankhart2018/torchwt
2
6631475
<filename>torchwt/engine/utils/model_input.py from dataclasses import dataclass @dataclass class ModelInput: model_spec_file: str hyperparameter_spec_file: str
<filename>torchwt/engine/utils/model_input.py from dataclasses import dataclass @dataclass class ModelInput: model_spec_file: str hyperparameter_spec_file: str
none
1
1.610036
2
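The torchwt record above is a two-field dataclass. A minimal, self-contained usage sketch follows; the spec-file paths are hypothetical and not taken from the repository.

from dataclasses import dataclass


@dataclass
class ModelInput:
    model_spec_file: str
    hyperparameter_spec_file: str


# Hypothetical paths; torchwt's real spec layout is not shown in this record.
spec = ModelInput(model_spec_file="specs/model.yml",
                  hyperparameter_spec_file="specs/hparams.yml")
print(spec.model_spec_file)  # specs/model.yml
print(spec)  # ModelInput(model_spec_file='specs/model.yml', ...)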
tests/integration/data/customers.py
el-dot/securionpay-python
7
6631476
from tests.integration import random_email


def valid_customer_req(email=None, card=None):
    # Generate the e-mail per call: a default of random_email() would be
    # evaluated only once, at import time, and shared by every request.
    req = {"email": email if email is not None else random_email()}
    if card is not None:
        req["card"] = card
    return req
from tests.integration import random_email


def valid_customer_req(email=None, card=None):
    # Generate the e-mail per call: a default of random_email() would be
    # evaluated only once, at import time, and shared by every request.
    req = {"email": email if email is not None else random_email()}
    if card is not None:
        req["card"] = card
    return req
none
1
2.49195
2
test_looper_tests/test_manager_include_semantics.py
ufora/test-looper
3
6631477
<reponame>ufora/test-looper<filename>test_looper_tests/test_manager_include_semantics.py import unittest import os import logging import test_looper_tests.common as common import test_looper_tests.TestYamlFiles as TestYamlFiles import test_looper_tests.TestManagerTestHarness as TestManagerTestHarness import test_looper.data_model.BranchPinning as BranchPinning import test_looper.data_model.ImportExport as ImportExport import test_looper.data_model.TestDefinitionResolver as TestDefinitionResolver import textwrap common.configureLogging() class TestManagerIncludeSemanticsTests(unittest.TestCase): def test_basic_includes(self): repo_include_envdef = textwrap.dedent(""" looper_version: 4 environments: ${env_name}: platform: linux image: dockerfile_contents: hi variables: ${vname}: ${vdef} """) repo = textwrap.dedent(""" looper_version: 4 repos: include_from: reference: repo0/base includes: foreach: env_name: - e1 - e2 vname: - v1 vdef: - v2 repeat: - include_from/envdef.yml """) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base",[], "", {"envdef.yml": repo_include_envdef}) harness.manager.source_control.addCommit("repo0/c0", [], repo) resolver = harness.resolver() self.assertTrue(sorted(resolver.environmentsFor("repo0", "c0").keys()) == ["e1","e2"]) def test_include_includes(self): envdef2 = textwrap.dedent(""" looper_version: 4 repos: r: repo0/base """) envdef = textwrap.dedent(""" looper_version: 4 includes: - ./envdef2.yml """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() self.assertEqual(sorted(resolver.repoReferencesFor("repo0", "c0").keys()), ["r"]) def test_recursive_includes(self): envdef2 = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml """) envdef = textwrap.dedent(""" looper_version: 4 includes: - ./envdef2.yml """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() self.assertEqual(sorted(resolver.repoReferencesFor("repo0", "c0").keys()), []) def test_env_inheritance_in_included_files(self): lowest = textwrap.dedent(""" looper_version: 4 environments: root_env: platform: linux image: dockerfile_contents: hi derived: base: [root_env] """) repo = textwrap.dedent(""" looper_version: 4 repos: r: repo0/c0 includes: - r/lowest.yml environments: really_derived: base: [ derived ] """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], "looper_version: 4", {"lowest.yml": lowest}) harness.manager.source_control.addCommit("repo0/c1", [], repo) resolver = harness.resolver() self.assertEqual(sorted(resolver.environmentsFor("repo0", "c1").keys()), ["derived", "really_derived", "root_env"]) def test_recursive_includes_with_variables_that_expand_forever(self): envdef2 = textwrap.dedent(""" looper_version: 4 includes: - path: ./envdef.yml variables: var: v_${var} """) envdef = textwrap.dedent(""" looper_version: 4 includes: - path: ./envdef2.yml variables: var: v_${var} """) repo = textwrap.dedent(""" looper_version: 4 includes: - path: ./envdef.yml variables: var: v_0 """ ) harness = TestManagerTestHarness.getHarness() 
harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() with self.assertRaises(TestDefinitionResolver.TestResolutionException): resolver.repoReferencesFor("repo0", "c0") def test_includes_cant_redefine_repos(self): envdef2 = textwrap.dedent(""" looper_version: 4 repos: r: repo0/base """) envdef = textwrap.dedent(""" looper_version: 4 repos: r: repo0/base2 """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base", [], None) harness.manager.source_control.addCommit("repo0/base2", [], None) harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() with self.assertRaises(TestDefinitionResolver.TestResolutionException): resolver.repoReferencesFor("repo0", "c0") def test_includes_cant_redefine_environments(self): envdef2 = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi """) envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base", [], None) harness.manager.source_control.addCommit("repo0/base2", [], None) harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() with self.assertRaises(TestDefinitionResolver.TestResolutionException): resolver.repoReferencesFor("repo0", "c0") def test_includes_cant_redefine_tests(self): envdef2 = textwrap.dedent(""" looper_version: 4 environments: e1: platform: linux image: dockerfile_contents: hi tests: t: environment: e1 command: "./script.py 1" """) envdef = textwrap.dedent(""" looper_version: 4 environments: e2: platform: linux image: dockerfile_contents: hi tests: t: environment: e2 command: "./script.py 1" """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base", [], None) harness.manager.source_control.addCommit("repo0/base2", [], None) harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() with self.assertRaises(TestDefinitionResolver.TestResolutionException): resolver.repoReferencesFor("repo0", "c0") def test_includes_can_share_environments(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi tests: t1/e: command: "./script.py 1" """) envdef2 = textwrap.dedent(""" looper_version: 4 tests: t2/e: command: "./script.py 1" """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base", [], None) harness.manager.source_control.addCommit("repo0/base2", [], None) harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() self.assertEqual(sorted(resolver.testDefinitionsFor("repo0", "c0")), ["t1/e", "t2/e"]) def test_includes_use_correct_repo(self): envdef = textwrap.dedent(""" 
looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi tests: t1/e: command: "./script.py 1" """) envdef2 = textwrap.dedent(""" looper_version: 4 tests: t2/e: command: "./script.py 1" """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base", [], None) harness.manager.source_control.addCommit("repo0/base2", [], None) harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() self.assertEqual(sorted(resolver.testDefinitionsFor("repo0", "c0")), ["t1/e", "t2/e"]) def test_bad_include_preserves_pins(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: not_valid image: dockerfile_contents: hi tests: t1/e: command: "./script.py 1" """) repo = textwrap.dedent(""" looper_version: 4 repos: r: reference: repo0/c0 branch: master auto: true includes: - r/envdef.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], None, {"envdef.yml": envdef}) harness.manager.source_control.addCommit("repo0/c1", ["repo0/c0"], None, {"envdef.yml": envdef.replace("not_valid","linux")}) harness.manager.source_control.addCommit("repo0/test", [], repo) harness.manager.source_control.setBranch("repo0/master", "repo0/c0") harness.manager.source_control.setBranch("repo0/tester", "repo0/test") harness.markRepoListDirty() harness.consumeBackgroundTasks() with harness.database.view(): commit = harness.getCommit("repo0/test") self.assertEqual(len(commit.data.repos), 1) self.assertEqual(len(commit.data.tests), 0) branch = harness.database.Branch.lookupOne(reponame_and_branchname=("repo0","tester")) pins = harness.database.BranchPin.lookupAll(branch=branch) self.assertEqual(len(pins),1) self.assertTrue(pins[0].auto) harness.manager.source_control.setBranch("repo0/master", "repo0/c1") harness.markRepoListDirty() harness.consumeBackgroundTasks() with harness.database.view(): commit = branch.head self.assertEqual(len(commit.data.repos), 1) self.assertEqual(len(commit.data.tests), 1) def test_environment_overrides(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi test_stages: - command: preCommand e2: base: e test_stages: - command: preCommand2 tests: t1/e2: command: actualCommand """) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], envdef) resolver = harness.resolver() test = resolver.testDefinitionsFor("repo0", "c0")["t1/e2"] self.assertEqual([s.command for s in test.stages], ["preCommand", "preCommand2", "actualCommand"]) def test_configuration_override(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi test_stages: - command: preCommand test_configuration: override_at_root e2: base: [] test_configuration: override_at_mixin tests: t1: environment: e command: actualCommand t2: environment: e mixins: [e2] command: actualCommand t3: environment: e mixins: [e2] configuration: override_at_test_level command: actualCommand """) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], envdef) resolver = harness.resolver() self.assertEqual(resolver.testDefinitionsFor("repo0", "c0")["t1"].configuration, "override_at_root") self.assertEqual(resolver.testDefinitionsFor("repo0", 
"c0")["t2"].configuration, "override_at_mixin") self.assertEqual(resolver.testDefinitionsFor("repo0", "c0")["t3"].configuration, "override_at_test_level") def test_prioritization_filters(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi tests: foreach: name: [t1, t2] env: [e1, e2] repeat: ${name}/${env}: environment: e command: cmd prioritize: - 't1/*' - '*/e2' """) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], envdef) resolver = harness.resolver() tests = resolver.testDefinitionsFor("repo0", "c0").values() self.assertEqual( set([t.name for t in tests if not t.disabled]), set(["t1/e1","t1/e2","t2/e2"]) ) def test_environment_mixins(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi variables: v: e test_stages: - command: preCommand e2: base: [] test_stages: - command: preCommand2 variables: v: e2 tests: t1/e: mixins: [e2] command: actualCommand - v=${v} """) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], envdef) resolver = harness.resolver() test = resolver.testDefinitionsFor("repo0", "c0")["t1/e"] self.assertEqual([s.command for s in test.stages], ["preCommand" ,"preCommand2", "actualCommand - v=e2"]) print test
import unittest import os import logging import test_looper_tests.common as common import test_looper_tests.TestYamlFiles as TestYamlFiles import test_looper_tests.TestManagerTestHarness as TestManagerTestHarness import test_looper.data_model.BranchPinning as BranchPinning import test_looper.data_model.ImportExport as ImportExport import test_looper.data_model.TestDefinitionResolver as TestDefinitionResolver import textwrap common.configureLogging() class TestManagerIncludeSemanticsTests(unittest.TestCase): def test_basic_includes(self): repo_include_envdef = textwrap.dedent(""" looper_version: 4 environments: ${env_name}: platform: linux image: dockerfile_contents: hi variables: ${vname}: ${vdef} """) repo = textwrap.dedent(""" looper_version: 4 repos: include_from: reference: repo0/base includes: foreach: env_name: - e1 - e2 vname: - v1 vdef: - v2 repeat: - include_from/envdef.yml """) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base",[], "", {"envdef.yml": repo_include_envdef}) harness.manager.source_control.addCommit("repo0/c0", [], repo) resolver = harness.resolver() self.assertTrue(sorted(resolver.environmentsFor("repo0", "c0").keys()) == ["e1","e2"]) def test_include_includes(self): envdef2 = textwrap.dedent(""" looper_version: 4 repos: r: repo0/base """) envdef = textwrap.dedent(""" looper_version: 4 includes: - ./envdef2.yml """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() self.assertEqual(sorted(resolver.repoReferencesFor("repo0", "c0").keys()), ["r"]) def test_recursive_includes(self): envdef2 = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml """) envdef = textwrap.dedent(""" looper_version: 4 includes: - ./envdef2.yml """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() self.assertEqual(sorted(resolver.repoReferencesFor("repo0", "c0").keys()), []) def test_env_inheritance_in_included_files(self): lowest = textwrap.dedent(""" looper_version: 4 environments: root_env: platform: linux image: dockerfile_contents: hi derived: base: [root_env] """) repo = textwrap.dedent(""" looper_version: 4 repos: r: repo0/c0 includes: - r/lowest.yml environments: really_derived: base: [ derived ] """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], "looper_version: 4", {"lowest.yml": lowest}) harness.manager.source_control.addCommit("repo0/c1", [], repo) resolver = harness.resolver() self.assertEqual(sorted(resolver.environmentsFor("repo0", "c1").keys()), ["derived", "really_derived", "root_env"]) def test_recursive_includes_with_variables_that_expand_forever(self): envdef2 = textwrap.dedent(""" looper_version: 4 includes: - path: ./envdef.yml variables: var: v_${var} """) envdef = textwrap.dedent(""" looper_version: 4 includes: - path: ./envdef2.yml variables: var: v_${var} """) repo = textwrap.dedent(""" looper_version: 4 includes: - path: ./envdef.yml variables: var: v_0 """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = 
harness.resolver() with self.assertRaises(TestDefinitionResolver.TestResolutionException): resolver.repoReferencesFor("repo0", "c0") def test_includes_cant_redefine_repos(self): envdef2 = textwrap.dedent(""" looper_version: 4 repos: r: repo0/base """) envdef = textwrap.dedent(""" looper_version: 4 repos: r: repo0/base2 """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base", [], None) harness.manager.source_control.addCommit("repo0/base2", [], None) harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() with self.assertRaises(TestDefinitionResolver.TestResolutionException): resolver.repoReferencesFor("repo0", "c0") def test_includes_cant_redefine_environments(self): envdef2 = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi """) envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base", [], None) harness.manager.source_control.addCommit("repo0/base2", [], None) harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() with self.assertRaises(TestDefinitionResolver.TestResolutionException): resolver.repoReferencesFor("repo0", "c0") def test_includes_cant_redefine_tests(self): envdef2 = textwrap.dedent(""" looper_version: 4 environments: e1: platform: linux image: dockerfile_contents: hi tests: t: environment: e1 command: "./script.py 1" """) envdef = textwrap.dedent(""" looper_version: 4 environments: e2: platform: linux image: dockerfile_contents: hi tests: t: environment: e2 command: "./script.py 1" """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base", [], None) harness.manager.source_control.addCommit("repo0/base2", [], None) harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() with self.assertRaises(TestDefinitionResolver.TestResolutionException): resolver.repoReferencesFor("repo0", "c0") def test_includes_can_share_environments(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi tests: t1/e: command: "./script.py 1" """) envdef2 = textwrap.dedent(""" looper_version: 4 tests: t2/e: command: "./script.py 1" """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base", [], None) harness.manager.source_control.addCommit("repo0/base2", [], None) harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() self.assertEqual(sorted(resolver.testDefinitionsFor("repo0", "c0")), ["t1/e", "t2/e"]) def test_includes_use_correct_repo(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi tests: t1/e: command: "./script.py 1" """) 
envdef2 = textwrap.dedent(""" looper_version: 4 tests: t2/e: command: "./script.py 1" """) repo = textwrap.dedent(""" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/base", [], None) harness.manager.source_control.addCommit("repo0/base2", [], None) harness.manager.source_control.addCommit("repo0/c0", [], repo, {"envdef.yml": envdef, "envdef2.yml": envdef2}) resolver = harness.resolver() self.assertEqual(sorted(resolver.testDefinitionsFor("repo0", "c0")), ["t1/e", "t2/e"]) def test_bad_include_preserves_pins(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: not_valid image: dockerfile_contents: hi tests: t1/e: command: "./script.py 1" """) repo = textwrap.dedent(""" looper_version: 4 repos: r: reference: repo0/c0 branch: master auto: true includes: - r/envdef.yml """ ) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], None, {"envdef.yml": envdef}) harness.manager.source_control.addCommit("repo0/c1", ["repo0/c0"], None, {"envdef.yml": envdef.replace("not_valid","linux")}) harness.manager.source_control.addCommit("repo0/test", [], repo) harness.manager.source_control.setBranch("repo0/master", "repo0/c0") harness.manager.source_control.setBranch("repo0/tester", "repo0/test") harness.markRepoListDirty() harness.consumeBackgroundTasks() with harness.database.view(): commit = harness.getCommit("repo0/test") self.assertEqual(len(commit.data.repos), 1) self.assertEqual(len(commit.data.tests), 0) branch = harness.database.Branch.lookupOne(reponame_and_branchname=("repo0","tester")) pins = harness.database.BranchPin.lookupAll(branch=branch) self.assertEqual(len(pins),1) self.assertTrue(pins[0].auto) harness.manager.source_control.setBranch("repo0/master", "repo0/c1") harness.markRepoListDirty() harness.consumeBackgroundTasks() with harness.database.view(): commit = branch.head self.assertEqual(len(commit.data.repos), 1) self.assertEqual(len(commit.data.tests), 1) def test_environment_overrides(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi test_stages: - command: preCommand e2: base: e test_stages: - command: preCommand2 tests: t1/e2: command: actualCommand """) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], envdef) resolver = harness.resolver() test = resolver.testDefinitionsFor("repo0", "c0")["t1/e2"] self.assertEqual([s.command for s in test.stages], ["preCommand", "preCommand2", "actualCommand"]) def test_configuration_override(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi test_stages: - command: preCommand test_configuration: override_at_root e2: base: [] test_configuration: override_at_mixin tests: t1: environment: e command: actualCommand t2: environment: e mixins: [e2] command: actualCommand t3: environment: e mixins: [e2] configuration: override_at_test_level command: actualCommand """) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], envdef) resolver = harness.resolver() self.assertEqual(resolver.testDefinitionsFor("repo0", "c0")["t1"].configuration, "override_at_root") self.assertEqual(resolver.testDefinitionsFor("repo0", "c0")["t2"].configuration, "override_at_mixin") self.assertEqual(resolver.testDefinitionsFor("repo0", "c0")["t3"].configuration, 
"override_at_test_level") def test_prioritization_filters(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi tests: foreach: name: [t1, t2] env: [e1, e2] repeat: ${name}/${env}: environment: e command: cmd prioritize: - 't1/*' - '*/e2' """) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], envdef) resolver = harness.resolver() tests = resolver.testDefinitionsFor("repo0", "c0").values() self.assertEqual( set([t.name for t in tests if not t.disabled]), set(["t1/e1","t1/e2","t2/e2"]) ) def test_environment_mixins(self): envdef = textwrap.dedent(""" looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi variables: v: e test_stages: - command: preCommand e2: base: [] test_stages: - command: preCommand2 variables: v: e2 tests: t1/e: mixins: [e2] command: actualCommand - v=${v} """) harness = TestManagerTestHarness.getHarness() harness.manager.source_control.addCommit("repo0/c0", [], envdef) resolver = harness.resolver() test = resolver.testDefinitionsFor("repo0", "c0")["t1/e"] self.assertEqual([s.command for s in test.stages], ["preCommand" ,"preCommand2", "actualCommand - v=e2"]) print test
en
0.548165
looper_version: 4 environments: ${env_name}: platform: linux image: dockerfile_contents: hi variables: ${vname}: ${vdef} looper_version: 4 repos: include_from: reference: repo0/base includes: foreach: env_name: - e1 - e2 vname: - v1 vdef: - v2 repeat: - include_from/envdef.yml looper_version: 4 repos: r: repo0/base looper_version: 4 includes: - ./envdef2.yml looper_version: 4 includes: - ./envdef.yml looper_version: 4 includes: - ./envdef.yml looper_version: 4 includes: - ./envdef2.yml looper_version: 4 includes: - ./envdef.yml looper_version: 4 environments: root_env: platform: linux image: dockerfile_contents: hi derived: base: [root_env] looper_version: 4 repos: r: repo0/c0 includes: - r/lowest.yml environments: really_derived: base: [ derived ] looper_version: 4 includes: - path: ./envdef.yml variables: var: v_${var} looper_version: 4 includes: - path: ./envdef2.yml variables: var: v_${var} looper_version: 4 includes: - path: ./envdef.yml variables: var: v_0 looper_version: 4 repos: r: repo0/base looper_version: 4 repos: r: repo0/base2 looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml looper_version: 4 environments: e1: platform: linux image: dockerfile_contents: hi tests: t: environment: e1 command: "./script.py 1" looper_version: 4 environments: e2: platform: linux image: dockerfile_contents: hi tests: t: environment: e2 command: "./script.py 1" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi tests: t1/e: command: "./script.py 1" looper_version: 4 tests: t2/e: command: "./script.py 1" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi tests: t1/e: command: "./script.py 1" looper_version: 4 tests: t2/e: command: "./script.py 1" looper_version: 4 includes: - ./envdef.yml - ./envdef2.yml looper_version: 4 environments: e: platform: not_valid image: dockerfile_contents: hi tests: t1/e: command: "./script.py 1" looper_version: 4 repos: r: reference: repo0/c0 branch: master auto: true includes: - r/envdef.yml looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi test_stages: - command: preCommand e2: base: e test_stages: - command: preCommand2 tests: t1/e2: command: actualCommand looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi test_stages: - command: preCommand test_configuration: override_at_root e2: base: [] test_configuration: override_at_mixin tests: t1: environment: e command: actualCommand t2: environment: e mixins: [e2] command: actualCommand t3: environment: e mixins: [e2] configuration: override_at_test_level command: actualCommand looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi tests: foreach: name: [t1, t2] env: [e1, e2] repeat: ${name}/${env}: environment: e command: cmd prioritize: - 't1/*' - '*/e2' looper_version: 4 environments: e: platform: linux image: dockerfile_contents: hi variables: v: e test_stages: - command: preCommand e2: base: [] test_stages: - command: preCommand2 variables: v: e2 tests: t1/e: mixins: [e2] command: actualCommand - v=${v}
2.14716
2
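The `test_prioritization_filters` case above expects the `prioritize` globs `t1/*` and `*/e2` to leave exactly t1/e1, t1/e2 and t2/e2 enabled. A minimal standalone sketch of that glob matching, using only the standard library's fnmatch; the helper name is illustrative, not testlooper's API:

```python
from fnmatch import fnmatchcase

def enabled_tests(test_names, prioritize_patterns):
    """Return the test names that match at least one glob pattern."""
    return {
        name for name in test_names
        if any(fnmatchcase(name, pattern) for pattern in prioritize_patterns)
    }

# Mirrors the expectation in the test: t1/* keeps both t1 variants,
# */e2 keeps every test built against e2.
names = ["t1/e1", "t1/e2", "t2/e1", "t2/e2"]
assert enabled_tests(names, ["t1/*", "*/e2"]) == {"t1/e1", "t1/e2", "t2/e2"}
```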
beets/autotag/hooks.py
Thynix/beets
1
6631478
<gh_stars>1-10 # This file is part of beets. # Copyright 2011, <NAME>. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """Glue between metadata sources and the matching logic.""" from beets import plugins from beets.autotag import mb # Classes used to represent candidate options. class AlbumInfo(object): """Describes a canonical release that may be used to match a release in the library. Consists of these data members: - ``album``: the release title - ``album_id``: MusicBrainz ID; UUID fragment only - ``artist``: name of the release's primary artist - ``artist_id`` - ``tracks``: list of TrackInfo objects making up the release - ``asin``: Amazon ASIN - ``albumtype``: string describing the kind of release - ``va``: boolean: whether the release has "various artists" - ``year``: release year - ``month``: release month - ``day``: release day - ``label``: music label responsible for the release - ``mediums``: the number of discs in this release - ``artist_sort``: name of the release's artist for sorting - ``releasegroup_id``: MBID for the album's release group - ``catalognum``: the label's catalog number for the release - ``script``: character set used for metadata - ``language``: human language of the metadata - ``country``: the release country - ``albumstatus``: MusicBrainz release status (Official, etc.) - ``media``: delivery mechanism (Vinyl, etc.) - ``albumdisambig``: MusicBrainz release disambiguation comment The fields up through ``tracks`` are required. The others are optional and may be None. """ def __init__(self, album, album_id, artist, artist_id, tracks, asin=None, albumtype=None, va=False, year=None, month=None, day=None, label=None, mediums=None, artist_sort=None, releasegroup_id=None, catalognum=None, script=None, language=None, country=None, albumstatus=None, media=None, albumdisambig=None): self.album = album self.album_id = album_id self.artist = artist self.artist_id = artist_id self.tracks = tracks self.asin = asin self.albumtype = albumtype self.va = va self.year = year self.month = month self.day = day self.label = label self.mediums = mediums self.artist_sort = artist_sort self.releasegroup_id = releasegroup_id self.catalognum = catalognum self.script = script self.language = language self.country = country self.albumstatus = albumstatus self.media = media self.albumdisambig = albumdisambig class TrackInfo(object): """Describes a canonical track present on a release. Appears as part of an AlbumInfo's ``tracks`` list. Consists of these data members: - ``title``: name of the track - ``track_id``: MusicBrainz ID; UUID fragment only - ``artist``: individual track artist name - ``artist_id`` - ``length``: float: duration of the track in seconds - ``medium``: the disc number this track appears on in the album - ``medium_index``: the track's position on the disc - ``artist_sort``: name of the track artist for sorting - ``disctitle``: name of the individual medium (subtitle) Only ``title`` and ``track_id`` are required. The rest of the fields may be None. 
""" def __init__(self, title, track_id, artist=None, artist_id=None, length=None, medium=None, medium_index=None, artist_sort=None, disctitle=None): self.title = title self.track_id = track_id self.artist = artist self.artist_id = artist_id self.length = length self.medium = medium self.medium_index = medium_index self.artist_sort = artist_sort self.disctitle = disctitle # Aggregation of sources. def _album_for_id(album_id): """Get an album corresponding to a MusicBrainz release ID.""" return mb.album_for_id(album_id) def _track_for_id(track_id): """Get an item for a recording MBID.""" return mb.track_for_id(track_id) def _album_candidates(items, artist, album, va_likely): """Search for album matches. ``items`` is a list of Item objects that make up the album. ``artist`` and ``album`` are the respective names (strings), which may be derived from the item list or may be entered by the user. ``va_likely`` is a boolean indicating whether the album is likely to be a "various artists" release. """ out = [] # Base candidates if we have album and artist to match. if artist and album: out.extend(mb.match_album(artist, album, len(items))) # Also add VA matches from MusicBrainz where appropriate. if va_likely and album: out.extend(mb.match_album(None, album, len(items))) # Candidates from plugins. out.extend(plugins.candidates(items)) return out def _item_candidates(item, artist, title): """Search for item matches. ``item`` is the Item to be matched. ``artist`` and ``title`` are strings and either reflect the item or are specified by the user. """ out = [] # MusicBrainz candidates. if artist and title: out.extend(mb.match_track(artist, title)) # Plugin candidates. out.extend(plugins.item_candidates(item)) return out
# This file is part of beets. # Copyright 2011, <NAME>. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """Glue between metadata sources and the matching logic.""" from beets import plugins from beets.autotag import mb # Classes used to represent candidate options. class AlbumInfo(object): """Describes a canonical release that may be used to match a release in the library. Consists of these data members: - ``album``: the release title - ``album_id``: MusicBrainz ID; UUID fragment only - ``artist``: name of the release's primary artist - ``artist_id`` - ``tracks``: list of TrackInfo objects making up the release - ``asin``: Amazon ASIN - ``albumtype``: string describing the kind of release - ``va``: boolean: whether the release has "various artists" - ``year``: release year - ``month``: release month - ``day``: release day - ``label``: music label responsible for the release - ``mediums``: the number of discs in this release - ``artist_sort``: name of the release's artist for sorting - ``releasegroup_id``: MBID for the album's release group - ``catalognum``: the label's catalog number for the release - ``script``: character set used for metadata - ``language``: human language of the metadata - ``country``: the release country - ``albumstatus``: MusicBrainz release status (Official, etc.) - ``media``: delivery mechanism (Vinyl, etc.) - ``albumdisambig``: MusicBrainz release disambiguation comment The fields up through ``tracks`` are required. The others are optional and may be None. """ def __init__(self, album, album_id, artist, artist_id, tracks, asin=None, albumtype=None, va=False, year=None, month=None, day=None, label=None, mediums=None, artist_sort=None, releasegroup_id=None, catalognum=None, script=None, language=None, country=None, albumstatus=None, media=None, albumdisambig=None): self.album = album self.album_id = album_id self.artist = artist self.artist_id = artist_id self.tracks = tracks self.asin = asin self.albumtype = albumtype self.va = va self.year = year self.month = month self.day = day self.label = label self.mediums = mediums self.artist_sort = artist_sort self.releasegroup_id = releasegroup_id self.catalognum = catalognum self.script = script self.language = language self.country = country self.albumstatus = albumstatus self.media = media self.albumdisambig = albumdisambig class TrackInfo(object): """Describes a canonical track present on a release. Appears as part of an AlbumInfo's ``tracks`` list. Consists of these data members: - ``title``: name of the track - ``track_id``: MusicBrainz ID; UUID fragment only - ``artist``: individual track artist name - ``artist_id`` - ``length``: float: duration of the track in seconds - ``medium``: the disc number this track appears on in the album - ``medium_index``: the track's position on the disc - ``artist_sort``: name of the track artist for sorting - ``disctitle``: name of the individual medium (subtitle) Only ``title`` and ``track_id`` are required. The rest of the fields may be None. 
""" def __init__(self, title, track_id, artist=None, artist_id=None, length=None, medium=None, medium_index=None, artist_sort=None, disctitle=None): self.title = title self.track_id = track_id self.artist = artist self.artist_id = artist_id self.length = length self.medium = medium self.medium_index = medium_index self.artist_sort = artist_sort self.disctitle = disctitle # Aggregation of sources. def _album_for_id(album_id): """Get an album corresponding to a MusicBrainz release ID.""" return mb.album_for_id(album_id) def _track_for_id(track_id): """Get an item for a recording MBID.""" return mb.track_for_id(track_id) def _album_candidates(items, artist, album, va_likely): """Search for album matches. ``items`` is a list of Item objects that make up the album. ``artist`` and ``album`` are the respective names (strings), which may be derived from the item list or may be entered by the user. ``va_likely`` is a boolean indicating whether the album is likely to be a "various artists" release. """ out = [] # Base candidates if we have album and artist to match. if artist and album: out.extend(mb.match_album(artist, album, len(items))) # Also add VA matches from MusicBrainz where appropriate. if va_likely and album: out.extend(mb.match_album(None, album, len(items))) # Candidates from plugins. out.extend(plugins.candidates(items)) return out def _item_candidates(item, artist, title): """Search for item matches. ``item`` is the Item to be matched. ``artist`` and ``title`` are strings and either reflect the item or are specified by the user. """ out = [] # MusicBrainz candidates. if artist and title: out.extend(mb.match_track(artist, title)) # Plugin candidates. out.extend(plugins.item_candidates(item)) return out
en
0.830135
# This file is part of beets. # Copyright 2011, <NAME>. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. Glue between metadata sources and the matching logic. # Classes used to represent candidate options. Describes a canonical release that may be used to match a release in the library. Consists of these data members: - ``album``: the release title - ``album_id``: MusicBrainz ID; UUID fragment only - ``artist``: name of the release's primary artist - ``artist_id`` - ``tracks``: list of TrackInfo objects making up the release - ``asin``: Amazon ASIN - ``albumtype``: string describing the kind of release - ``va``: boolean: whether the release has "various artists" - ``year``: release year - ``month``: release month - ``day``: release day - ``label``: music label responsible for the release - ``mediums``: the number of discs in this release - ``artist_sort``: name of the release's artist for sorting - ``releasegroup_id``: MBID for the album's release group - ``catalognum``: the label's catalog number for the release - ``script``: character set used for metadata - ``language``: human language of the metadata - ``country``: the release country - ``albumstatus``: MusicBrainz release status (Official, etc.) - ``media``: delivery mechanism (Vinyl, etc.) - ``albumdisambig``: MusicBrainz release disambiguation comment The fields up through ``tracks`` are required. The others are optional and may be None. Describes a canonical track present on a release. Appears as part of an AlbumInfo's ``tracks`` list. Consists of these data members: - ``title``: name of the track - ``track_id``: MusicBrainz ID; UUID fragment only - ``artist``: individual track artist name - ``artist_id`` - ``length``: float: duration of the track in seconds - ``medium``: the disc number this track appears on in the album - ``medium_index``: the track's position on the disc - ``artist_sort``: name of the track artist for sorting - ``disctitle``: name of the individual medium (subtitle) Only ``title`` and ``track_id`` are required. The rest of the fields may be None. # Aggregation of sources. Get an album corresponding to a MusicBrainz release ID. Get an item for a recording MBID. Search for album matches. ``items`` is a list of Item objects that make up the album. ``artist`` and ``album`` are the respective names (strings), which may be derived from the item list or may be entered by the user. ``va_likely`` is a boolean indicating whether the album is likely to be a "various artists" release. # Base candidates if we have album and artist to match. # Also add VA matches from MusicBrainz where appropriate. # Candidates from plugins. Search for item matches. ``item`` is the Item to be matched. ``artist`` and ``title`` are strings and either reflect the item or are specified by the user. # MusicBrainz candidates. # Plugin candidates.
1.777026
2
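The hooks module above is mostly plain data holders (AlbumInfo, TrackInfo) plus `_album_candidates`, which concatenates matches from a primary artist/album query, an optional various-artists query, and plugin sources. A self-contained sketch of that aggregation pattern with stand-in source functions (the stubs are illustrative, not beets' real mb/plugins calls):

```python
def match_album(artist, album, track_count):
    # Stand-in for a metadata-source query (e.g. a MusicBrainz search).
    return ["mb:%s - %s (%d tracks)" % (artist or "VA", album, track_count)]

def plugin_candidates(items):
    # Stand-in for plugin-provided candidates.
    return ["plugin:%d items" % len(items)]

def album_candidates(items, artist, album, va_likely):
    out = []
    if artist and album:                     # primary artist/album query
        out.extend(match_album(artist, album, len(items)))
    if va_likely and album:                  # extra "various artists" query
        out.extend(match_album(None, album, len(items)))
    out.extend(plugin_candidates(items))     # anything plugins contribute
    return out

print(album_candidates(["a.mp3", "b.mp3"], "Some Artist", "Some Album", va_likely=True))
```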
commands/relpath.py
dtebbs/tzbuild
0
6631479
<filename>commands/relpath.py<gh_stars>0
from os.path import relpath as path_relpath
from sys import argv


def usage():
    print "Usage: %s <path> [<path> ...]"
    print ""


def relpath():
    args = argv[1:]
    if 0 == len(args):
        usage()
        exit(1)
    for a in args:
        print path_relpath(a, ".")
    return 0


if "__main__" == __name__:
    exit(relpath())
<filename>commands/relpath.py<gh_stars>0
from os.path import relpath as path_relpath
from sys import argv


def usage():
    print "Usage: %s <path> [<path> ...]"
    print ""


def relpath():
    args = argv[1:]
    if 0 == len(args):
        usage()
        exit(1)
    for a in args:
        print path_relpath(a, ".")
    return 0


if "__main__" == __name__:
    exit(relpath())
none
1
3.049256
3
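The command above is a thin Python 2 wrapper around `os.path.relpath`, printing each argument relative to the current directory. For reference, a small Python 3 usage sketch of the same stdlib call (outputs shown assume a POSIX path separator):

```python
from os.path import relpath

# relpath(path, start=".") rewrites `path` relative to `start`.
print(relpath("/usr/local/bin/python", "/usr/local"))  # bin/python
print(relpath("docs/index.rst", "docs"))               # index.rst
print(relpath("a/b", "a/c"))                           # ../b
```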
flag_engine/features/models.py
Flagsmith/flagsmith-engine
4
6631480
<reponame>Flagsmith/flagsmith-engine<filename>flag_engine/features/models.py import typing import uuid from dataclasses import dataclass, field from flag_engine.utils.hashing import get_hashed_percentage_for_object_ids @dataclass class FeatureModel: id: int name: str type: str def __eq__(self, other): return self.id == other.id def __hash__(self): return hash(self.id) @dataclass class MultivariateFeatureOptionModel: value: typing.Any id: int = None @dataclass class MultivariateFeatureStateValueModel: multivariate_feature_option: MultivariateFeatureOptionModel percentage_allocation: float id: int = None mv_fs_value_uuid: str = field(default_factory=uuid.uuid4) @dataclass class FeatureStateModel: feature: FeatureModel enabled: bool django_id: int = None featurestate_uuid: str = field(default_factory=uuid.uuid4) _value: typing.Any = field(default=None, init=False) multivariate_feature_state_values: typing.List[ MultivariateFeatureStateValueModel ] = field(default_factory=list) def set_value(self, value: typing.Any): self._value = value def get_value(self, identity_id: int = None): if identity_id and len(self.multivariate_feature_state_values) > 0: return self._get_multivariate_value(identity_id) return self._value def get_feature_state_value(self): """Mimick django method name to simplify serialization logic""" return self.get_value() def _get_multivariate_value(self, identity_id: int) -> typing.Any: percentage_value = get_hashed_percentage_for_object_ids( [self.django_id or self.featurestate_uuid, identity_id] ) # Iterate over the mv options in order of id (so we get the same value each # time) to determine the correct value to return to the identity based on # the percentage allocations of the multivariate options. This gives us a # way to ensure that the same value is returned every time we use the same # percentage value. start_percentage = 0 for mv_value in sorted( self.multivariate_feature_state_values, key=lambda v: v.id or v.mv_fs_value_uuid, ): limit = mv_value.percentage_allocation + start_percentage if start_percentage <= percentage_value < limit: return mv_value.multivariate_feature_option.value start_percentage = limit # default to return the control value if no MV values found, although this # should never happen return self._value
import typing import uuid from dataclasses import dataclass, field from flag_engine.utils.hashing import get_hashed_percentage_for_object_ids @dataclass class FeatureModel: id: int name: str type: str def __eq__(self, other): return self.id == other.id def __hash__(self): return hash(self.id) @dataclass class MultivariateFeatureOptionModel: value: typing.Any id: int = None @dataclass class MultivariateFeatureStateValueModel: multivariate_feature_option: MultivariateFeatureOptionModel percentage_allocation: float id: int = None mv_fs_value_uuid: str = field(default_factory=uuid.uuid4) @dataclass class FeatureStateModel: feature: FeatureModel enabled: bool django_id: int = None featurestate_uuid: str = field(default_factory=uuid.uuid4) _value: typing.Any = field(default=None, init=False) multivariate_feature_state_values: typing.List[ MultivariateFeatureStateValueModel ] = field(default_factory=list) def set_value(self, value: typing.Any): self._value = value def get_value(self, identity_id: int = None): if identity_id and len(self.multivariate_feature_state_values) > 0: return self._get_multivariate_value(identity_id) return self._value def get_feature_state_value(self): """Mimick django method name to simplify serialization logic""" return self.get_value() def _get_multivariate_value(self, identity_id: int) -> typing.Any: percentage_value = get_hashed_percentage_for_object_ids( [self.django_id or self.featurestate_uuid, identity_id] ) # Iterate over the mv options in order of id (so we get the same value each # time) to determine the correct value to return to the identity based on # the percentage allocations of the multivariate options. This gives us a # way to ensure that the same value is returned every time we use the same # percentage value. start_percentage = 0 for mv_value in sorted( self.multivariate_feature_state_values, key=lambda v: v.id or v.mv_fs_value_uuid, ): limit = mv_value.percentage_allocation + start_percentage if start_percentage <= percentage_value < limit: return mv_value.multivariate_feature_option.value start_percentage = limit # default to return the control value if no MV values found, although this # should never happen return self._value
en
0.786882
Mimick django method name to simplify serialization logic # Iterate over the mv options in order of id (so we get the same value each # time) to determine the correct value to return to the identity based on # the percentage allocations of the multivariate options. This gives us a # way to ensure that the same value is returned every time we use the same # percentage value. # default to return the control value if no MV values found, although this # should never happen
2.208433
2
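`FeatureStateModel._get_multivariate_value` above hashes the feature-state and identity ids into a percentage, walks the multivariate options in a stable order, and returns the option whose cumulative allocation bucket contains that percentage, falling back to the control value. A standalone sketch of that bucket walk; the md5-based hash here is only a stand-in for `get_hashed_percentage_for_object_ids`, and all names are illustrative:

```python
import hashlib

def hashed_percentage(object_ids):
    # Illustrative stand-in: map ids deterministically to a value in [0, 100).
    digest = hashlib.md5(",".join(str(i) for i in object_ids).encode()).hexdigest()
    return (int(digest, 16) % 10000) / 100.0

def pick_variant(feature_state_id, identity_id, allocations, control_value):
    """allocations: list of (value, percentage_allocation) pairs in a stable order."""
    percentage = hashed_percentage([feature_state_id, identity_id])
    start = 0.0
    for value, share in allocations:
        limit = start + share
        if start <= percentage < limit:
            return value
        start = limit
    return control_value  # no bucket matched: fall back to the control value

print(pick_variant("fs-1", "user-42", [("red", 30.0), ("blue", 30.0)], "control"))
```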
face_sdk/api_usage/actually_run_all.py
subin4420/doitagain
0
6631481
<reponame>subin4420/doitagain import sys import face_merge sys.path.append('.') import logging mpl_logger = logging.getLogger('matplotlib') mpl_logger.setLevel(logging.WARNING) import logging.config logging.config.fileConfig("config/logging.conf") logger = logging.getLogger('api') import yaml import cv2 import numpy as np #-----------------detect------------------- from core.model_loader.face_detection.FaceDetModelLoader import FaceDetModelLoader from core.model_handler.face_detection.FaceDetModelHandler import FaceDetModelHandler #-----------------align------------------- from core.model_loader.face_alignment.FaceAlignModelLoader import FaceAlignModelLoader from core.model_handler.face_alignment.FaceAlignModelHandler import FaceAlignModelHandler #----------------crop--------------------- from core.image_cropper.arcface_cropper.FaceRecImageCropper import FaceRecImageCropper #----------------------feature---------------------- from core.model_loader.face_recognition.FaceRecModelLoader import FaceRecModelLoader from core.model_handler.face_recognition.FaceRecModelHandler import FaceRecModelHandler with open('config/model_conf.yaml') as f: model_conf = yaml.load(f, Loader=yaml.Loader) if __name__ == '__main__': # common setting for all model, need not modify. model_path = 'models' # model setting, modified along with model scene = 'non-mask' model_category = 'face_detection' model_name = model_conf[scene][model_category] # load model try: faceDetModelLoader = FaceDetModelLoader(model_path, model_category, model_name) except Exception as e: logger.error('Failed to parse model configuration file!') logger.error(e) sys.exit(-1) try: model, cfg = faceDetModelLoader.load_model() except Exception as e: logger.error('Model loading failed!') logger.error(e) sys.exit(-1) # read image image_path_pic1 = 'api_usage/temp_pic/pic1.jpg' image_path_pic2 = 'api_usage/temp_pic/pic2.jpg' image1 = cv2.imread(image_path_pic1, cv2.IMREAD_COLOR) image2 = cv2.imread(image_path_pic2, cv2.IMREAD_COLOR) ''' cv2.imshow("pic1", image1) cv2.waitKey() cv2.destroyAllWindows() cv2.imshow("pic2", image2) cv2.waitKey() cv2.destroyAllWindows() ''' faceDetModelHandler = FaceDetModelHandler(model, 'cpu', cfg) try: dets_pic1 = faceDetModelHandler.inference_on_image(image1) dets_pic2 = faceDetModelHandler.inference_on_image(image2) except Exception as e: logger.error('Face detection failed!') logger.error(e) sys.exit(-1) pic1_box = dets_pic1 line1 = str(int(pic1_box[0][0])) + " " + str(int(pic1_box[0][1])) + " " + \ str(int(pic1_box[0][2])) + " " + str(int(pic1_box[0][3])) + " " + \ str(pic1_box[0][4]) + " \n" pic2_box = dets_pic2 line2 = str(int(pic2_box[0][0])) + " " + str(int(pic2_box[0][1])) + " " + \ str(int(pic2_box[0][2])) + " " + str(int(pic2_box[0][3])) + " " + \ str(pic2_box[0][4]) + " \n" #★☆★★☆★★☆★★☆★detect fin★☆★★☆★★☆★★☆★★☆★ #-------------------alignment start------------------------- #change model_category model_category = 'face_alignment' model_name = model_conf[scene][model_category] try: faceAlignModelLoader = FaceAlignModelLoader(model_path, model_category, model_name) except Exception as e: logger.error('Failed to parse model configuration file!') logger.error(e) sys.exit(-1) try: model, cfg = faceAlignModelLoader.load_model() except Exception as e: logger.error('Model loading failed!') logger.error(e) sys.exit(-1) faceAlignModelHandler = FaceAlignModelHandler(model, 'cpu', cfg) image_path_pic1 = 'api_usage/temp_pic/pic1.jpg' image_det_txt_path_pic1 = 'api_usage/temp_pic/pic1_detect_res.txt' image_pic1 = 
cv2.imread(image_path_pic1, cv2.IMREAD_COLOR) image_path_pic2 = 'api_usage/temp_pic/pic2.jpg' image_det_txt_path_pic2 = 'api_usage/temp_pic/pic2_detect_res.txt' image_pic2 = cv2.imread(image_path_pic2, cv2.IMREAD_COLOR) # ----------------------pic landmark---------------------- try: line1 = line1.strip().split() det_pic1 = np.asarray(list(map(int, line1[0:4])), dtype=np.int32) landmarks_pic1 = faceAlignModelHandler.inference_on_image(image1, det_pic1) line2 = line2.strip().split() det_pic2 = np.asarray(list(map(int, line2[0:4])), dtype=np.int32) landmarks_pic2 = faceAlignModelHandler.inference_on_image(image2, det_pic2) except Exception as e: logger.error('Face landmark failed!') logger.error(e) sys.exit(-1) # ★☆★★☆★★☆★★☆★alignment fin★☆★★☆★★☆★★☆★★☆★ #-----------------------crop start-------------------------- face_cropper = FaceRecImageCropper() image1 = cv2.imread(image_path_pic1) image2 = cv2.imread(image_path_pic2) #landmark의 차원수를 낮추기 위해 flatten() 사용 flatten_landmarks1 = np.array(landmarks_pic1).flatten().tolist() flatten_landmarks2 = np.array(landmarks_pic2).flatten().tolist() landmarks1 = [float(num) for num in flatten_landmarks1] cropped_image1 = face_cropper.crop_image_by_mat(image1, landmarks1) landmarks2 = [float(num) for num in flatten_landmarks2] cropped_image2 = face_cropper.crop_image_by_mat(image2, landmarks2) ''' cv2.imshow("cropped_image1", cropped_image1) cv2.waitKey() cv2.destroyAllWindows() cv2.imshow("cropped_image2", cropped_image2) cv2.waitKey() cv2.destroyAllWindows() ''' #크롭된 이미지 저장하는 문장 #cv2.imwrite('api_usage/temp_pic/pic1_cropped.jpg', cropped_image1) #cv2.imwrite('api_usage/temp_pic/pic2_cropped.jpg', cropped_image2) # ★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆crop fin★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆ #----------------------feature start-------------------------- model_category = 'face_recognition' model_name = model_conf[scene][model_category] try: faceRecModelLoader = FaceRecModelLoader(model_path, model_category, model_name) except Exception as e: logger.error('Failed to parse model configuration file!') logger.error(e) sys.exit(-1) try: model, cfg = faceRecModelLoader.load_model() except Exception as e: logger.error('Model loading failed!') logger.error(e) sys.exit(-1) faceRecModelHandler = FaceRecModelHandler(model, 'cpu', cfg) try: # 여기서 오류 #크롭된 이미지의 피쳐를 뽑아냄 feature1 = faceRecModelHandler.inference_on_image(cropped_image1) feature2 = faceRecModelHandler.inference_on_image(cropped_image2) except Exception as e: logger.error('Failed to extract facial features!') logger.error(e) sys.exit(-1) # ★☆★★☆★★☆★★☆★feature fin★☆★★☆★★☆★★☆★★☆★ #----------------------pipline start-------------------------- #점수 계산 pic1_crop의 feature와 pic2_crop의 feature dot 연산 score = np.dot(feature1, feature2) print('The score for pic1 and pic2 is', score)
import sys import face_merge sys.path.append('.') import logging mpl_logger = logging.getLogger('matplotlib') mpl_logger.setLevel(logging.WARNING) import logging.config logging.config.fileConfig("config/logging.conf") logger = logging.getLogger('api') import yaml import cv2 import numpy as np #-----------------detect------------------- from core.model_loader.face_detection.FaceDetModelLoader import FaceDetModelLoader from core.model_handler.face_detection.FaceDetModelHandler import FaceDetModelHandler #-----------------align------------------- from core.model_loader.face_alignment.FaceAlignModelLoader import FaceAlignModelLoader from core.model_handler.face_alignment.FaceAlignModelHandler import FaceAlignModelHandler #----------------crop--------------------- from core.image_cropper.arcface_cropper.FaceRecImageCropper import FaceRecImageCropper #----------------------feature---------------------- from core.model_loader.face_recognition.FaceRecModelLoader import FaceRecModelLoader from core.model_handler.face_recognition.FaceRecModelHandler import FaceRecModelHandler with open('config/model_conf.yaml') as f: model_conf = yaml.load(f, Loader=yaml.Loader) if __name__ == '__main__': # common setting for all model, need not modify. model_path = 'models' # model setting, modified along with model scene = 'non-mask' model_category = 'face_detection' model_name = model_conf[scene][model_category] # load model try: faceDetModelLoader = FaceDetModelLoader(model_path, model_category, model_name) except Exception as e: logger.error('Failed to parse model configuration file!') logger.error(e) sys.exit(-1) try: model, cfg = faceDetModelLoader.load_model() except Exception as e: logger.error('Model loading failed!') logger.error(e) sys.exit(-1) # read image image_path_pic1 = 'api_usage/temp_pic/pic1.jpg' image_path_pic2 = 'api_usage/temp_pic/pic2.jpg' image1 = cv2.imread(image_path_pic1, cv2.IMREAD_COLOR) image2 = cv2.imread(image_path_pic2, cv2.IMREAD_COLOR) ''' cv2.imshow("pic1", image1) cv2.waitKey() cv2.destroyAllWindows() cv2.imshow("pic2", image2) cv2.waitKey() cv2.destroyAllWindows() ''' faceDetModelHandler = FaceDetModelHandler(model, 'cpu', cfg) try: dets_pic1 = faceDetModelHandler.inference_on_image(image1) dets_pic2 = faceDetModelHandler.inference_on_image(image2) except Exception as e: logger.error('Face detection failed!') logger.error(e) sys.exit(-1) pic1_box = dets_pic1 line1 = str(int(pic1_box[0][0])) + " " + str(int(pic1_box[0][1])) + " " + \ str(int(pic1_box[0][2])) + " " + str(int(pic1_box[0][3])) + " " + \ str(pic1_box[0][4]) + " \n" pic2_box = dets_pic2 line2 = str(int(pic2_box[0][0])) + " " + str(int(pic2_box[0][1])) + " " + \ str(int(pic2_box[0][2])) + " " + str(int(pic2_box[0][3])) + " " + \ str(pic2_box[0][4]) + " \n" #★☆★★☆★★☆★★☆★detect fin★☆★★☆★★☆★★☆★★☆★ #-------------------alignment start------------------------- #change model_category model_category = 'face_alignment' model_name = model_conf[scene][model_category] try: faceAlignModelLoader = FaceAlignModelLoader(model_path, model_category, model_name) except Exception as e: logger.error('Failed to parse model configuration file!') logger.error(e) sys.exit(-1) try: model, cfg = faceAlignModelLoader.load_model() except Exception as e: logger.error('Model loading failed!') logger.error(e) sys.exit(-1) faceAlignModelHandler = FaceAlignModelHandler(model, 'cpu', cfg) image_path_pic1 = 'api_usage/temp_pic/pic1.jpg' image_det_txt_path_pic1 = 'api_usage/temp_pic/pic1_detect_res.txt' image_pic1 = cv2.imread(image_path_pic1, 
cv2.IMREAD_COLOR) image_path_pic2 = 'api_usage/temp_pic/pic2.jpg' image_det_txt_path_pic2 = 'api_usage/temp_pic/pic2_detect_res.txt' image_pic2 = cv2.imread(image_path_pic2, cv2.IMREAD_COLOR) # ----------------------pic landmark---------------------- try: line1 = line1.strip().split() det_pic1 = np.asarray(list(map(int, line1[0:4])), dtype=np.int32) landmarks_pic1 = faceAlignModelHandler.inference_on_image(image1, det_pic1) line2 = line2.strip().split() det_pic2 = np.asarray(list(map(int, line2[0:4])), dtype=np.int32) landmarks_pic2 = faceAlignModelHandler.inference_on_image(image2, det_pic2) except Exception as e: logger.error('Face landmark failed!') logger.error(e) sys.exit(-1) # ★☆★★☆★★☆★★☆★alignment fin★☆★★☆★★☆★★☆★★☆★ #-----------------------crop start-------------------------- face_cropper = FaceRecImageCropper() image1 = cv2.imread(image_path_pic1) image2 = cv2.imread(image_path_pic2) #landmark의 차원수를 낮추기 위해 flatten() 사용 flatten_landmarks1 = np.array(landmarks_pic1).flatten().tolist() flatten_landmarks2 = np.array(landmarks_pic2).flatten().tolist() landmarks1 = [float(num) for num in flatten_landmarks1] cropped_image1 = face_cropper.crop_image_by_mat(image1, landmarks1) landmarks2 = [float(num) for num in flatten_landmarks2] cropped_image2 = face_cropper.crop_image_by_mat(image2, landmarks2) ''' cv2.imshow("cropped_image1", cropped_image1) cv2.waitKey() cv2.destroyAllWindows() cv2.imshow("cropped_image2", cropped_image2) cv2.waitKey() cv2.destroyAllWindows() ''' #크롭된 이미지 저장하는 문장 #cv2.imwrite('api_usage/temp_pic/pic1_cropped.jpg', cropped_image1) #cv2.imwrite('api_usage/temp_pic/pic2_cropped.jpg', cropped_image2) # ★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆crop fin★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆ #----------------------feature start-------------------------- model_category = 'face_recognition' model_name = model_conf[scene][model_category] try: faceRecModelLoader = FaceRecModelLoader(model_path, model_category, model_name) except Exception as e: logger.error('Failed to parse model configuration file!') logger.error(e) sys.exit(-1) try: model, cfg = faceRecModelLoader.load_model() except Exception as e: logger.error('Model loading failed!') logger.error(e) sys.exit(-1) faceRecModelHandler = FaceRecModelHandler(model, 'cpu', cfg) try: # 여기서 오류 #크롭된 이미지의 피쳐를 뽑아냄 feature1 = faceRecModelHandler.inference_on_image(cropped_image1) feature2 = faceRecModelHandler.inference_on_image(cropped_image2) except Exception as e: logger.error('Failed to extract facial features!') logger.error(e) sys.exit(-1) # ★☆★★☆★★☆★★☆★feature fin★☆★★☆★★☆★★☆★★☆★ #----------------------pipline start-------------------------- #점수 계산 pic1_crop의 feature와 pic2_crop의 feature dot 연산 score = np.dot(feature1, feature2) print('The score for pic1 and pic2 is', score)
en
0.182112
#-----------------detect------------------- #-----------------align------------------- #----------------crop--------------------- #----------------------feature---------------------- # common setting for all model, need not modify. # model setting, modified along with model # load model # read image cv2.imshow("pic1", image1) cv2.waitKey() cv2.destroyAllWindows() cv2.imshow("pic2", image2) cv2.waitKey() cv2.destroyAllWindows() #★☆★★☆★★☆★★☆★detect fin★☆★★☆★★☆★★☆★★☆★ #-------------------alignment start------------------------- #change model_category # ----------------------pic landmark---------------------- # ★☆★★☆★★☆★★☆★alignment fin★☆★★☆★★☆★★☆★★☆★ #-----------------------crop start-------------------------- #landmark의 차원수를 낮추기 위해 flatten() 사용 cv2.imshow("cropped_image1", cropped_image1) cv2.waitKey() cv2.destroyAllWindows() cv2.imshow("cropped_image2", cropped_image2) cv2.waitKey() cv2.destroyAllWindows() #크롭된 이미지 저장하는 문장 #cv2.imwrite('api_usage/temp_pic/pic1_cropped.jpg', cropped_image1) #cv2.imwrite('api_usage/temp_pic/pic2_cropped.jpg', cropped_image2) # ★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆crop fin★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆ #----------------------feature start-------------------------- # 여기서 오류 #크롭된 이미지의 피쳐를 뽑아냄 # ★☆★★☆★★☆★★☆★feature fin★☆★★☆★★☆★★☆★★☆★ #----------------------pipline start-------------------------- #점수 계산 pic1_crop의 feature와 pic2_crop의 feature dot 연산
2.004989
2
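The script above ends by scoring two embeddings with `np.dot`, which equals cosine similarity when the recognition model emits L2-normalised features (an assumption about the model handler, not something the script checks). A small sketch that computes the scale-independent cosine score explicitly:

```python
import numpy as np

def cosine_similarity(feature1, feature2):
    feature1 = np.asarray(feature1, dtype=np.float64)
    feature2 = np.asarray(feature2, dtype=np.float64)
    # Equivalent to np.dot for unit-length embeddings.
    return float(np.dot(feature1, feature2)
                 / (np.linalg.norm(feature1) * np.linalg.norm(feature2)))

a = [0.10, 0.80, 0.30]
b = [0.12, 0.79, 0.28]
print(cosine_similarity(a, b))  # close to 1.0 for near-identical embeddings
```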
tests/test_strings.py
luto/django-i18nfield
35
6631482
<filename>tests/test_strings.py from django.utils import translation from django.utils.translation import gettext_noop from i18nfield.strings import LazyI18nString def test_explicit_translation(): data = { 'de': 'Hallo', 'en': 'Hello' } s = LazyI18nString(data) translation.activate('en') assert str(s) == 'Hello' translation.activate('de') assert str(s) == 'Hallo' assert bool(s) def test_create_from_string(): s = LazyI18nString('{"en": "Hello"}') assert s.data == {"en": "Hello"} s = LazyI18nString('Invalid JSON') assert s.data == 'Invalid JSON' def test_similar_translations(): data = { 'en': 'You', 'de': 'Sie', 'de-informal': 'Du' } s = LazyI18nString(data) translation.activate('de') assert str(s) == 'Sie' translation.activate('de-informal') assert str(s) == 'Du' data = { 'en': 'You', 'de-informal': 'Du' } s = LazyI18nString(data) translation.activate('de') assert str(s) == 'Du' translation.activate('de-informal') assert str(s) == 'Du' data = { 'en': 'You', 'de': 'Sie' } s = LazyI18nString(data) translation.activate('de') assert str(s) == 'Sie' translation.activate('de-informal') assert str(s) == 'Sie' def test_missing_default_translation(): data = { 'de': 'Hallo', } s = LazyI18nString(data) translation.activate('en') assert str(s) == 'Hallo' translation.activate('de') assert str(s) == 'Hallo' def test_missing_translation(): data = { 'en': 'Hello', } s = LazyI18nString(data) translation.activate('en') assert str(s) == 'Hello' translation.activate('de') assert str(s) == 'Hello' def test_legacy_string(): s = LazyI18nString("Hello") translation.activate('en') assert str(s) == 'Hello' translation.activate('de') assert str(s) == 'Hello' assert bool(s) def test_none(): s = LazyI18nString(None) assert str(s) == "" assert not bool(s) s = LazyI18nString("") assert str(s) == "" assert not bool(s) s = LazyI18nString({}) assert str(s) == "" assert not bool(s) def test_format(): data = { 'en': 'You', 'de': 'Sie' } s = LazyI18nString(data) translation.activate('de') assert '{}'.format(s) == 'Sie' def test_equality(): data = { 'en': 'You', 'de': 'Sie' } s1 = LazyI18nString(data) s2 = LazyI18nString(data.copy()) s3 = LazyI18nString({'en': 'I', 'de': 'Ich'}) assert s1 == s2 assert s2 != s3 assert s1 != None # noqa assert s1 == data def test_from_gettext(): gstr = gettext_noop('Welcome') lstr = LazyI18nString.from_gettext(gstr) assert 'de' in lstr.data assert lstr.data['en'] == 'Welcome' def test_map(): data = { 'de': 'hallo', 'en': 'hello' } s = LazyI18nString(data) translation.activate('en') assert str(s) == 'hello' translation.activate('de') assert str(s) == 'hallo' s.map(lambda s: s.capitalize()) translation.activate('en') assert str(s) == 'Hello' translation.activate('de') assert str(s) == 'Hallo'
<filename>tests/test_strings.py from django.utils import translation from django.utils.translation import gettext_noop from i18nfield.strings import LazyI18nString def test_explicit_translation(): data = { 'de': 'Hallo', 'en': 'Hello' } s = LazyI18nString(data) translation.activate('en') assert str(s) == 'Hello' translation.activate('de') assert str(s) == 'Hallo' assert bool(s) def test_create_from_string(): s = LazyI18nString('{"en": "Hello"}') assert s.data == {"en": "Hello"} s = LazyI18nString('Invalid JSON') assert s.data == 'Invalid JSON' def test_similar_translations(): data = { 'en': 'You', 'de': 'Sie', 'de-informal': 'Du' } s = LazyI18nString(data) translation.activate('de') assert str(s) == 'Sie' translation.activate('de-informal') assert str(s) == 'Du' data = { 'en': 'You', 'de-informal': 'Du' } s = LazyI18nString(data) translation.activate('de') assert str(s) == 'Du' translation.activate('de-informal') assert str(s) == 'Du' data = { 'en': 'You', 'de': 'Sie' } s = LazyI18nString(data) translation.activate('de') assert str(s) == 'Sie' translation.activate('de-informal') assert str(s) == 'Sie' def test_missing_default_translation(): data = { 'de': 'Hallo', } s = LazyI18nString(data) translation.activate('en') assert str(s) == 'Hallo' translation.activate('de') assert str(s) == 'Hallo' def test_missing_translation(): data = { 'en': 'Hello', } s = LazyI18nString(data) translation.activate('en') assert str(s) == 'Hello' translation.activate('de') assert str(s) == 'Hello' def test_legacy_string(): s = LazyI18nString("Hello") translation.activate('en') assert str(s) == 'Hello' translation.activate('de') assert str(s) == 'Hello' assert bool(s) def test_none(): s = LazyI18nString(None) assert str(s) == "" assert not bool(s) s = LazyI18nString("") assert str(s) == "" assert not bool(s) s = LazyI18nString({}) assert str(s) == "" assert not bool(s) def test_format(): data = { 'en': 'You', 'de': 'Sie' } s = LazyI18nString(data) translation.activate('de') assert '{}'.format(s) == 'Sie' def test_equality(): data = { 'en': 'You', 'de': 'Sie' } s1 = LazyI18nString(data) s2 = LazyI18nString(data.copy()) s3 = LazyI18nString({'en': 'I', 'de': 'Ich'}) assert s1 == s2 assert s2 != s3 assert s1 != None # noqa assert s1 == data def test_from_gettext(): gstr = gettext_noop('Welcome') lstr = LazyI18nString.from_gettext(gstr) assert 'de' in lstr.data assert lstr.data['en'] == 'Welcome' def test_map(): data = { 'de': 'hallo', 'en': 'hello' } s = LazyI18nString(data) translation.activate('en') assert str(s) == 'hello' translation.activate('de') assert str(s) == 'hallo' s.map(lambda s: s.capitalize()) translation.activate('en') assert str(s) == 'Hello' translation.activate('de') assert str(s) == 'Hallo'
none
1
2.257684
2
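The tests above pin down LazyI18nString's fallback order: exact locale, then the base language or a variant of it (de-informal vs de), then the default language, then any remaining value. A simplified standalone resolver that reproduces the behaviour these tests exercise (a sketch, not the library's implementation):

```python
def resolve(data, active_language, default="en"):
    """Pick the best translation from a {language: text} mapping or plain string."""
    if not data:
        return ""
    if isinstance(data, str):
        return data
    if active_language in data:                 # exact locale match
        return data[active_language]
    base = active_language.split("-")[0]
    if base in data:                            # de-informal falls back to de
        return data[base]
    for lang, text in data.items():             # de falls back to de-informal
        if lang.split("-")[0] == base:
            return text
    if default in data:                         # default language
        return data[default]
    return next(iter(data.values()))            # any remaining value

assert resolve({"en": "You", "de": "Sie", "de-informal": "Du"}, "de-informal") == "Du"
assert resolve({"en": "You", "de-informal": "Du"}, "de") == "Du"
assert resolve({"de": "Hallo"}, "en") == "Hallo"
```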
library_collection/admin.py
mredar/avram
0
6631483
<reponame>mredar/avram # -*- coding: utf-8 -*- import datetime from django.contrib import admin from django import forms from library_collection.duration_widget import MultiValueDurationField from library_collection.models import Campus from library_collection.models import Repository from library_collection.models import Collection from library_collection.models import CollectionCustomFacet from library_collection.admin_actions import queue_harvest_normal_stage from library_collection.admin_actions import queue_image_harvest_normal_stage from library_collection.admin_actions import queue_sync_couchdb from library_collection.admin_actions import set_ready_for_publication from library_collection.admin_actions import queue_sync_to_solr_normal_stage from library_collection.admin_actions import \ queue_sync_to_solr_normal_production from library_collection.admin_actions import \ queue_delete_from_solr_normal_stage from library_collection.admin_actions import \ queue_delete_from_solr_normal_production from library_collection.admin_actions import \ queue_deep_harvest_normal_stage from library_collection.admin_actions import \ queue_deep_harvest_replace_normal_stage from library_collection.admin_actions import \ queue_delete_couchdb_collection_stage from library_collection.admin_actions import \ queue_delete_couchdb_collection_production from django.contrib.sites.models import Site from django.contrib.auth.admin import UserAdmin from django.contrib.auth.models import User from django.contrib.admin import SimpleListFilter from django.http import HttpResponseRedirect from django.db.models import F # Add is_active & date_joined to User admin list view UserAdmin.list_display = ('username', 'email', 'first_name', 'last_name', 'is_active', 'date_joined', 'is_staff') admin.site.unregister(User) admin.site.register(User, UserAdmin) class NotInCampus(SimpleListFilter): title = 'Not on a Campus' parameter_name = 'nocampus' def lookups(self, request, model_admin): return (('NOCAMPUS', 'Not on a campus'), ('CAMPUS', 'on a campus')) def queryset(self, request, queryset): if self.value() == 'NOCAMPUS': return queryset.filter(campus=None) if self.value() == 'CAMPUS': return queryset.exclude(campus=None) class HarvestOverdueFilter(SimpleListFilter): '''Filter for collections where date_last_harvested + harvest_frequency is in past. 
''' title = 'Overdue Harvest' parameter_name = 'harvest_overdue' def lookups(self, request, model_admin): return ( ('Y', 'Harvest Overdue'), ('N', 'Harvest Not Due'), ('NP', 'Not periodic'), ('P', 'Periodic'), ) def queryset(self, request, queryset): if self.value() == 'Y': return queryset.filter(date_last_harvested__lt=( datetime.datetime.today() - F('harvest_frequency'))) if self.value() == 'N': return queryset.filter(date_last_harvested__gt=( datetime.datetime.today() - F('harvest_frequency'))) if self.value() == 'NP': return queryset.filter(harvest_frequency__isnull=True) if self.value() == 'P': return queryset.filter(harvest_frequency__isnull=False) return queryset class URLFieldsListFilter(SimpleListFilter): '''Filter to find blank or filled URL fields''' title = 'URL Fields' lookup_table = { 'LOCAL': ('has local URL', lambda x: x.exclude(url_local__exact='')), 'LOCALNOT': ('missing local URL', lambda x: x.filter(url_local__exact='')), 'OAC': ('has OAC URL', lambda x: x.exclude(url_oac__exact='')), 'OACNOT': ('missing OAC URL', lambda x: x.filter(url_oac__exact='')), 'HARVEST': ('has HARVEST URL', lambda x: x.exclude(url_harvest__exact='')), 'HARVESTNOT': ('missing HARVEST URL', lambda x: x.filter(url_harvest__exact='')), } # Parameter for the filter that will be used in the URL query. parameter_name = 'urlfields' def lookups(self, request, model_admin): """ Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar. """ return tuple( [(k, v[0]) for k, v in URLFieldsListFilter.lookup_table.items()]) def queryset(self, request, queryset): """ Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`. 
""" try: return URLFieldsListFilter.lookup_table[self.value()][1](queryset) except KeyError: pass # from: http://stackoverflow.com/questions/2805701/ class ActionInChangeFormMixin(object): def response_action(self, request, queryset): """ Prefer http referer for redirect """ response = super(ActionInChangeFormMixin, self).response_action( request, queryset) if isinstance(response, HttpResponseRedirect): response['Location'] = request.META.get('HTTP_REFERER', request.path) return response def change_view(self, request, object_id, extra_context=None): actions = self.get_actions(request) if actions: action_form = self.action_form(auto_id=None) action_form.fields['action'].choices = self.get_action_choices( request) else: action_form = None return super(ActionInChangeFormMixin, self).change_view( request, object_id, extra_context={'action_form': action_form, }) class CollectionCustomFacetInline(admin.StackedInline): model = CollectionCustomFacet fk_name = 'collection' class CollectionAdminForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(CollectionAdminForm, self).__init__(*args, **kwargs) self.fields['harvest_frequency'] = MultiValueDurationField( label='Harvest Frequency', help_text="In 30 day Months and Days") class Meta: model = Collection fields = '__all__' class CollectionAdmin(ActionInChangeFormMixin, admin.ModelAdmin): # http://stackoverflow.com/a/11321942/1763984 inlines = [CollectionCustomFacetInline, ] form = CollectionAdminForm def campuses(self): return ", ".join([x.__str__() for x in self.campus.all()]) campuses.short_description = "Campus" def repositories(self): return ", ".join([x.__str__() for x in self.repository.all()]) repositories.short_description = "Repository" def numeric_key(self): return self.pk numeric_key.short_description = "Numeric key" list_display = ('name', campuses, repositories, 'human_extent', numeric_key, 'date_last_harvested') list_filter = [ 'campus', HarvestOverdueFilter, 'ready_for_publication', NotInCampus, 'harvest_type', URLFieldsListFilter, 'repository' ] search_fields = ['name', 'description', 'enrichments_item'] actions = [ queue_harvest_normal_stage, queue_image_harvest_normal_stage, queue_deep_harvest_normal_stage, queue_deep_harvest_replace_normal_stage, queue_sync_to_solr_normal_stage, queue_sync_couchdb, queue_sync_to_solr_normal_production, queue_delete_couchdb_collection_stage, queue_delete_from_solr_normal_stage, queue_delete_couchdb_collection_production, queue_delete_from_solr_normal_production, set_ready_for_publication, ] fieldsets = ( ( 'Descriptive Information', { 'fields': ( 'name', 'campus', 'repository', 'description', 'local_id', 'url_local', 'url_oac', 'rights_status', 'rights_statement', 'ready_for_publication', 'featured') }, ), ( 'For Nuxeo Collections', { # 'classes': ('collapse',), 'fields': ( 'extent', 'formats', 'hosted', 'merritt_id', 'staging_notes', 'files_in_hand', 'files_in_dams', 'metadata_in_dams', 'qa_completed', ) }), ( 'For Harvest Collections', { 'fields': ( 'harvest_type', 'dcmi_type', 'url_harvest', 'harvest_extra_data', 'enrichments_item', 'date_last_harvested', 'harvest_frequency', 'harvest_exception_notes') })) def human_extent(self, obj): return obj.human_extent human_extent.short_description = 'extent' def formfield_for_manytomany(self, db_field, request, **kwargs): if db_field.name == "repository": kwargs["queryset"] = Repository.objects.order_by('name') return super(CollectionAdmin, self).formfield_for_manytomany( db_field, request, **kwargs) class CampusAdmin(admin.ModelAdmin): 
list_display = ('name', 'slug') class RepositoryAdmin(admin.ModelAdmin): search_fields = ['name'] admin.site.register(Collection, CollectionAdmin) admin.site.register(Campus, CampusAdmin) admin.site.register(Repository, RepositoryAdmin) # http://stackoverflow.com/questions/5742279/ # removing-sites-from-django-admin-page try: admin.site.unregister(Site) except admin.sites.NotRegistered: pass admin.site.disable_action('delete_selected') # Copyright © 2016, Regents of the University of California # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # - Neither the name of the University of California nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.
# -*- coding: utf-8 -*- import datetime from django.contrib import admin from django import forms from library_collection.duration_widget import MultiValueDurationField from library_collection.models import Campus from library_collection.models import Repository from library_collection.models import Collection from library_collection.models import CollectionCustomFacet from library_collection.admin_actions import queue_harvest_normal_stage from library_collection.admin_actions import queue_image_harvest_normal_stage from library_collection.admin_actions import queue_sync_couchdb from library_collection.admin_actions import set_ready_for_publication from library_collection.admin_actions import queue_sync_to_solr_normal_stage from library_collection.admin_actions import \ queue_sync_to_solr_normal_production from library_collection.admin_actions import \ queue_delete_from_solr_normal_stage from library_collection.admin_actions import \ queue_delete_from_solr_normal_production from library_collection.admin_actions import \ queue_deep_harvest_normal_stage from library_collection.admin_actions import \ queue_deep_harvest_replace_normal_stage from library_collection.admin_actions import \ queue_delete_couchdb_collection_stage from library_collection.admin_actions import \ queue_delete_couchdb_collection_production from django.contrib.sites.models import Site from django.contrib.auth.admin import UserAdmin from django.contrib.auth.models import User from django.contrib.admin import SimpleListFilter from django.http import HttpResponseRedirect from django.db.models import F # Add is_active & date_joined to User admin list view UserAdmin.list_display = ('username', 'email', 'first_name', 'last_name', 'is_active', 'date_joined', 'is_staff') admin.site.unregister(User) admin.site.register(User, UserAdmin) class NotInCampus(SimpleListFilter): title = 'Not on a Campus' parameter_name = 'nocampus' def lookups(self, request, model_admin): return (('NOCAMPUS', 'Not on a campus'), ('CAMPUS', 'on a campus')) def queryset(self, request, queryset): if self.value() == 'NOCAMPUS': return queryset.filter(campus=None) if self.value() == 'CAMPUS': return queryset.exclude(campus=None) class HarvestOverdueFilter(SimpleListFilter): '''Filter for collections where date_last_harvested + harvest_frequency is in past. 
''' title = 'Overdue Harvest' parameter_name = 'harvest_overdue' def lookups(self, request, model_admin): return ( ('Y', 'Harvest Overdue'), ('N', 'Harvest Not Due'), ('NP', 'Not periodic'), ('P', 'Periodic'), ) def queryset(self, request, queryset): if self.value() == 'Y': return queryset.filter(date_last_harvested__lt=( datetime.datetime.today() - F('harvest_frequency'))) if self.value() == 'N': return queryset.filter(date_last_harvested__gt=( datetime.datetime.today() - F('harvest_frequency'))) if self.value() == 'NP': return queryset.filter(harvest_frequency__isnull=True) if self.value() == 'P': return queryset.filter(harvest_frequency__isnull=False) return queryset class URLFieldsListFilter(SimpleListFilter): '''Filter to find blank or filled URL fields''' title = 'URL Fields' lookup_table = { 'LOCAL': ('has local URL', lambda x: x.exclude(url_local__exact='')), 'LOCALNOT': ('missing local URL', lambda x: x.filter(url_local__exact='')), 'OAC': ('has OAC URL', lambda x: x.exclude(url_oac__exact='')), 'OACNOT': ('missing OAC URL', lambda x: x.filter(url_oac__exact='')), 'HARVEST': ('has HARVEST URL', lambda x: x.exclude(url_harvest__exact='')), 'HARVESTNOT': ('missing HARVEST URL', lambda x: x.filter(url_harvest__exact='')), } # Parameter for the filter that will be used in the URL query. parameter_name = 'urlfields' def lookups(self, request, model_admin): """ Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar. """ return tuple( [(k, v[0]) for k, v in URLFieldsListFilter.lookup_table.items()]) def queryset(self, request, queryset): """ Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`. 
""" try: return URLFieldsListFilter.lookup_table[self.value()][1](queryset) except KeyError: pass # from: http://stackoverflow.com/questions/2805701/ class ActionInChangeFormMixin(object): def response_action(self, request, queryset): """ Prefer http referer for redirect """ response = super(ActionInChangeFormMixin, self).response_action( request, queryset) if isinstance(response, HttpResponseRedirect): response['Location'] = request.META.get('HTTP_REFERER', request.path) return response def change_view(self, request, object_id, extra_context=None): actions = self.get_actions(request) if actions: action_form = self.action_form(auto_id=None) action_form.fields['action'].choices = self.get_action_choices( request) else: action_form = None return super(ActionInChangeFormMixin, self).change_view( request, object_id, extra_context={'action_form': action_form, }) class CollectionCustomFacetInline(admin.StackedInline): model = CollectionCustomFacet fk_name = 'collection' class CollectionAdminForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(CollectionAdminForm, self).__init__(*args, **kwargs) self.fields['harvest_frequency'] = MultiValueDurationField( label='Harvest Frequency', help_text="In 30 day Months and Days") class Meta: model = Collection fields = '__all__' class CollectionAdmin(ActionInChangeFormMixin, admin.ModelAdmin): # http://stackoverflow.com/a/11321942/1763984 inlines = [CollectionCustomFacetInline, ] form = CollectionAdminForm def campuses(self): return ", ".join([x.__str__() for x in self.campus.all()]) campuses.short_description = "Campus" def repositories(self): return ", ".join([x.__str__() for x in self.repository.all()]) repositories.short_description = "Repository" def numeric_key(self): return self.pk numeric_key.short_description = "Numeric key" list_display = ('name', campuses, repositories, 'human_extent', numeric_key, 'date_last_harvested') list_filter = [ 'campus', HarvestOverdueFilter, 'ready_for_publication', NotInCampus, 'harvest_type', URLFieldsListFilter, 'repository' ] search_fields = ['name', 'description', 'enrichments_item'] actions = [ queue_harvest_normal_stage, queue_image_harvest_normal_stage, queue_deep_harvest_normal_stage, queue_deep_harvest_replace_normal_stage, queue_sync_to_solr_normal_stage, queue_sync_couchdb, queue_sync_to_solr_normal_production, queue_delete_couchdb_collection_stage, queue_delete_from_solr_normal_stage, queue_delete_couchdb_collection_production, queue_delete_from_solr_normal_production, set_ready_for_publication, ] fieldsets = ( ( 'Descriptive Information', { 'fields': ( 'name', 'campus', 'repository', 'description', 'local_id', 'url_local', 'url_oac', 'rights_status', 'rights_statement', 'ready_for_publication', 'featured') }, ), ( 'For Nuxeo Collections', { # 'classes': ('collapse',), 'fields': ( 'extent', 'formats', 'hosted', 'merritt_id', 'staging_notes', 'files_in_hand', 'files_in_dams', 'metadata_in_dams', 'qa_completed', ) }), ( 'For Harvest Collections', { 'fields': ( 'harvest_type', 'dcmi_type', 'url_harvest', 'harvest_extra_data', 'enrichments_item', 'date_last_harvested', 'harvest_frequency', 'harvest_exception_notes') })) def human_extent(self, obj): return obj.human_extent human_extent.short_description = 'extent' def formfield_for_manytomany(self, db_field, request, **kwargs): if db_field.name == "repository": kwargs["queryset"] = Repository.objects.order_by('name') return super(CollectionAdmin, self).formfield_for_manytomany( db_field, request, **kwargs) class CampusAdmin(admin.ModelAdmin): 
list_display = ('name', 'slug') class RepositoryAdmin(admin.ModelAdmin): search_fields = ['name'] admin.site.register(Collection, CollectionAdmin) admin.site.register(Campus, CampusAdmin) admin.site.register(Repository, RepositoryAdmin) # http://stackoverflow.com/questions/5742279/ # removing-sites-from-django-admin-page try: admin.site.unregister(Site) except admin.sites.NotRegistered: pass admin.site.disable_action('delete_selected') # Copyright © 2016, Regents of the University of California # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # - Neither the name of the University of California nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.
en
0.720342
# -*- coding: utf-8 -*- # Add is_active & date_joined to User admin list view Filter for collections where date_last_harvested + harvest_frequency is in past. Filter to find blank or filled URL fields # Parameter for the filter that will be used in the URL query. Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar. Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`. # from: http://stackoverflow.com/questions/2805701/ Prefer http referer for redirect # http://stackoverflow.com/a/11321942/1763984 # 'classes': ('collapse',), # http://stackoverflow.com/questions/5742279/ # removing-sites-from-django-admin-page # Copyright © 2016, Regents of the University of California # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # - Neither the name of the University of California nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.
1.451379
1
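Readability sketch: the admin record above stores its Python source as flattened text, so the HarvestOverdueFilter it defines is shown here reflowed as a standalone sketch. The imports and the DurationField-style date arithmetic are inferred from context rather than copied verbatim from the repository.

import datetime

from django.contrib.admin import SimpleListFilter
from django.db.models import F


class HarvestOverdueFilter(SimpleListFilter):
    """Filter for collections where date_last_harvested + harvest_frequency is in the past."""

    title = 'Overdue Harvest'
    parameter_name = 'harvest_overdue'

    def lookups(self, request, model_admin):
        # (URL query value, human-readable label shown in the admin sidebar)
        return (
            ('Y', 'Harvest Overdue'),
            ('N', 'Harvest Not Due'),
            ('NP', 'Not periodic'),
            ('P', 'Periodic'),
        )

    def queryset(self, request, queryset):
        # A collection is overdue when its last harvest happened more than
        # one harvest_frequency ago.
        if self.value() == 'Y':
            return queryset.filter(date_last_harvested__lt=(
                datetime.datetime.today() - F('harvest_frequency')))
        if self.value() == 'N':
            return queryset.filter(date_last_harvested__gt=(
                datetime.datetime.today() - F('harvest_frequency')))
        if self.value() == 'NP':
            return queryset.filter(harvest_frequency__isnull=True)
        if self.value() == 'P':
            return queryset.filter(harvest_frequency__isnull=False)
        return queryset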
tools/gn/bin/roll_gn.py
google-ar/chromium
777
6631484
<gh_stars>100-1000 #!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """An auto-roller for GN binaries into Chromium. This script is used to update the GN binaries that a Chromium checkout uses. In order to update the binaries, one must follow four steps in order: 1. Trigger try jobs to build a new GN binary at tip-of-tree and upload the newly-built binaries into the right Google CloudStorage bucket. 2. Wait for the try jobs to complete. 3. Update the buildtools repo with the .sha1 hashes of the newly built binaries. 4. Update Chromium's DEPS file to the new version of the buildtools repo. The script has four commands that correspond to the four steps above: 'build', 'wait', 'roll_buildtools', and 'roll_deps'. The script has a fifth command, 'roll', that runs the four in order. If given no arguments, the script will run the 'roll' command. It can only be run on linux in a clean Chromium checkout; it should error out in most cases if something bad happens, but the error checking isn't yet foolproof. """ from __future__ import print_function import argparse import json import os import re import subprocess import sys import tempfile import time import urllib2 depot_tools_path = None for p in os.environ['PATH'].split(os.pathsep): if (p.rstrip(os.sep).endswith('depot_tools') and os.path.isfile(os.path.join(p, 'gclient.py'))): depot_tools_path = p assert depot_tools_path if not depot_tools_path in sys.path: sys.path.insert(0, depot_tools_path) third_party_path = os.path.join(depot_tools_path, 'third_party') if not third_party_path in sys.path: sys.path.insert(0, third_party_path) import upload CHROMIUM_REPO = 'https://chromium.googlesource.com/chromium/src.git' CODE_REVIEW_SERVER = 'https://codereview.chromium.org' COMMITISH_DIGITS = 10 class GNRoller(object): def __init__(self): self.chromium_src_dir = None self.buildtools_dir = None self.old_gn_commitish = None self.new_gn_commitish = None self.old_gn_version = None self.new_gn_version = None self.reviewer = '<EMAIL>' if os.getenv('USER') == 'dpranke': self.reviewer = '<EMAIL>' def Roll(self): parser = argparse.ArgumentParser() parser.usage = __doc__ parser.add_argument('command', nargs='?', default='roll', help='build|roll|roll_buildtools|roll_deps|wait' ' (%(default)s is the default)') args = parser.parse_args() command = args.command ret = self.SetUp() if not ret and command in ('roll', 'build'): ret = self.TriggerBuild() if not ret and command in ('roll', 'wait'): ret = self.WaitForBuildToFinish() if not ret and command in ('roll', 'roll_buildtools'): ret = self.RollBuildtools() if not ret and command in ('roll', 'roll_deps'): ret = self.RollDEPS() return ret def SetUp(self): if sys.platform != 'linux2': print('roll_gn is only tested and working on Linux for now.') return 1 ret, out, _ = self.Call('git config --get remote.origin.url') origin = out.strip() if ret or origin != CHROMIUM_REPO: print('Not in a Chromium repo? 
git config --get remote.origin.url ' 'returned %d: %s' % (ret, origin)) return 1 ret, _, _ = self.Call('git diff -q') if ret: print("Checkout is dirty, exiting") return 1 _, out, _ = self.Call('git rev-parse --show-toplevel', cwd=os.getcwd()) self.chromium_src_dir = out.strip() self.buildtools_dir = os.path.join(self.chromium_src_dir, 'buildtools') self.new_gn_commitish, self.new_gn_version = self.GetNewVersions() _, out, _ = self.Call('gn --version') self.old_gn_version = out.strip() _, out, _ = self.Call('git crrev-parse %s' % self.old_gn_version) self.old_gn_commitish = out.strip() return 0 def GetNewVersions(self): _, out, _ = self.Call('git log -1 --grep Cr-Commit-Position') commit_msg = out.splitlines() first_line = commit_msg[0] new_gn_commitish = first_line.split()[1] last_line = commit_msg[-1] new_gn_version = re.sub('.*master@{#(\d+)}', '\\1', last_line) return new_gn_commitish, new_gn_version def TriggerBuild(self): ret, _, _ = self.Call('git new-branch build_gn_%s' % self.new_gn_version) if ret: print('Failed to create a new branch for build_gn_%s' % self.new_gn_version) return 1 self.MakeDummyDepsChange() ret, out, err = self.Call('git commit -a -m "Build gn at %s"' % self.new_gn_version) if ret: print('git commit failed: %s' % out + err) return 1 print('Uploading CL to build GN at {#%s} - %s' % (self.new_gn_version, self.new_gn_commitish)) ret, out, err = self.Call('git cl upload -f') if ret: print('git-cl upload failed: %s' % out + err) return 1 print('Starting try jobs') self.Call('git-cl try -m tryserver.chromium.linux ' '-b linux_chromium_gn_upload -r %s' % self.new_gn_commitish) self.Call('git-cl try -m tryserver.chromium.mac ' '-b mac_chromium_gn_upload -r %s' % self.new_gn_commitish) self.Call('git-cl try -m tryserver.chromium.win ' '-b win8_chromium_gn_upload -r %s' % self.new_gn_commitish) return 0 def MakeDummyDepsChange(self): with open('DEPS') as fp: deps_content = fp.read() new_deps = deps_content.replace("'buildtools_revision':", "'buildtools_revision': ") with open('DEPS', 'w') as fp: fp.write(new_deps) def WaitForBuildToFinish(self): ret = self.CheckoutBuildBranch() if ret: return ret print('Checking build') results = self.CheckBuild() while (len(results) < 3 or any(r['state'] in ('pending', 'started') for r in results.values())): print() print('Sleeping for 30 seconds') time.sleep(30) print('Checking build') results = self.CheckBuild() ret = 0 if all(r['state'] == 'success' for r in results.values()) else 1 if ret: print('Build failed.') else: print('Builds ready.') # Close the build CL and move off of the build branch back to whatever # we were on before. 
self.Call('git-cl set-close') self.MoveToLastHead() return ret def CheckoutBuildBranch(self): ret, out, err = self.Call('git checkout build_gn_%s' % self.new_gn_version) if ret: print('Failed to check out build_gn_%s' % self.new_gn_version) if out: print(out) if err: print(err, file=sys.stderr) return ret def CheckBuild(self): _, out, _ = self.Call('git-cl issue') issue = int(out.split()[2]) _, out, _ = self.Call('git config user.email') email = '' rpc_server = upload.GetRpcServer(CODE_REVIEW_SERVER, email) try: props = json.loads(rpc_server.Send('/api/%d' % issue)) except Exception as _e: raise patchset = int(props['patchsets'][-1]) try: try_job_results = json.loads(rpc_server.Send( '/api/%d/%d/try_job_results' % (issue, patchset))) except Exception as _e: raise if not try_job_results: print('No try jobs found on most recent patchset') return {} results = {} for job in try_job_results: builder = job['builder'] if builder == 'linux_chromium_gn_upload': platform = 'linux64' elif builder == 'mac_chromium_gn_upload': platform = 'mac' elif builder == 'win8_chromium_gn_upload': platform = 'win' else: print('Unexpected builder: %s') continue TRY_JOB_RESULT_STATES = ('started', 'success', 'warnings', 'failure', 'skipped', 'exception', 'retry', 'pending') state = TRY_JOB_RESULT_STATES[int(job['result']) + 1] url_str = ' %s' % job['url'] build = url_str.split('/')[-1] sha1 = '-' results.setdefault(platform, {'build': -1, 'sha1': '', 'url': url_str}) if state == 'success': jsurl = url_str.replace('/builders/', '/json/builders/') fp = urllib2.urlopen(jsurl) js = json.loads(fp.read()) fp.close() sha1_step_name = 'gn sha1' for step in js['steps']: if step['name'] == sha1_step_name: # TODO: At some point infra changed the step text to # contain the step name; once all of the masters have been # restarted we can probably assert that the step text # with the step_name. 
sha1_step_text_prefix = sha1_step_name + '<br>' if step['text'][-1].startswith(sha1_step_text_prefix): sha1 = step['text'][-1][len(sha1_step_text_prefix):] else: sha1 = step['text'][-1] if results[platform]['build'] < build: results[platform]['build'] = build results[platform]['sha1'] = sha1 results[platform]['state'] = state results[platform]['url'] = url_str for platform, r in results.items(): print(platform) print(' sha1: %s' % r['sha1']) print(' state: %s' % r['state']) print(' build: %s' % r['build']) print(' url: %s' % r['url']) print() return results def RollBuildtools(self): ret = self.CheckoutBuildBranch() if ret: return ret results = self.CheckBuild() if (len(results) < 3 or not all(r['state'] == 'success' for r in results.values())): print("Roll isn't done or didn't succeed, exiting:") return 1 desc = self.GetBuildtoolsDesc() self.Call('git new-branch roll_buildtools_gn_%s' % self.new_gn_version, cwd=self.buildtools_dir) for platform in results: fname = 'gn.exe.sha1' if platform == 'win' else 'gn.sha1' path = os.path.join(self.buildtools_dir, platform, fname) with open(path, 'w') as fp: fp.write('%s\n' % results[platform]['sha1']) desc_file = tempfile.NamedTemporaryFile(delete=False) try: desc_file.write(desc) desc_file.close() self.Call('git commit -a -F %s' % desc_file.name, cwd=self.buildtools_dir) self.Call('git-cl upload -f --send-mail', cwd=self.buildtools_dir) finally: os.remove(desc_file.name) ret, out, err = self.Call('git cl land', cwd=self.buildtools_dir) if ret: print("buildtools git cl land failed: %d" % ret) if out: print(out) if err: print(err) return ret # Fetch the revision we just committed so that RollDEPS will find it. self.Call('git fetch', cwd=self.buildtools_dir) # Reset buildtools to the new commit so that we're not still on the # merged branch. self.Call('git checkout origin/master', cwd=self.buildtools_dir) _, out, _ = self.Call('git rev-parse origin/master', cwd=self.buildtools_dir) new_buildtools_commitish = out.strip() print('Ready to roll buildtools to %s in DEPS' % new_buildtools_commitish) return 0 def RollDEPS(self): ret, _, _ = self.Call('git new-branch roll_gn_%s' % self.new_gn_version) if ret: print('Failed to create a new branch for roll_gn_%s' % self.new_gn_version) return 1 _, out, _ = self.Call('git rev-parse origin/master', cwd=self.buildtools_dir) new_buildtools_commitish = out.strip() new_deps_lines = [] old_buildtools_commitish = '' with open(os.path.join(self.chromium_src_dir, 'DEPS')) as fp: for l in fp.readlines(): m = re.match(".*'buildtools_revision':.*'(.+)',", l) if m: old_buildtools_commitish = m.group(1) new_deps_lines.append(" 'buildtools_revision': '%s',\n" % new_buildtools_commitish) else: new_deps_lines.append(l) if not old_buildtools_commitish: print('Could not update DEPS properly, exiting') return 1 with open('DEPS', 'w') as fp: fp.write(''.join(new_deps_lines)) desc = self.GetDEPSRollDesc(old_buildtools_commitish, new_buildtools_commitish) desc_file = tempfile.NamedTemporaryFile(delete=False) try: desc_file.write(desc) desc_file.close() self.Call('git commit -a -F %s' % desc_file.name) self.Call('git-cl upload -f --send-mail --use-commit-queue') finally: os.remove(desc_file.name) # Move off of the roll branch onto whatever we were on before. # Do not explicitly close the roll CL issue, however; the CQ # will close it when the roll lands, assuming it does so. 
self.MoveToLastHead() return 0 def MoveToLastHead(self): # When this is called, there will be a commit + a checkout as # the two most recent entries in the reflog, assuming nothing as # modified the repo while this script has been running. _, out, _ = self.Call('git reflog -2') m = re.search('moving from ([^\s]+)', out) last_head = m.group(1) self.Call('git checkout %s' % last_head) def GetBuildtoolsDesc(self): gn_changes = self.GetGNChanges() return ( 'Roll gn %s..%s (r%s:r%s)\n' '\n' '%s' '\n' 'TBR=%s\n' % ( self.old_gn_commitish[:COMMITISH_DIGITS], self.new_gn_commitish[:COMMITISH_DIGITS], self.old_gn_version, self.new_gn_version, gn_changes, self.reviewer, )) def GetDEPSRollDesc(self, old_buildtools_commitish, new_buildtools_commitish): gn_changes = self.GetGNChanges() return ( 'Roll buildtools %s..%s\n' '\n' ' In order to roll GN %s..%s (r%s:r%s) and pick up\n' ' the following changes:\n' '\n' '%s' '\n' 'TBR=%s\n' % ( old_buildtools_commitish[:COMMITISH_DIGITS], new_buildtools_commitish[:COMMITISH_DIGITS], self.old_gn_commitish[:COMMITISH_DIGITS], self.new_gn_commitish[:COMMITISH_DIGITS], self.old_gn_version, self.new_gn_version, gn_changes, self.reviewer, )) def GetGNChanges(self): _, out, _ = self.Call( "git log --pretty=' %h %s' " + "%s..%s tools/gn" % (self.old_gn_commitish, self.new_gn_commitish)) return out def Call(self, cmd, cwd=None): proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, cwd=(cwd or self.chromium_src_dir)) out, err = proc.communicate() return proc.returncode, out, err if __name__ == '__main__': roller = GNRoller() sys.exit(roller.Roll())
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """An auto-roller for GN binaries into Chromium. This script is used to update the GN binaries that a Chromium checkout uses. In order to update the binaries, one must follow four steps in order: 1. Trigger try jobs to build a new GN binary at tip-of-tree and upload the newly-built binaries into the right Google CloudStorage bucket. 2. Wait for the try jobs to complete. 3. Update the buildtools repo with the .sha1 hashes of the newly built binaries. 4. Update Chromium's DEPS file to the new version of the buildtools repo. The script has four commands that correspond to the four steps above: 'build', 'wait', 'roll_buildtools', and 'roll_deps'. The script has a fifth command, 'roll', that runs the four in order. If given no arguments, the script will run the 'roll' command. It can only be run on linux in a clean Chromium checkout; it should error out in most cases if something bad happens, but the error checking isn't yet foolproof. """ from __future__ import print_function import argparse import json import os import re import subprocess import sys import tempfile import time import urllib2 depot_tools_path = None for p in os.environ['PATH'].split(os.pathsep): if (p.rstrip(os.sep).endswith('depot_tools') and os.path.isfile(os.path.join(p, 'gclient.py'))): depot_tools_path = p assert depot_tools_path if not depot_tools_path in sys.path: sys.path.insert(0, depot_tools_path) third_party_path = os.path.join(depot_tools_path, 'third_party') if not third_party_path in sys.path: sys.path.insert(0, third_party_path) import upload CHROMIUM_REPO = 'https://chromium.googlesource.com/chromium/src.git' CODE_REVIEW_SERVER = 'https://codereview.chromium.org' COMMITISH_DIGITS = 10 class GNRoller(object): def __init__(self): self.chromium_src_dir = None self.buildtools_dir = None self.old_gn_commitish = None self.new_gn_commitish = None self.old_gn_version = None self.new_gn_version = None self.reviewer = '<EMAIL>' if os.getenv('USER') == 'dpranke': self.reviewer = '<EMAIL>' def Roll(self): parser = argparse.ArgumentParser() parser.usage = __doc__ parser.add_argument('command', nargs='?', default='roll', help='build|roll|roll_buildtools|roll_deps|wait' ' (%(default)s is the default)') args = parser.parse_args() command = args.command ret = self.SetUp() if not ret and command in ('roll', 'build'): ret = self.TriggerBuild() if not ret and command in ('roll', 'wait'): ret = self.WaitForBuildToFinish() if not ret and command in ('roll', 'roll_buildtools'): ret = self.RollBuildtools() if not ret and command in ('roll', 'roll_deps'): ret = self.RollDEPS() return ret def SetUp(self): if sys.platform != 'linux2': print('roll_gn is only tested and working on Linux for now.') return 1 ret, out, _ = self.Call('git config --get remote.origin.url') origin = out.strip() if ret or origin != CHROMIUM_REPO: print('Not in a Chromium repo? 
git config --get remote.origin.url ' 'returned %d: %s' % (ret, origin)) return 1 ret, _, _ = self.Call('git diff -q') if ret: print("Checkout is dirty, exiting") return 1 _, out, _ = self.Call('git rev-parse --show-toplevel', cwd=os.getcwd()) self.chromium_src_dir = out.strip() self.buildtools_dir = os.path.join(self.chromium_src_dir, 'buildtools') self.new_gn_commitish, self.new_gn_version = self.GetNewVersions() _, out, _ = self.Call('gn --version') self.old_gn_version = out.strip() _, out, _ = self.Call('git crrev-parse %s' % self.old_gn_version) self.old_gn_commitish = out.strip() return 0 def GetNewVersions(self): _, out, _ = self.Call('git log -1 --grep Cr-Commit-Position') commit_msg = out.splitlines() first_line = commit_msg[0] new_gn_commitish = first_line.split()[1] last_line = commit_msg[-1] new_gn_version = re.sub('.*master@{#(\d+)}', '\\1', last_line) return new_gn_commitish, new_gn_version def TriggerBuild(self): ret, _, _ = self.Call('git new-branch build_gn_%s' % self.new_gn_version) if ret: print('Failed to create a new branch for build_gn_%s' % self.new_gn_version) return 1 self.MakeDummyDepsChange() ret, out, err = self.Call('git commit -a -m "Build gn at %s"' % self.new_gn_version) if ret: print('git commit failed: %s' % out + err) return 1 print('Uploading CL to build GN at {#%s} - %s' % (self.new_gn_version, self.new_gn_commitish)) ret, out, err = self.Call('git cl upload -f') if ret: print('git-cl upload failed: %s' % out + err) return 1 print('Starting try jobs') self.Call('git-cl try -m tryserver.chromium.linux ' '-b linux_chromium_gn_upload -r %s' % self.new_gn_commitish) self.Call('git-cl try -m tryserver.chromium.mac ' '-b mac_chromium_gn_upload -r %s' % self.new_gn_commitish) self.Call('git-cl try -m tryserver.chromium.win ' '-b win8_chromium_gn_upload -r %s' % self.new_gn_commitish) return 0 def MakeDummyDepsChange(self): with open('DEPS') as fp: deps_content = fp.read() new_deps = deps_content.replace("'buildtools_revision':", "'buildtools_revision': ") with open('DEPS', 'w') as fp: fp.write(new_deps) def WaitForBuildToFinish(self): ret = self.CheckoutBuildBranch() if ret: return ret print('Checking build') results = self.CheckBuild() while (len(results) < 3 or any(r['state'] in ('pending', 'started') for r in results.values())): print() print('Sleeping for 30 seconds') time.sleep(30) print('Checking build') results = self.CheckBuild() ret = 0 if all(r['state'] == 'success' for r in results.values()) else 1 if ret: print('Build failed.') else: print('Builds ready.') # Close the build CL and move off of the build branch back to whatever # we were on before. 
self.Call('git-cl set-close') self.MoveToLastHead() return ret def CheckoutBuildBranch(self): ret, out, err = self.Call('git checkout build_gn_%s' % self.new_gn_version) if ret: print('Failed to check out build_gn_%s' % self.new_gn_version) if out: print(out) if err: print(err, file=sys.stderr) return ret def CheckBuild(self): _, out, _ = self.Call('git-cl issue') issue = int(out.split()[2]) _, out, _ = self.Call('git config user.email') email = '' rpc_server = upload.GetRpcServer(CODE_REVIEW_SERVER, email) try: props = json.loads(rpc_server.Send('/api/%d' % issue)) except Exception as _e: raise patchset = int(props['patchsets'][-1]) try: try_job_results = json.loads(rpc_server.Send( '/api/%d/%d/try_job_results' % (issue, patchset))) except Exception as _e: raise if not try_job_results: print('No try jobs found on most recent patchset') return {} results = {} for job in try_job_results: builder = job['builder'] if builder == 'linux_chromium_gn_upload': platform = 'linux64' elif builder == 'mac_chromium_gn_upload': platform = 'mac' elif builder == 'win8_chromium_gn_upload': platform = 'win' else: print('Unexpected builder: %s') continue TRY_JOB_RESULT_STATES = ('started', 'success', 'warnings', 'failure', 'skipped', 'exception', 'retry', 'pending') state = TRY_JOB_RESULT_STATES[int(job['result']) + 1] url_str = ' %s' % job['url'] build = url_str.split('/')[-1] sha1 = '-' results.setdefault(platform, {'build': -1, 'sha1': '', 'url': url_str}) if state == 'success': jsurl = url_str.replace('/builders/', '/json/builders/') fp = urllib2.urlopen(jsurl) js = json.loads(fp.read()) fp.close() sha1_step_name = 'gn sha1' for step in js['steps']: if step['name'] == sha1_step_name: # TODO: At some point infra changed the step text to # contain the step name; once all of the masters have been # restarted we can probably assert that the step text # with the step_name. 
sha1_step_text_prefix = sha1_step_name + '<br>' if step['text'][-1].startswith(sha1_step_text_prefix): sha1 = step['text'][-1][len(sha1_step_text_prefix):] else: sha1 = step['text'][-1] if results[platform]['build'] < build: results[platform]['build'] = build results[platform]['sha1'] = sha1 results[platform]['state'] = state results[platform]['url'] = url_str for platform, r in results.items(): print(platform) print(' sha1: %s' % r['sha1']) print(' state: %s' % r['state']) print(' build: %s' % r['build']) print(' url: %s' % r['url']) print() return results def RollBuildtools(self): ret = self.CheckoutBuildBranch() if ret: return ret results = self.CheckBuild() if (len(results) < 3 or not all(r['state'] == 'success' for r in results.values())): print("Roll isn't done or didn't succeed, exiting:") return 1 desc = self.GetBuildtoolsDesc() self.Call('git new-branch roll_buildtools_gn_%s' % self.new_gn_version, cwd=self.buildtools_dir) for platform in results: fname = 'gn.exe.sha1' if platform == 'win' else 'gn.sha1' path = os.path.join(self.buildtools_dir, platform, fname) with open(path, 'w') as fp: fp.write('%s\n' % results[platform]['sha1']) desc_file = tempfile.NamedTemporaryFile(delete=False) try: desc_file.write(desc) desc_file.close() self.Call('git commit -a -F %s' % desc_file.name, cwd=self.buildtools_dir) self.Call('git-cl upload -f --send-mail', cwd=self.buildtools_dir) finally: os.remove(desc_file.name) ret, out, err = self.Call('git cl land', cwd=self.buildtools_dir) if ret: print("buildtools git cl land failed: %d" % ret) if out: print(out) if err: print(err) return ret # Fetch the revision we just committed so that RollDEPS will find it. self.Call('git fetch', cwd=self.buildtools_dir) # Reset buildtools to the new commit so that we're not still on the # merged branch. self.Call('git checkout origin/master', cwd=self.buildtools_dir) _, out, _ = self.Call('git rev-parse origin/master', cwd=self.buildtools_dir) new_buildtools_commitish = out.strip() print('Ready to roll buildtools to %s in DEPS' % new_buildtools_commitish) return 0 def RollDEPS(self): ret, _, _ = self.Call('git new-branch roll_gn_%s' % self.new_gn_version) if ret: print('Failed to create a new branch for roll_gn_%s' % self.new_gn_version) return 1 _, out, _ = self.Call('git rev-parse origin/master', cwd=self.buildtools_dir) new_buildtools_commitish = out.strip() new_deps_lines = [] old_buildtools_commitish = '' with open(os.path.join(self.chromium_src_dir, 'DEPS')) as fp: for l in fp.readlines(): m = re.match(".*'buildtools_revision':.*'(.+)',", l) if m: old_buildtools_commitish = m.group(1) new_deps_lines.append(" 'buildtools_revision': '%s',\n" % new_buildtools_commitish) else: new_deps_lines.append(l) if not old_buildtools_commitish: print('Could not update DEPS properly, exiting') return 1 with open('DEPS', 'w') as fp: fp.write(''.join(new_deps_lines)) desc = self.GetDEPSRollDesc(old_buildtools_commitish, new_buildtools_commitish) desc_file = tempfile.NamedTemporaryFile(delete=False) try: desc_file.write(desc) desc_file.close() self.Call('git commit -a -F %s' % desc_file.name) self.Call('git-cl upload -f --send-mail --use-commit-queue') finally: os.remove(desc_file.name) # Move off of the roll branch onto whatever we were on before. # Do not explicitly close the roll CL issue, however; the CQ # will close it when the roll lands, assuming it does so. 
self.MoveToLastHead() return 0 def MoveToLastHead(self): # When this is called, there will be a commit + a checkout as # the two most recent entries in the reflog, assuming nothing as # modified the repo while this script has been running. _, out, _ = self.Call('git reflog -2') m = re.search('moving from ([^\s]+)', out) last_head = m.group(1) self.Call('git checkout %s' % last_head) def GetBuildtoolsDesc(self): gn_changes = self.GetGNChanges() return ( 'Roll gn %s..%s (r%s:r%s)\n' '\n' '%s' '\n' 'TBR=%s\n' % ( self.old_gn_commitish[:COMMITISH_DIGITS], self.new_gn_commitish[:COMMITISH_DIGITS], self.old_gn_version, self.new_gn_version, gn_changes, self.reviewer, )) def GetDEPSRollDesc(self, old_buildtools_commitish, new_buildtools_commitish): gn_changes = self.GetGNChanges() return ( 'Roll buildtools %s..%s\n' '\n' ' In order to roll GN %s..%s (r%s:r%s) and pick up\n' ' the following changes:\n' '\n' '%s' '\n' 'TBR=%s\n' % ( old_buildtools_commitish[:COMMITISH_DIGITS], new_buildtools_commitish[:COMMITISH_DIGITS], self.old_gn_commitish[:COMMITISH_DIGITS], self.new_gn_commitish[:COMMITISH_DIGITS], self.old_gn_version, self.new_gn_version, gn_changes, self.reviewer, )) def GetGNChanges(self): _, out, _ = self.Call( "git log --pretty=' %h %s' " + "%s..%s tools/gn" % (self.old_gn_commitish, self.new_gn_commitish)) return out def Call(self, cmd, cwd=None): proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, cwd=(cwd or self.chromium_src_dir)) out, err = proc.communicate() return proc.returncode, out, err if __name__ == '__main__': roller = GNRoller() sys.exit(roller.Roll())
en
0.888959
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. An auto-roller for GN binaries into Chromium. This script is used to update the GN binaries that a Chromium checkout uses. In order to update the binaries, one must follow four steps in order: 1. Trigger try jobs to build a new GN binary at tip-of-tree and upload the newly-built binaries into the right Google CloudStorage bucket. 2. Wait for the try jobs to complete. 3. Update the buildtools repo with the .sha1 hashes of the newly built binaries. 4. Update Chromium's DEPS file to the new version of the buildtools repo. The script has four commands that correspond to the four steps above: 'build', 'wait', 'roll_buildtools', and 'roll_deps'. The script has a fifth command, 'roll', that runs the four in order. If given no arguments, the script will run the 'roll' command. It can only be run on linux in a clean Chromium checkout; it should error out in most cases if something bad happens, but the error checking isn't yet foolproof. #(\d+)}', '\\1', last_line) #%s} - %s' % # Close the build CL and move off of the build branch back to whatever # we were on before. # TODO: At some point infra changed the step text to # contain the step name; once all of the masters have been # restarted we can probably assert that the step text # with the step_name. # Fetch the revision we just committed so that RollDEPS will find it. # Reset buildtools to the new commit so that we're not still on the # merged branch. # Move off of the roll branch onto whatever we were on before. # Do not explicitly close the roll CL issue, however; the CQ # will close it when the roll lands, assuming it does so. # When this is called, there will be a commit + a checkout as # the two most recent entries in the reflog, assuming nothing as # modified the repo while this script has been running.
2.479336
2
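The roll_gn.py record above drives every git and gn invocation through a small GNRoller.Call() wrapper around subprocess. The sketch below isolates that pattern; the function body mirrors the record, while the invocation under __main__ is illustrative only and not part of the original script.

from __future__ import print_function

import subprocess


def call(cmd, cwd=None):
    # Run a shell command, capture stdout, and return (returncode, stdout, stderr).
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, cwd=cwd)
    out, err = proc.communicate()  # err is None because stderr is not piped
    return proc.returncode, out, err


if __name__ == '__main__':
    ret, out, _ = call('git rev-parse --show-toplevel')
    print(ret, out.strip())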
src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py
HimashiRathnayake/adapter-transformers
50404
6631485
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for LayoutLMv2.""" import collections import os import sys import unicodedata from typing import Dict, List, Optional, Tuple, Union from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt", "microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/layoutlmv2-base-uncased": 512, "microsoft/layoutlmv2-large-uncased": 512, } PRETRAINED_INIT_CONFIGURATION = { "microsoft/layoutlmv2-base-uncased": {"do_lower_case": True}, "microsoft/layoutlmv2-large-uncased": {"do_lower_case": True}, } LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (:obj:`int`, `optional`): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (:obj:`int`, `optional`, defaults to 0): If set to a number along with :obj:`max_length`, the overflowing tokens returned when :obj:`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. """ def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens table = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P")) def subfinder(mylist, pattern): matches = [] indices = [] for idx, i in enumerate(range(len(mylist))): if mylist[i] == pattern[0] and mylist[i : i + len(pattern)] == pattern: matches.append(pattern) indices.append(idx) if matches: return matches[0], indices[0] else: return None, 0 class LayoutLMv2Tokenizer(PreTrainedTokenizer): r""" Construct a LayoutLMv2 tokenizer. Based on WordPiece. :class:`~transformers.LayoutLMv2Tokenizer` can be used to turn words, word-level bounding boxes and optional word labels to token-level :obj:`input_ids`, :obj:`attention_mask`, :obj:`token_type_ids`, :obj:`bbox`, and optional :obj:`labels` (for token classification). This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. :class:`~transformers.LayoutLMv2Tokenizer` runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the word-level bounding boxes into token-level bounding boxes. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, tokenize_chinese_chars=True, strip_accents=None, model_max_length: int = 512, additional_special_tokens: Optional[List[str]] = None, **kwargs ): super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, model_max_length=model_max_length, additional_special_tokens=additional_special_tokens, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating 
and adding special tokens. A BERT sequence has the following format: - single sequence: ``[CLS] X [SEP]`` - pair of sequences: ``[CLS] A [SEP] B [SEP]`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the token list is already formatted with special tokens for the model. Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" 
) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (:obj:`List[str]`, :obj:`List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (:obj:`List[List[int]]`, :obj:`List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (:obj:`List[int]`, :obj:`List[List[int]]`, `optional`): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." 
) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair assert boxes is not None, "You must provide corresponding bounding boxes" if is_batched: assert len(words) == len(boxes), "You must provide words and boxes for an equal amount of examples" for words_example, boxes_example in zip(words, boxes): assert len(words_example) == len( boxes_example ), "You must provide as many words as there are bounding boxes" else: assert len(words) == len(boxes), "You must provide as many words as there are bounding boxes" if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}." ) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( 
batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." ) batch_outputs = self._batch_prepare_for_model( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_text_or_text_pairs, is_pair: bool = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. 
It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)): batch_text_or_text_pair, boxes_example = example outputs = self.prepare_for_model( batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING) def encode( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> List[int]: """ ... 
""" encoded_inputs = self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) return encoded_inputs["input_ids"] @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, ``__call__`` should be used instead. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (:obj:`List[str]` or :obj:`List[int]`, `optional`): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) return self.prepare_for_model( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs ) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Word-level :obj:`boxes` are turned into token-level :obj:`bbox`. If provided, word-level :obj:`word_labels` are turned into token-level :obj:`labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (:obj:`List[str]` or :obj:`List[int]`, `optional`): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) tokens = [] pair_tokens = [] token_boxes = [] pair_token_boxes = [] labels = [] if text_pair is None: if word_labels is None: # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference) for word, box in zip(text, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) else: # CASE 2: token classification (training) for word, box, label in zip(text, boxes, word_labels): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) if self.only_label_first_subword: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: # CASE 3: document visual question answering (inference) # text = question # text_pair = words tokens = self.tokenize(text) token_boxes = [self.pad_token_box for _ in range(len(tokens))] for word, box in zip(text_pair, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) pair_token_boxes.extend([box] * len(word_tokens)) # Create ids + pair_ids ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None # Compute the total size of the returned encodings pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) = self.truncate_sequences( ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." 
) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes encoded_inputs["overflowing_labels"] = overflowing_labels encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box] if pair_token_boxes: pair_token_boxes = pair_token_boxes + [self.sep_token_box] if labels: labels = [self.pad_token_label] + labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["bbox"] = token_boxes + pair_token_boxes if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) if labels: encoded_inputs["labels"] = labels # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: List[int], token_boxes: List[List[int]], pair_ids: Optional[List[int]] = None, pair_token_boxes: Optional[List[List[int]]] = None, labels: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (:obj:`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the ``tokenize`` and ``convert_tokens_to_ids`` methods. token_boxes (:obj:`List[List[int]]`): Bounding boxes of the first sequence. pair_ids (:obj:`List[int]`, `optional`): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the ``tokenize`` and ``convert_tokens_to_ids`` methods. pair_token_boxes (:obj:`List[List[int]]`, `optional`): Bounding boxes of the second sequence. labels (:obj:`List[int]`, `optional`): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (:obj:`int`, `optional`, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (:obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`): The strategy to follow for truncation. 
Can be: * :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (:obj:`int`, `optional`, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: :obj:`Tuple[List[int], List[int], List[int]]`: The truncated ``ids``, the truncated ``pair_ids`` and the list of overflowing tokens. """ if num_tokens_to_remove <= 0: return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.LONGEST_FIRST: for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): if not overflowing_tokens: window_len = min(len(ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(ids[-window_len:]) overflowing_token_boxes.extend(token_boxes[-window_len:]) overflowing_labels.extend(labels[-window_len:]) ids = ids[:-1] token_boxes = token_boxes[:-1] labels = labels[:-1] else: if not overflowing_tokens: window_len = min(len(pair_ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(pair_ids[-window_len:]) overflowing_token_boxes.extend(pair_token_boxes[-window_len:]) pair_ids = pair_ids[:-1] pair_token_boxes = pair_token_boxes[:-1] elif truncation_strategy == TruncationStrategy.ONLY_FIRST: if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_token_boxes = token_boxes[-window_len:] overflowing_labels = labels[-window_len:] ids = ids[:-num_tokens_to_remove] token_boxes = token_boxes[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the first sequence has a length {len(ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " f"for instance 'longest_first' or 'only_second'." 
) elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] overflowing_token_boxes = pair_token_boxes[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " f"for instance 'longest_first' or 'only_first'." ) return ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability >= 7.5 (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. 
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif self.padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
                if "labels" in encoded_inputs:
                    # left-pad the labels with the label pad id (not with the bbox values)
                    encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs


# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (:obj:`Iterable`, `optional`):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            :obj:`do_basic_tokenize=True`
        tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            `issue <https://github.com/huggingface/transformers/issues/328>`__).
        strip_accents: (:obj:`bool`, `optional`):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for :obj:`lowercase` (as in the original BERT).
    """

    def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see
        WordPieceTokenizer.

        Args:
            **never_split**: (`optional`) list of str
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                :func:`PreTrainedTokenizer.tokenize`) List of token not to split.
        """
        # union() returns a new set by concatenating the two sets.
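        # e.g. (hypothetical values) self.never_split == {"[CLS]"} and never_split == ["[SEP]"]
        # yields {"[CLS]", "[SEP]"} below, while an empty never_split leaves self.never_split untouched.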
never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
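        # For example (hypothetical inputs), ord("中") == 0x4E2D falls inside the 0x4E00-0x9FFF range checked
        # below and returns True, whereas ord("a") == 0x61 matches none of the ranges and returns False.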
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, :obj:`input = "unaffable"` wil return as output :obj:`["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
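

# Illustrative sketch (not part of the tokenizer API) of how the two helpers above compose, assuming a
# tiny hypothetical vocabulary:
#
#     basic = BasicTokenizer(do_lower_case=True)
#     wordpiece = WordpieceTokenizer(vocab={"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}, unk_token="[UNK]")
#     for word in basic.tokenize("UNAFFABLE!"):   # -> ["unaffable", "!"]
#         print(wordpiece.tokenize(word))         # -> ["un", "##aff", "##able"], then ["[UNK]"]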
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for LayoutLMv2.""" import collections import os import sys import unicodedata from typing import Dict, List, Optional, Tuple, Union from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt", "microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/layoutlmv2-base-uncased": 512, "microsoft/layoutlmv2-large-uncased": 512, } PRETRAINED_INIT_CONFIGURATION = { "microsoft/layoutlmv2-base-uncased": {"do_lower_case": True}, "microsoft/layoutlmv2-large-uncased": {"do_lower_case": True}, } LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (:obj:`int`, `optional`): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (:obj:`int`, `optional`, defaults to 0): If set to a number along with :obj:`max_length`, the overflowing tokens returned when :obj:`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. """ def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens table = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P")) def subfinder(mylist, pattern): matches = [] indices = [] for idx, i in enumerate(range(len(mylist))): if mylist[i] == pattern[0] and mylist[i : i + len(pattern)] == pattern: matches.append(pattern) indices.append(idx) if matches: return matches[0], indices[0] else: return None, 0 class LayoutLMv2Tokenizer(PreTrainedTokenizer): r""" Construct a LayoutLMv2 tokenizer. Based on WordPiece. :class:`~transformers.LayoutLMv2Tokenizer` can be used to turn words, word-level bounding boxes and optional word labels to token-level :obj:`input_ids`, :obj:`attention_mask`, :obj:`token_type_ids`, :obj:`bbox`, and optional :obj:`labels` (for token classification). This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. :class:`~transformers.LayoutLMv2Tokenizer` runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the word-level bounding boxes into token-level bounding boxes. 
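
    Example (illustrative sketch; assumes the ``microsoft/layoutlmv2-base-uncased`` checkpoint is available)::

        from transformers import LayoutLMv2Tokenizer

        tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")

        # hypothetical words and boxes (normalized to a 0-1000 scale) for a single document image
        words = ["hello", "world"]
        boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]

        encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
        # input_ids, token_type_ids, attention_mask and the token-level bbox are returned
        print(encoding.keys())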
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, tokenize_chinese_chars=True, strip_accents=None, model_max_length: int = 512, additional_special_tokens: Optional[List[str]] = None, **kwargs ): super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, model_max_length=model_max_length, additional_special_tokens=additional_special_tokens, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating 
and adding special tokens. A BERT sequence has the following format: - single sequence: ``[CLS] X [SEP]`` - pair of sequences: ``[CLS] A [SEP] B [SEP]`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the token list is already formatted with special tokens for the model. Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" 
) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (:obj:`List[str]`, :obj:`List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (:obj:`List[List[int]]`, :obj:`List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (:obj:`List[int]`, :obj:`List[List[int]]`, `optional`): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." 
) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair assert boxes is not None, "You must provide corresponding bounding boxes" if is_batched: assert len(words) == len(boxes), "You must provide words and boxes for an equal amount of examples" for words_example, boxes_example in zip(words, boxes): assert len(words_example) == len( boxes_example ), "You must provide as many words as there are bounding boxes" else: assert len(words) == len(boxes), "You must provide as many words as there are bounding boxes" if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}." ) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( 
batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." ) batch_outputs = self._batch_prepare_for_model( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_text_or_text_pairs, is_pair: bool = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. 
It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)): batch_text_or_text_pair, boxes_example = example outputs = self.prepare_for_model( batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING) def encode( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> List[int]: """ ... 
""" encoded_inputs = self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) return encoded_inputs["input_ids"] @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, ``__call__`` should be used instead. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (:obj:`List[str]` or :obj:`List[int]`, `optional`): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) return self.prepare_for_model( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs ) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Word-level :obj:`boxes` are turned into token-level :obj:`bbox`. If provided, word-level :obj:`word_labels` are turned into token-level :obj:`labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (:obj:`List[str]` or :obj:`List[int]`, `optional`): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) tokens = [] pair_tokens = [] token_boxes = [] pair_token_boxes = [] labels = [] if text_pair is None: if word_labels is None: # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference) for word, box in zip(text, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) else: # CASE 2: token classification (training) for word, box, label in zip(text, boxes, word_labels): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) if self.only_label_first_subword: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: # CASE 3: document visual question answering (inference) # text = question # text_pair = words tokens = self.tokenize(text) token_boxes = [self.pad_token_box for _ in range(len(tokens))] for word, box in zip(text_pair, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) pair_token_boxes.extend([box] * len(word_tokens)) # Create ids + pair_ids ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None # Compute the total size of the returned encodings pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) = self.truncate_sequences( ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." 
) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes encoded_inputs["overflowing_labels"] = overflowing_labels encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box] if pair_token_boxes: pair_token_boxes = pair_token_boxes + [self.sep_token_box] if labels: labels = [self.pad_token_label] + labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["bbox"] = token_boxes + pair_token_boxes if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) if labels: encoded_inputs["labels"] = labels # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: List[int], token_boxes: List[List[int]], pair_ids: Optional[List[int]] = None, pair_token_boxes: Optional[List[List[int]]] = None, labels: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (:obj:`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the ``tokenize`` and ``convert_tokens_to_ids`` methods. token_boxes (:obj:`List[List[int]]`): Bounding boxes of the first sequence. pair_ids (:obj:`List[int]`, `optional`): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the ``tokenize`` and ``convert_tokens_to_ids`` methods. pair_token_boxes (:obj:`List[List[int]]`, `optional`): Bounding boxes of the second sequence. labels (:obj:`List[int]`, `optional`): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (:obj:`int`, `optional`, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (:obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`): The strategy to follow for truncation. 
Can be: * :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (:obj:`int`, `optional`, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: :obj:`Tuple[List[int], List[int], List[int]]`: The truncated ``ids``, the truncated ``pair_ids`` and the list of overflowing tokens. """ if num_tokens_to_remove <= 0: return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.LONGEST_FIRST: for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): if not overflowing_tokens: window_len = min(len(ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(ids[-window_len:]) overflowing_token_boxes.extend(token_boxes[-window_len:]) overflowing_labels.extend(labels[-window_len:]) ids = ids[:-1] token_boxes = token_boxes[:-1] labels = labels[:-1] else: if not overflowing_tokens: window_len = min(len(pair_ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(pair_ids[-window_len:]) overflowing_token_boxes.extend(pair_token_boxes[-window_len:]) pair_ids = pair_ids[:-1] pair_token_boxes = pair_token_boxes[:-1] elif truncation_strategy == TruncationStrategy.ONLY_FIRST: if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_token_boxes = token_boxes[-window_len:] overflowing_labels = labels[-window_len:] ids = ids[:-num_tokens_to_remove] token_boxes = token_boxes[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the first sequence has a length {len(ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " f"for instance 'longest_first' or 'only_second'." 
) elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] overflowing_token_boxes = pair_token_boxes[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " f"for instance 'longest_first' or 'only_first'." ) return ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability >= 7.5 (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. 
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["bbox"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to lowercase the input when tokenizing. never_split (:obj:`Iterable`, `optional`): Collection of tokens which will never be split during tokenization. Only has an effect when :obj:`do_basic_tokenize=True` tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this `issue <https://github.com/huggingface/transformers/issues/328>`__). strip_accents: (:obj:`bool`, `optional`): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for :obj:`lowercase` (as in the original BERT). """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: **never_split**: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. """ # union() returns a new set by concatenating the two sets. 
never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, :obj:`input = "unaffable"` wil return as output :obj:`["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
en
0.738059
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tokenization class for LayoutLMv2. add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (:obj:`int`, `optional`): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (:obj:`int`, `optional`, defaults to 0): If set to a number along with :obj:`max_length`, the overflowing tokens returned when :obj:`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. Loads a vocabulary file into a dictionary. Runs basic whitespace cleaning and splitting on a piece of text. Construct a LayoutLMv2 tokenizer. Based on WordPiece. :class:`~transformers.LayoutLMv2Tokenizer` can be used to turn words, word-level bounding boxes and optional word labels to token-level :obj:`input_ids`, :obj:`attention_mask`, :obj:`token_type_ids`, :obj:`bbox`, and optional :obj:`labels` (for token classification). This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. :class:`~transformers.LayoutLMv2Tokenizer` runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the word-level bounding boxes into token-level bounding boxes. # additional properties # If the token is part of the never_split set Converts a token (str) in an id using the vocab. Converts an index (integer) in a token (str) using the vocab. Converts a sequence of tokens (string) in a single string. ##", "").strip() Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: ``[CLS] X [SEP]`` - pair of sequences: ``[CLS] A [SEP] B [SEP]`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the token list is already formatted with special tokens for the model. Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). 
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (:obj:`List[str]`, :obj:`List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (:obj:`List[List[int]]`, :obj:`List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (:obj:`List[int]`, :obj:`List[List[int]]`, `optional`): Word-level integer labels (for token classification tasks such as FUNSD, CORD). # Input type checking for clearer error # Strings are fine # List are fine as long as they are... # ... empty # ... list of strings # ... list with an empty list or with a list of strings # in case text + text_pair are provided, text = questions, text_pair = words # in case only text is provided => must be words # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs # we pad in batch afterward # we pad in batch afterward # we pad in batch afterward # We convert the whole batch to tensors at the end ... Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, ``__call__`` should be used instead. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (:obj:`List[str]` or :obj:`List[int]`, `optional`): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Word-level :obj:`boxes` are turned into token-level :obj:`bbox`. If provided, word-level :obj:`word_labels` are turned into token-level :obj:`labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (:obj:`List[str]` or :obj:`List[int]`, `optional`): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length' # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference) # skip empty words # CASE 2: token classification (training) # skip empty words # Use the real label id for the first token of the word, and padding ids for the remaining tokens # CASE 3: document visual question answering (inference) # text = question # text_pair = words # skip empty words # Create ids + pair_ids # Compute the total size of the returned encodings # Truncation: Handle max sequence length # Load from model defaults # Add special tokens # Build output dictionary # Check lengths # Padding Truncates a sequence pair in-place following the strategy. Args: ids (:obj:`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the ``tokenize`` and ``convert_tokens_to_ids`` methods. token_boxes (:obj:`List[List[int]]`): Bounding boxes of the first sequence. pair_ids (:obj:`List[int]`, `optional`): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the ``tokenize`` and ``convert_tokens_to_ids`` methods. pair_token_boxes (:obj:`List[List[int]]`, `optional`): Bounding boxes of the second sequence. labels (:obj:`List[int]`, `optional`): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (:obj:`int`, `optional`, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (:obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`): The strategy to follow for truncation. Can be: * :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (:obj:`int`, `optional`, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: :obj:`Tuple[List[int], List[int], List[int]]`: The truncated ``ids``, the truncated ``pair_ids`` and the list of overflowing tokens. Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. 
padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability >= 7.5 (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) # Load from model defaults # Initialize attention mask if not present. # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to lowercase the input when tokenizing. never_split (:obj:`Iterable`, `optional`): Collection of tokens which will never be split during tokenization. Only has an effect when :obj:`do_basic_tokenize=True` tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this `issue <https://github.com/huggingface/transformers/issues/328>`__). strip_accents: (:obj:`bool`, `optional`): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for :obj:`lowercase` (as in the original BERT). Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: **never_split**: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. # union() returns a new set by concatenating the two sets. # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). Strips accents from a piece of text. Splits punctuation on a piece of text. Adds whitespace around any CJK character. Checks whether CP is the codepoint of a CJK character. # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. # # # # # # # Performs invalid character removal and whitespace cleanup on text. # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer Runs WordPiece tokenization. Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. 
For example, :obj:`input = "unaffable"` will return as output :obj:`["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. #" + substr
1.76468
2
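The prepare_for_model logic in the record above turns word-level boxes and labels into token-level bbox and labels, keeping the real label only on a word's first subword. A minimal self-contained sketch of that alignment step (the toy tokenizer and the -100 ignore label are assumptions for illustration, not the transformers API):

# Sketch of the word -> token alignment described in the LayoutLMv2 tokenizer above.
PAD_TOKEN_LABEL = -100  # assumed ignore index, as in the docstring above

def toy_tokenize(word):
    # Stand-in for WordPiece: split words longer than 4 characters into two pieces.
    return [word] if len(word) <= 4 else [word[:4], "##" + word[4:]]

def align_words(words, boxes, labels, only_label_first_subword=True):
    tokens, token_boxes, token_labels = [], [], []
    for word, box, label in zip(words, boxes, labels):
        pieces = toy_tokenize(word)
        tokens.extend(pieces)
        token_boxes.extend([box] * len(pieces))   # every subword inherits the word's box
        if only_label_first_subword:
            token_labels.extend([label] + [PAD_TOKEN_LABEL] * (len(pieces) - 1))
        else:
            token_labels.extend([label] * len(pieces))
    return tokens, token_boxes, token_labels

print(align_words(["hello", "world"], [[1, 2, 3, 4], [5, 6, 7, 8]], [1, 2]))
# (['hell', '##o', 'worl', '##d'],
#  [[1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [5, 6, 7, 8]],
#  [1, -100, 2, -100])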
frappe-bench/apps/erpnext/erpnext/accounts/doctype/account/chart_of_accounts/chart_of_accounts.py
Semicheche/foa_frappe_docker
0
6631486
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe, os, json from frappe.utils import cstr from unidecode import unidecode def create_charts(company, chart_template=None, existing_company=None): chart = get_chart(chart_template, existing_company) if chart: accounts = [] def _import_accounts(children, parent, root_type, root_account=False): for account_name, child in children.items(): if root_account: root_type = child.get("root_type") if account_name not in ["account_number", "account_type", "root_type", "is_group", "tax_rate"]: account_number = cstr(child.get("account_number")).strip() account_name, account_name_in_db = add_suffix_if_duplicate(account_name, account_number, accounts) is_group = identify_is_group(child) report_type = "Balance Sheet" if root_type in ["Asset", "Liability", "Equity"] \ else "Profit and Loss" account = frappe.get_doc({ "doctype": "Account", "account_name": account_name, "company": company, "parent_account": parent, "is_group": is_group, "root_type": root_type, "report_type": report_type, "account_number": account_number, "account_type": child.get("account_type"), "account_currency": frappe.db.get_value("Company", company, "default_currency"), "tax_rate": child.get("tax_rate") }) if root_account or frappe.local.flags.allow_unverified_charts: account.flags.ignore_mandatory = True account.flags.ignore_permissions = True account.insert() accounts.append(account_name_in_db) _import_accounts(child, account.name, root_type) _import_accounts(chart, None, None, root_account=True) def add_suffix_if_duplicate(account_name, account_number, accounts): if account_number: account_name_in_db = unidecode(" - ".join([account_number, account_name.strip().lower()])) else: account_name_in_db = unidecode(account_name.strip().lower()) if account_name_in_db in accounts: count = accounts.count(account_name_in_db) account_name = account_name + " " + cstr(count) return account_name, account_name_in_db def identify_is_group(child): if child.get("is_group"): is_group = child.get("is_group") elif len(set(child.keys()) - set(["account_type", "root_type", "is_group", "tax_rate", "account_number"])): is_group = 1 else: is_group = 0 return is_group def get_chart(chart_template, existing_company=None): chart = {} if existing_company: return get_account_tree_from_existing_company(existing_company) elif chart_template == "Standard": from erpnext.accounts.doctype.account.chart_of_accounts.verified import standard_chart_of_accounts return standard_chart_of_accounts.get() elif chart_template == "Standard with Numbers": from erpnext.accounts.doctype.account.chart_of_accounts.verified \ import standard_chart_of_accounts_with_account_number return standard_chart_of_accounts_with_account_number.get() else: folders = ("verified",) if frappe.local.flags.allow_unverified_charts: folders = ("verified", "unverified") for folder in folders: path = os.path.join(os.path.dirname(__file__), folder) for fname in os.listdir(path): fname = frappe.as_unicode(fname) if fname.endswith(".json"): with open(os.path.join(path, fname), "r") as f: chart = f.read() if chart and json.loads(chart).get("name") == chart_template: return json.loads(chart).get("tree") @frappe.whitelist() def get_charts_for_country(country, with_standard=False): charts = [] def _get_chart_name(content): if content: content = json.loads(content) if (content and content.get("disabled", "No") == "No") \ or 
frappe.local.flags.allow_unverified_charts: charts.append(content["name"]) country_code = frappe.db.get_value("Country", country, "code") if country_code: folders = ("verified",) if frappe.local.flags.allow_unverified_charts: folders = ("verified", "unverified") for folder in folders: path = os.path.join(os.path.dirname(__file__), folder) for fname in os.listdir(path): fname = frappe.as_unicode(fname) if (fname.startswith(country_code) or fname.startswith(country)) and fname.endswith(".json"): with open(os.path.join(path, fname), "r") as f: _get_chart_name(f.read()) if len(charts) != 1 or with_standard: charts += ["Standard", "Standard with Numbers"] return charts def get_account_tree_from_existing_company(existing_company): all_accounts = frappe.get_all('Account', filters={'company': existing_company}, fields = ["name", "account_name", "parent_account", "account_type", "is_group", "root_type", "tax_rate", "account_number"], order_by="lft, rgt") account_tree = {} # fill in tree starting with root accounts (those with no parent) if all_accounts: build_account_tree(account_tree, None, all_accounts) return account_tree def build_account_tree(tree, parent, all_accounts): # find children parent_account = parent.name if parent else "" children = [acc for acc in all_accounts if cstr(acc.parent_account) == parent_account] # if no children, but a group account if not children and parent.is_group: tree["is_group"] = 1 tree["account_number"] = parent.account_number # build a subtree for each child for child in children: # start new subtree tree[child.account_name] = {} # assign account_type and root_type if child.account_type: tree[child.account_name]["account_number"] = child.account_number if child.account_type: tree[child.account_name]["account_type"] = child.account_type if child.tax_rate: tree[child.account_name]["tax_rate"] = child.tax_rate if not parent: tree[child.account_name]["root_type"] = child.root_type # call recursively to build a subtree for current account build_account_tree(tree[child.account_name], child, all_accounts) @frappe.whitelist() def validate_bank_account(coa, bank_account): accounts = [] chart = get_chart(coa) if chart: def _get_account_names(account_master): for account_name, child in account_master.items(): if account_name not in ["account_number", "account_type", "root_type", "is_group", "tax_rate"]: accounts.append(account_name) _get_account_names(child) _get_account_names(chart) return (bank_account in accounts)
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe, os, json from frappe.utils import cstr from unidecode import unidecode def create_charts(company, chart_template=None, existing_company=None): chart = get_chart(chart_template, existing_company) if chart: accounts = [] def _import_accounts(children, parent, root_type, root_account=False): for account_name, child in children.items(): if root_account: root_type = child.get("root_type") if account_name not in ["account_number", "account_type", "root_type", "is_group", "tax_rate"]: account_number = cstr(child.get("account_number")).strip() account_name, account_name_in_db = add_suffix_if_duplicate(account_name, account_number, accounts) is_group = identify_is_group(child) report_type = "Balance Sheet" if root_type in ["Asset", "Liability", "Equity"] \ else "Profit and Loss" account = frappe.get_doc({ "doctype": "Account", "account_name": account_name, "company": company, "parent_account": parent, "is_group": is_group, "root_type": root_type, "report_type": report_type, "account_number": account_number, "account_type": child.get("account_type"), "account_currency": frappe.db.get_value("Company", company, "default_currency"), "tax_rate": child.get("tax_rate") }) if root_account or frappe.local.flags.allow_unverified_charts: account.flags.ignore_mandatory = True account.flags.ignore_permissions = True account.insert() accounts.append(account_name_in_db) _import_accounts(child, account.name, root_type) _import_accounts(chart, None, None, root_account=True) def add_suffix_if_duplicate(account_name, account_number, accounts): if account_number: account_name_in_db = unidecode(" - ".join([account_number, account_name.strip().lower()])) else: account_name_in_db = unidecode(account_name.strip().lower()) if account_name_in_db in accounts: count = accounts.count(account_name_in_db) account_name = account_name + " " + cstr(count) return account_name, account_name_in_db def identify_is_group(child): if child.get("is_group"): is_group = child.get("is_group") elif len(set(child.keys()) - set(["account_type", "root_type", "is_group", "tax_rate", "account_number"])): is_group = 1 else: is_group = 0 return is_group def get_chart(chart_template, existing_company=None): chart = {} if existing_company: return get_account_tree_from_existing_company(existing_company) elif chart_template == "Standard": from erpnext.accounts.doctype.account.chart_of_accounts.verified import standard_chart_of_accounts return standard_chart_of_accounts.get() elif chart_template == "Standard with Numbers": from erpnext.accounts.doctype.account.chart_of_accounts.verified \ import standard_chart_of_accounts_with_account_number return standard_chart_of_accounts_with_account_number.get() else: folders = ("verified",) if frappe.local.flags.allow_unverified_charts: folders = ("verified", "unverified") for folder in folders: path = os.path.join(os.path.dirname(__file__), folder) for fname in os.listdir(path): fname = frappe.as_unicode(fname) if fname.endswith(".json"): with open(os.path.join(path, fname), "r") as f: chart = f.read() if chart and json.loads(chart).get("name") == chart_template: return json.loads(chart).get("tree") @frappe.whitelist() def get_charts_for_country(country, with_standard=False): charts = [] def _get_chart_name(content): if content: content = json.loads(content) if (content and content.get("disabled", "No") == "No") \ or 
frappe.local.flags.allow_unverified_charts: charts.append(content["name"]) country_code = frappe.db.get_value("Country", country, "code") if country_code: folders = ("verified",) if frappe.local.flags.allow_unverified_charts: folders = ("verified", "unverified") for folder in folders: path = os.path.join(os.path.dirname(__file__), folder) for fname in os.listdir(path): fname = frappe.as_unicode(fname) if (fname.startswith(country_code) or fname.startswith(country)) and fname.endswith(".json"): with open(os.path.join(path, fname), "r") as f: _get_chart_name(f.read()) if len(charts) != 1 or with_standard: charts += ["Standard", "Standard with Numbers"] return charts def get_account_tree_from_existing_company(existing_company): all_accounts = frappe.get_all('Account', filters={'company': existing_company}, fields = ["name", "account_name", "parent_account", "account_type", "is_group", "root_type", "tax_rate", "account_number"], order_by="lft, rgt") account_tree = {} # fill in tree starting with root accounts (those with no parent) if all_accounts: build_account_tree(account_tree, None, all_accounts) return account_tree def build_account_tree(tree, parent, all_accounts): # find children parent_account = parent.name if parent else "" children = [acc for acc in all_accounts if cstr(acc.parent_account) == parent_account] # if no children, but a group account if not children and parent.is_group: tree["is_group"] = 1 tree["account_number"] = parent.account_number # build a subtree for each child for child in children: # start new subtree tree[child.account_name] = {} # assign account_type and root_type if child.account_type: tree[child.account_name]["account_number"] = child.account_number if child.account_type: tree[child.account_name]["account_type"] = child.account_type if child.tax_rate: tree[child.account_name]["tax_rate"] = child.tax_rate if not parent: tree[child.account_name]["root_type"] = child.root_type # call recursively to build a subtree for current account build_account_tree(tree[child.account_name], child, all_accounts) @frappe.whitelist() def validate_bank_account(coa, bank_account): accounts = [] chart = get_chart(coa) if chart: def _get_account_names(account_master): for account_name, child in account_master.items(): if account_name not in ["account_number", "account_type", "root_type", "is_group", "tax_rate"]: accounts.append(account_name) _get_account_names(child) _get_account_names(chart) return (bank_account in accounts)
en
0.768632
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt # fill in tree starting with root accounts (those with no parent) # find children # if no children, but a group account # build a subtree for each child # start new subtree # assign account_type and root_type # call recursively to build a subtree for current account
1.935029
2
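The build_account_tree function in the record above recursively nests accounts under their parent_account. A small self-contained sketch of the same parent-pointer-to-nested-dict idea (simplified field names, not the ERPNext/Frappe API):

# Toy version of build_account_tree: flat rows with parent pointers -> nested dict.
accounts = [
    {"name": "Assets", "parent": None},
    {"name": "Bank", "parent": "Assets"},
    {"name": "Cash", "parent": "Assets"},
    {"name": "Liabilities", "parent": None},
]

def build_tree(parent=None):
    # Collect every account whose parent matches, then recurse into each one.
    return {
        acc["name"]: build_tree(acc["name"])
        for acc in accounts
        if acc["parent"] == parent
    }

print(build_tree())
# {'Assets': {'Bank': {}, 'Cash': {}}, 'Liabilities': {}}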
yaojikai/20180327/h2.py
python20180319howmework/homework
0
6631487
<filename>yaojikai/20180327/h2.py
# 2. Generate a random positive integer and, without using Python built-ins,
#    compute its binary representation and print it.
import random

l = []
num = random.randint(1, 100)
numb = num
while num > 0:
    res = num % 2
    num //= 2
    l.append(res)
l.reverse()
print("The random positive integer is {}; its binary representation is {}".format(numb, l))
<filename>yaojikai/20180327/h2.py
# 2. Generate a random positive integer and, without using Python built-ins,
#    compute its binary representation and print it.
import random

l = []
num = random.randint(1, 100)
numb = num
while num > 0:
    res = num % 2
    num //= 2
    l.append(res)
l.reverse()
print("The random positive integer is {}; its binary representation is {}".format(numb, l))
zh
0.995518
# 2. Generate a random positive integer and, without using Python built-ins, compute its binary representation and print it.
3.543941
4
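The homework snippet above builds the binary expansion by repeated division by two. The same idea as a reusable function with a zero guard (a sketch, not part of the original file):

def to_binary_digits(n):
    """Return the binary digits of a non-negative integer, most significant bit first."""
    if n == 0:
        return [0]
    digits = []
    while n > 0:
        digits.append(n % 2)  # remainder is the next least-significant bit
        n //= 2
    digits.reverse()
    return digits

assert to_binary_digits(13) == [1, 1, 0, 1]
assert to_binary_digits(1) == [1]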
shadowreader/plugins/loader_middleware.py
luckymike/shadowreader
150
6631488
<gh_stars>100-1000 def _transform_uri(uri: str, base_url: str) -> str: url = f"{base_url}{uri}" return url def _transform_load(load: list, base_url: str) -> list: if "request_uri" in load[0]: uri_key = "request_uri" elif "uri" in load[0]: uri_key = "uri" for l in load: l["url"] = _transform_uri(l[uri_key], base_url) return load def main(**kwargs) -> list: load = kwargs.get("load", []) base_url = kwargs.get("base_url", "") if not base_url: raise ValueError(f"Base URL was not set! {base_url}") if load: load = _transform_load(load, base_url) return load
def _transform_uri(uri: str, base_url: str) -> str: url = f"{base_url}{uri}" return url def _transform_load(load: list, base_url: str) -> list: if "request_uri" in load[0]: uri_key = "request_uri" elif "uri" in load[0]: uri_key = "uri" for l in load: l["url"] = _transform_uri(l[uri_key], base_url) return load def main(**kwargs) -> list: load = kwargs.get("load", []) base_url = kwargs.get("base_url", "") if not base_url: raise ValueError(f"Base URL was not set! {base_url}") if load: load = _transform_load(load, base_url) return load
none
1
2.659269
3
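A hedged usage sketch of the plugin's main() entry point shown above; the import path is assumed from the repository layout in the record and may differ in an actual ShadowReader deployment:

# Assumed import path based on the repo path above; adjust to your installation.
from plugins.loader_middleware import main

load = [
    {"request_uri": "/health", "user_agent": "curl"},
    {"request_uri": "/login?next=/home", "user_agent": "firefox"},
]

transformed = main(load=load, base_url="https://staging.example.com")
for entry in transformed:
    print(entry["url"])
# https://staging.example.com/health
# https://staging.example.com/login?next=/home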
sheraf/types/__init__.py
yaal-fr/sheraf
1
6631489
<filename>sheraf/types/__init__.py
"""Types are used to define the internal storage in ZODB.

There is no need to set up specific types in basic usage of sheraf because
`Model` uses `xAttribute`, not `Type`.
"""
import BTrees.Length
import BTrees.OOBTree
import persistent
import ZODB

import sheraf.tools.dicttools

from .largedict import LargeDict
from .largelist import LargeList

assert LargeDict
assert LargeList

SmallList = persistent.list.PersistentList
List = SmallList

Set = BTrees.OOBTree.OOTreeSet


class SmallDict(persistent.mapping.PersistentMapping):
    """SmallDict is a :class:`PersistentMapping` implementation with a simple
    conflict resolution strategy.

    When two different keys of the mapping are edited concurrently, no
    conflict is raised.
    """

    def _p_resolveConflict(self, old, saved, commited):
        try:
            return sheraf.tools.dicttools.merge(old, saved, commited)
        except sheraf.tools.dicttools.DictConflictException:
            raise ZODB.POSException.ConflictError()
<filename>sheraf/types/__init__.py
"""Types are used to define the internal storage in ZODB.

There is no need to set up specific types in basic usage of sheraf because
`Model` uses `xAttribute`, not `Type`.
"""
import BTrees.Length
import BTrees.OOBTree
import persistent
import ZODB

import sheraf.tools.dicttools

from .largedict import LargeDict
from .largelist import LargeList

assert LargeDict
assert LargeList

SmallList = persistent.list.PersistentList
List = SmallList

Set = BTrees.OOBTree.OOTreeSet


class SmallDict(persistent.mapping.PersistentMapping):
    """SmallDict is a :class:`PersistentMapping` implementation with a simple
    conflict resolution strategy.

    When two different keys of the mapping are edited concurrently, no
    conflict is raised.
    """

    def _p_resolveConflict(self, old, saved, commited):
        try:
            return sheraf.tools.dicttools.merge(old, saved, commited)
        except sheraf.tools.dicttools.DictConflictException:
            raise ZODB.POSException.ConflictError()
en
0.855488
Types are used to define the internal storage in ZODB. There is no need to set up specific types in basic usage of sheraf because `Model` uses `xAttribute`, not `Type`. SmallDict is a :class:`PersistentMapping` implementation with a simple conflict resolution strategy. When two different keys of the mapping are edited concurrently, no conflict is raised.
2.495205
2
utils/utils_spam.py
surrealyz/DeepBayes
0
6631490
<gh_stars>0 from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np import sys import warnings import os, socket def to_categorical(y, num_classes=None): y = np.array(y, dtype='int').ravel() if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes)) categorical[np.arange(n), y] = 1 return categorical def data_spam(datadir='/home/yz/code/trees/twitter_spam/', train_start=0, train_end=295870, test_start=0, test_end=126082): """ Load and preprocess MNIST dataset :param datadir: path to folder where data should be stored :param train_start: index of first training set example :param train_end: index of last training set example :param test_start: index of first test set example :param test_end: index of last test set example :return: tuple of four arrays containing training data, training labels, testing data and testing labels. """ assert isinstance(train_start, int) assert isinstance(train_end, int) assert isinstance(test_start, int) assert isinstance(test_end, int) if socket.gethostname() == 'deep': datadir = '/home/mcz/Desktop/courses_2019F/DavidBlei/Project/' elif socket.gethostname() == 'hulk': datadir = '/home/mcz/Spam/' train = np.loadtxt(datadir+"twitter_spam_reduced.train.csv", delimiter=",") test = np.loadtxt(datadir+"twitter_spam_reduced.test.csv", delimiter=",") X_train = train[:, 1:] Y_train = train[:, :1].flatten() X_test = test[:, 1:] Y_test = test[:, :1].flatten() X_train = X_train[train_start:train_end] Y_train = Y_train[train_start:train_end] X_test = X_test[test_start:test_end] Y_test = Y_test[test_start:test_end] Y_train = to_categorical(Y_train, 2) Y_test = to_categorical(Y_test, 2) print('Spam X_train shape:', X_train.shape) print('Spam X_test shape:', X_test.shape) return X_train, Y_train, X_test, Y_test
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np import sys import warnings import os, socket def to_categorical(y, num_classes=None): y = np.array(y, dtype='int').ravel() if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes)) categorical[np.arange(n), y] = 1 return categorical def data_spam(datadir='/home/yz/code/trees/twitter_spam/', train_start=0, train_end=295870, test_start=0, test_end=126082): """ Load and preprocess MNIST dataset :param datadir: path to folder where data should be stored :param train_start: index of first training set example :param train_end: index of last training set example :param test_start: index of first test set example :param test_end: index of last test set example :return: tuple of four arrays containing training data, training labels, testing data and testing labels. """ assert isinstance(train_start, int) assert isinstance(train_end, int) assert isinstance(test_start, int) assert isinstance(test_end, int) if socket.gethostname() == 'deep': datadir = '/home/mcz/Desktop/courses_2019F/DavidBlei/Project/' elif socket.gethostname() == 'hulk': datadir = '/home/mcz/Spam/' train = np.loadtxt(datadir+"twitter_spam_reduced.train.csv", delimiter=",") test = np.loadtxt(datadir+"twitter_spam_reduced.test.csv", delimiter=",") X_train = train[:, 1:] Y_train = train[:, :1].flatten() X_test = test[:, 1:] Y_test = test[:, :1].flatten() X_train = X_train[train_start:train_end] Y_train = Y_train[train_start:train_end] X_test = X_test[test_start:test_end] Y_test = Y_test[test_start:test_end] Y_train = to_categorical(Y_train, 2) Y_test = to_categorical(Y_test, 2) print('Spam X_train shape:', X_train.shape) print('Spam X_test shape:', X_test.shape) return X_train, Y_train, X_test, Y_test
en
0.716745
Load and preprocess the Twitter spam dataset :param datadir: path to folder where data should be stored :param train_start: index of first training set example :param train_end: index of last training set example :param test_start: index of first test set example :param test_end: index of last test set example :return: tuple of four arrays containing training data, training labels, testing data and testing labels.
2.832583
3
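The to_categorical helper above is a plain one-hot encoder; the sketch below restates its core logic so it is self-contained and shows the expected output for a tiny label vector (the sample labels are illustrative).

import numpy as np

def to_categorical(y, num_classes=None):
    # One-hot encode integer labels, mirroring the record above.
    y = np.array(y, dtype='int').ravel()
    if not num_classes:
        num_classes = np.max(y) + 1
    categorical = np.zeros((y.shape[0], num_classes))
    categorical[np.arange(y.shape[0]), y] = 1
    return categorical

print(to_categorical([0, 1, 1], 2))
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]]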
uspto_tools/parse/patent.py
clicumu/uspto-tools
0
6631491
""" This module containt simple wrapper classes for a US-patent, inventor, patent-reference and classification. """ import re class USPatent: """ A single patent instance. """ def __init__(self, clean_id=True, **kwargs): self._patent_number = None self.date = None self.country = None self.series_code = None self.kind = None self.application_number = None self.application_type = None self.application_country = None self.application_date = None self.art_unit = None self.title = None self.primary_examiner = None self.parent_code = None self.parent_application_number = None self.parent_status_code = None self.us_references = list() self.inventors = list() self.claims = list() self.design_claims = None self.abstract = None self.brief_summary = None self.description = None self.patent_classification = None self.clean_id = clean_id _set_attributes_from_kwargs(self, kwargs) @property def patent_number(self): return self._patent_number @patent_number.setter def patent_number(self, patent_number): if self.clean_id: patent_number = re.sub(r'[^a-zA-Z\d]', '', patent_number) self._patent_number = patent_number @property def document_id(self): document_id = ' '.join(filter(None, [self.country, self.patent_number, self.kind])) if self.date is not None: document_id += '-{}'.format(self.date) return document_id def __repr__(self): return '<{}: {}>'.format(self.__class__.__name__, self.document_id) class Inventor: """ Patent inventor. """ def __init__(self, **kwargs): self.name = None self.country = None self.city = None self.zip_code = None self.state = None _set_attributes_from_kwargs(self, kwargs) def __repr__(self): return '{}(name="{}", city="{}")'.format(self.__class__.__name__, self.name, self.city) class PatentClassification: def __init__(self, **kwargs): self.us_classification = None self.cross_reference = None self.unofficial_reference = None self.digest_reference = None self.edition_field = None self.international_classification = None self.field_of_search_class = None self.field_of_search_subclasses = None _set_attributes_from_kwargs(self, kwargs) def __repr__(self): return '{}(us_classification="{}")'.format(self.__class__.__name__, self.us_classification) class USReference: def __init__(self, **kwargs): self.patent_number = None self.issue_date = None self.patentee_name = None self.country = None _set_attributes_from_kwargs(self, kwargs) def __repr__(self): base_str = '{}(patent_number="{}", issue_date="{}", patentee_name="{}")' return base_str.format(self.__class__.__name__, self.patent_number, self.issue_date, self.patentee_name) def _set_attributes_from_kwargs(instance, kwargs): """ Set attributes of `instance` from keywords in `kwargs.` Parameters ---------- instance : Any Target-instance. kwargs : dict Keyword-arguments. Raises ------ ValueError If any key in `kwargs` does not match any attribute of `instance`. """ for key, value in kwargs.items(): if hasattr(instance, key): setattr(instance, key, value) else: raise ValueError('Invalid key-word argument: {}'.format(key))
""" This module containt simple wrapper classes for a US-patent, inventor, patent-reference and classification. """ import re class USPatent: """ A single patent instance. """ def __init__(self, clean_id=True, **kwargs): self._patent_number = None self.date = None self.country = None self.series_code = None self.kind = None self.application_number = None self.application_type = None self.application_country = None self.application_date = None self.art_unit = None self.title = None self.primary_examiner = None self.parent_code = None self.parent_application_number = None self.parent_status_code = None self.us_references = list() self.inventors = list() self.claims = list() self.design_claims = None self.abstract = None self.brief_summary = None self.description = None self.patent_classification = None self.clean_id = clean_id _set_attributes_from_kwargs(self, kwargs) @property def patent_number(self): return self._patent_number @patent_number.setter def patent_number(self, patent_number): if self.clean_id: patent_number = re.sub(r'[^a-zA-Z\d]', '', patent_number) self._patent_number = patent_number @property def document_id(self): document_id = ' '.join(filter(None, [self.country, self.patent_number, self.kind])) if self.date is not None: document_id += '-{}'.format(self.date) return document_id def __repr__(self): return '<{}: {}>'.format(self.__class__.__name__, self.document_id) class Inventor: """ Patent inventor. """ def __init__(self, **kwargs): self.name = None self.country = None self.city = None self.zip_code = None self.state = None _set_attributes_from_kwargs(self, kwargs) def __repr__(self): return '{}(name="{}", city="{}")'.format(self.__class__.__name__, self.name, self.city) class PatentClassification: def __init__(self, **kwargs): self.us_classification = None self.cross_reference = None self.unofficial_reference = None self.digest_reference = None self.edition_field = None self.international_classification = None self.field_of_search_class = None self.field_of_search_subclasses = None _set_attributes_from_kwargs(self, kwargs) def __repr__(self): return '{}(us_classification="{}")'.format(self.__class__.__name__, self.us_classification) class USReference: def __init__(self, **kwargs): self.patent_number = None self.issue_date = None self.patentee_name = None self.country = None _set_attributes_from_kwargs(self, kwargs) def __repr__(self): base_str = '{}(patent_number="{}", issue_date="{}", patentee_name="{}")' return base_str.format(self.__class__.__name__, self.patent_number, self.issue_date, self.patentee_name) def _set_attributes_from_kwargs(instance, kwargs): """ Set attributes of `instance` from keywords in `kwargs.` Parameters ---------- instance : Any Target-instance. kwargs : dict Keyword-arguments. Raises ------ ValueError If any key in `kwargs` does not match any attribute of `instance`. """ for key, value in kwargs.items(): if hasattr(instance, key): setattr(instance, key, value) else: raise ValueError('Invalid key-word argument: {}'.format(key))
en
0.50555
This module contains simple wrapper classes for a US patent, inventor, patent reference and classification. A single patent instance. Patent inventor. Set attributes of `instance` from keywords in `kwargs`. Parameters ---------- instance : Any Target instance. kwargs : dict Keyword arguments. Raises ------ ValueError If any key in `kwargs` does not match any attribute of `instance`.
2.672079
3
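A hedged construction sketch for the USPatent/Inventor wrappers above: keyword arguments map onto existing attributes via _set_attributes_from_kwargs, and clean_id strips punctuation from the patent number. The patent data and the import path are invented for illustration.

# Import path inferred from uspto_tools/parse/patent.py.
from uspto_tools.parse.patent import USPatent, Inventor

patent = USPatent(
    patent_number="US 9,876,543 B2",  # cleaned to "US9876543B2" because clean_id=True
    country="US",
    kind="B2",
    date="19990413",
    inventors=[Inventor(name="Jane Doe", city="Springfield")],
)
print(patent.document_id)  # -> US US9876543B2 B2-19990413
# USPatent(bogus_field=1) would raise ValueError("Invalid key-word argument: bogus_field")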
ibeatles/step3/gui_handler.py
indudhiman/bragg-edge
0
6631492
<reponame>indudhiman/bragg-edge from ibeatles.step1.plot import Step1Plot from ibeatles.utilities.retrieve_data_infos import RetrieveGeneralFileInfos, RetrieveSelectedFileDataInfos class Step3GuiHandler(object): def __init__(self, parent=None): self.parent = parent def load_normalized_changed(self, tab_index=0): if tab_index == 0: data_preview_box_label = "Sample Image Preview" o_general_infos = RetrieveGeneralFileInfos(parent = self.parent, data_type = 'sample') o_selected_infos = RetrieveSelectedFileDataInfos(parent = self.parent, data_type = 'sample') else: data_preview_box_label = "Open Beam Image Preview" o_general_infos = RetrieveGeneralFileInfos(parent = self.parent, data_type = 'ob') o_selected_infos = RetrieveSelectedFileDataInfos(parent = self.parent, data_type = 'ob') self.parent.ui.data_preview_box.setTitle(data_preview_box_label) o_general_infos.update() o_selected_infos.update() def select_normalized_row(self, row=0): self.parent.ui.list_normalized.setCurrentRow(row) # o_step1_plot = Step1Plot(parent = self.parent) # o_step1_plot.display_2d_preview() def check_time_spectra_widgets(self): time_spectra_data = self.parent.data_metadata['time_spectra']['normalized_folder'] if self.parent.ui.material_display_checkbox_2.isChecked(): if time_spectra_data == []: _display_error_label = True else: _display_error_label = False else: _display_error_label = False self.parent.ui.display_warning_2.setVisible(_display_error_label) def check_widgets(self): if self.parent.data_files['normalized'] == []: status = False else: status = True self.parent.ui.actionRotate_Images.setEnabled(True)
from ibeatles.step1.plot import Step1Plot from ibeatles.utilities.retrieve_data_infos import RetrieveGeneralFileInfos, RetrieveSelectedFileDataInfos class Step3GuiHandler(object): def __init__(self, parent=None): self.parent = parent def load_normalized_changed(self, tab_index=0): if tab_index == 0: data_preview_box_label = "Sample Image Preview" o_general_infos = RetrieveGeneralFileInfos(parent = self.parent, data_type = 'sample') o_selected_infos = RetrieveSelectedFileDataInfos(parent = self.parent, data_type = 'sample') else: data_preview_box_label = "Open Beam Image Preview" o_general_infos = RetrieveGeneralFileInfos(parent = self.parent, data_type = 'ob') o_selected_infos = RetrieveSelectedFileDataInfos(parent = self.parent, data_type = 'ob') self.parent.ui.data_preview_box.setTitle(data_preview_box_label) o_general_infos.update() o_selected_infos.update() def select_normalized_row(self, row=0): self.parent.ui.list_normalized.setCurrentRow(row) # o_step1_plot = Step1Plot(parent = self.parent) # o_step1_plot.display_2d_preview() def check_time_spectra_widgets(self): time_spectra_data = self.parent.data_metadata['time_spectra']['normalized_folder'] if self.parent.ui.material_display_checkbox_2.isChecked(): if time_spectra_data == []: _display_error_label = True else: _display_error_label = False else: _display_error_label = False self.parent.ui.display_warning_2.setVisible(_display_error_label) def check_widgets(self): if self.parent.data_files['normalized'] == []: status = False else: status = True self.parent.ui.actionRotate_Images.setEnabled(True)
en
0.251803
# o_step1_plot = Step1Plot(parent = self.parent) # o_step1_plot.display_2d_preview()
2.012195
2
src/OCR/architecture/util.py
tsteffek/LicensePlateReconstructor
2
6631493
<reponame>tsteffek/LicensePlateReconstructor<gh_stars>1-10 import logging from typing import Iterable, Any, List import torch from pytorch_lightning.metrics import Metric from torch import Tensor from torch import nn log = logging.getLogger('pytorch_lightning').getChild(__name__) class Img2Seq(nn.Module): def __init__(self): super().__init__() @staticmethod def forward(x: Tensor): B, C, H, W = x.shape x = x.reshape(B, -1, W) x = x.permute(2, 0, 1) # width/time step, batch, channel return x class ConfusionMatrix(Metric): def __init__(self, classes: List[Any]): super().__init__() self.classes = classes self.add_state( 'matrix', default=torch.full((len(classes), len(classes)), fill_value=0, dtype=torch.int64), dist_reduce_fx=lambda x: torch.sum(x, dim=-1) ) def compute(self) -> str: if torch.max(self.matrix) == 0: return '\nConfusion Matrix: nothing registered.' try: total = self.matrix.sum() tp = self.matrix.diagonal().sum() fp = total - tp str_matrix = '\nConfusion Matrix:\n' \ f'Total: {total} | Correct: {tp} | Wrong: {fp} | Acc: {tp / total}' \ '\n \t' + '\t'.join(self.classes) + '\tacc\ttotal' row_totals = self.matrix.sum(dim=1) row_accs = self.matrix.diagonal() / row_totals for char, row, acc, total in zip(self.classes, self.matrix, row_accs, row_totals): if total != 0: str_matrix += f'\n{char}\t' + '\t'.join(tensor_to_list(row)) + \ f'\t{tensor_to_string(acc)}\t{tensor_to_string(total)}' col_totals = self.matrix.sum(dim=0) col_accs = self.matrix.diagonal() / col_totals str_matrix += '\n \t' + '\t'.join(tensor_to_list(col_accs)) str_matrix += '\n \t' + '\t'.join(tensor_to_list(col_totals)) return str_matrix except: log.warning(f'Confusion Matrix failed for tensor of shape {self.matrix.shape}: {self.matrix}') def update(self, preds: Tensor, target: Tensor): # # this # uniques, count = torch.unique(torch.stack((targets, predictions)), return_counts=True) # t, p = uniques.unbind() # self.mat[t, p] = self.mat[t, p] + count # # or that for t, p in zip(target, preds): self.matrix[t, p] = self.matrix[t, p] + 1 def tensor_to_string(t: Tensor) -> str: return str(t.item()) def tensor_to_list(t: Tensor) -> Iterable[str]: return map(tensor_to_string, list(t))
import logging from typing import Iterable, Any, List import torch from pytorch_lightning.metrics import Metric from torch import Tensor from torch import nn log = logging.getLogger('pytorch_lightning').getChild(__name__) class Img2Seq(nn.Module): def __init__(self): super().__init__() @staticmethod def forward(x: Tensor): B, C, H, W = x.shape x = x.reshape(B, -1, W) x = x.permute(2, 0, 1) # width/time step, batch, channel return x class ConfusionMatrix(Metric): def __init__(self, classes: List[Any]): super().__init__() self.classes = classes self.add_state( 'matrix', default=torch.full((len(classes), len(classes)), fill_value=0, dtype=torch.int64), dist_reduce_fx=lambda x: torch.sum(x, dim=-1) ) def compute(self) -> str: if torch.max(self.matrix) == 0: return '\nConfusion Matrix: nothing registered.' try: total = self.matrix.sum() tp = self.matrix.diagonal().sum() fp = total - tp str_matrix = '\nConfusion Matrix:\n' \ f'Total: {total} | Correct: {tp} | Wrong: {fp} | Acc: {tp / total}' \ '\n \t' + '\t'.join(self.classes) + '\tacc\ttotal' row_totals = self.matrix.sum(dim=1) row_accs = self.matrix.diagonal() / row_totals for char, row, acc, total in zip(self.classes, self.matrix, row_accs, row_totals): if total != 0: str_matrix += f'\n{char}\t' + '\t'.join(tensor_to_list(row)) + \ f'\t{tensor_to_string(acc)}\t{tensor_to_string(total)}' col_totals = self.matrix.sum(dim=0) col_accs = self.matrix.diagonal() / col_totals str_matrix += '\n \t' + '\t'.join(tensor_to_list(col_accs)) str_matrix += '\n \t' + '\t'.join(tensor_to_list(col_totals)) return str_matrix except: log.warning(f'Confusion Matrix failed for tensor of shape {self.matrix.shape}: {self.matrix}') def update(self, preds: Tensor, target: Tensor): # # this # uniques, count = torch.unique(torch.stack((targets, predictions)), return_counts=True) # t, p = uniques.unbind() # self.mat[t, p] = self.mat[t, p] + count # # or that for t, p in zip(target, preds): self.matrix[t, p] = self.matrix[t, p] + 1 def tensor_to_string(t: Tensor) -> str: return str(t.item()) def tensor_to_list(t: Tensor) -> Iterable[str]: return map(tensor_to_string, list(t))
en
0.467911
# width/time step, batch, channel # # this # uniques, count = torch.unique(torch.stack((targets, predictions)), return_counts=True) # t, p = uniques.unbind() # self.mat[t, p] = self.mat[t, p] + count # # or that
2.214712
2
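A quick shape check for the Img2Seq reshaping above (flatten channel and height into one feature axis, move width to the front as the time dimension); the tensor sizes are arbitrary and the class is restated here so the snippet is self-contained.

import torch
from torch import nn

class Img2Seq(nn.Module):
    # Mirrors the record above: (B, C, H, W) -> (W, B, C*H).
    @staticmethod
    def forward(x):
        B, C, H, W = x.shape
        x = x.reshape(B, -1, W)
        return x.permute(2, 0, 1)  # width/time step, batch, channel*height

x = torch.randn(2, 3, 4, 5)   # batch=2, channels=3, height=4, width=5
print(Img2Seq()(x).shape)     # torch.Size([5, 2, 12])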
sympy/thirdparty/pyglet/pyglet/lib.py
gnulinooks/sympy
1
6631494
'''Functions for loading dynamic libraries. These extend and correct ctypes functions. ''' __docformat__ = 'restructuredtext' __version__ = '$Id: $' import os import re import sys import ctypes import ctypes.util class LibraryLoader(object): def load_library(self, *names, **kwargs): '''Find and load a library. More than one name can be specified, they will be tried in order. Platform-specific library names (given as kwargs) are tried first. Raises ImportError if library is not found. ''' if 'framework' in kwargs and self.platform == 'darwin': return self.load_framework(kwargs['framework']) platform_names = kwargs.get(self.platform, []) if type(platform_names) in (str, unicode): platform_names = [platform_names] elif type(platform_names) is tuple: platform_names = list(platform_names) if self.platform == 'linux2': platform_names.extend(['lib%s.so' % n for n in names]) platform_names.extend(names) for name in platform_names: try: return ctypes.cdll.LoadLibrary(name) except OSError: path = self.find_library(name) if path: try: return ctypes.cdll.LoadLibrary(path) except OSError: pass raise ImportError('Library "%s" not found.' % names[0]) find_library = lambda self, name: ctypes.util.find_library(name) platform = sys.platform if platform == 'cygwin': platform = 'win32' def load_framework(self, path): raise RuntimeError("Can't load framework on this platform.") class MachOLibraryLoader(LibraryLoader): def __init__(self): if 'LD_LIBRARY_PATH' in os.environ: self.ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':') else: self.ld_library_path = [] if 'DYLD_LIBRARY_PATH' in os.environ: self.dyld_library_path = os.environ['DYLD_LIBRARY_PATH'].split(':') else: self.dyld_library_path = [] if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ: self.dyld_fallback_library_path = \ os.environ['DYLD_FALLBACK_LIBRARY_PATH'].split(':') else: self.dyld_fallback_library_path = [ os.path.expanduser('~/lib'), '/usr/local/lib', '/usr/lib'] def find_library(self, path): '''Implements the dylib search as specified in Apple documentation: http://developer.apple.com/documentation/DeveloperTools/Conceptual/DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html ''' libname = os.path.basename(path) if '/' in path: search_path = ( [os.path.join(p, libname) \ for p in self.dyld_library_path] + [path] + [os.path.join(p, libname) \ for p in self.dyld_fallback_library_path]) else: search_path = ( [os.path.join(p, libname) \ for p in self.ld_library_path] + [os.path.join(p, libname) \ for p in self.dyld_library_path] + [path] + [os.path.join(p, libname) \ for p in self.dyld_fallback_library_path]) for path in search_path: if os.path.exists(path): return path return None def find_framework(self, path): '''Implement runtime framework search as described by: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/FrameworkBinding.html ''' # e.g. path == '/System/Library/Frameworks/OpenGL.framework' # name == 'OpenGL' # return '/System/Library/Frameworks/OpenGL.framework/OpenGL' name = os.path.splitext(os.path.split(path)[1])[0] realpath = os.path.join(path, name) if os.path.exists(realpath): return realpath for dir in ('/Library/Frameworks', '/System/Library/Frameworks'): realpath = os.path.join(dir, '%s.framework' % name, name) if os.path.exists(realpath): return realpath return None def load_framework(self, path): realpath = self.find_framework(path) if realpath: return ctypes.cdll.LoadLibrary(realpath) raise ImportError("Can't find framework %s." 
% path) class LinuxLibraryLoader(LibraryLoader): _ld_so_cache = None def _create_ld_so_cache(self): # Recreate search path followed by ld.so. This is going to be # slow to build, and incorrect (ld.so uses ld.so.cache, which may # not be up-to-date). Used only as fallback for distros without # /sbin/ldconfig. # # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted. directories = [] try: directories.extend(os.environ['LD_LIBRARY_PATH'].split(':')) except KeyError: pass try: directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')]) except IOError: pass directories.extend(['/lib', '/usr/lib']) cache = {} lib_re = re.compile('lib(.*)\.so') for dir in directories: try: for file in os.listdir(dir): if '.so' not in file: continue # Index by filename path = os.path.join(dir, file) if file not in cache: cache[file] = path # Index by library name match = lib_re.match(file) if match: library = match.group(1) if library not in cache: cache[library] = path except OSError: pass self._ld_so_cache = cache def find_library(self, path): # ctypes tries ldconfig, gcc and objdump. If none of these are # present, we implement the ld-linux.so search path as described in # the man page. result = ctypes.util.find_library(path) if result: return result if self._ld_so_cache is None: self._create_ld_so_cache() return self._ld_so_cache.get(path) if sys.platform == 'darwin': loader = MachOLibraryLoader() elif sys.platform == 'linux2': loader = LinuxLibraryLoader() else: loader = LibraryLoader() load_library = loader.load_library
'''Functions for loading dynamic libraries. These extend and correct ctypes functions. ''' __docformat__ = 'restructuredtext' __version__ = '$Id: $' import os import re import sys import ctypes import ctypes.util class LibraryLoader(object): def load_library(self, *names, **kwargs): '''Find and load a library. More than one name can be specified, they will be tried in order. Platform-specific library names (given as kwargs) are tried first. Raises ImportError if library is not found. ''' if 'framework' in kwargs and self.platform == 'darwin': return self.load_framework(kwargs['framework']) platform_names = kwargs.get(self.platform, []) if type(platform_names) in (str, unicode): platform_names = [platform_names] elif type(platform_names) is tuple: platform_names = list(platform_names) if self.platform == 'linux2': platform_names.extend(['lib%s.so' % n for n in names]) platform_names.extend(names) for name in platform_names: try: return ctypes.cdll.LoadLibrary(name) except OSError: path = self.find_library(name) if path: try: return ctypes.cdll.LoadLibrary(path) except OSError: pass raise ImportError('Library "%s" not found.' % names[0]) find_library = lambda self, name: ctypes.util.find_library(name) platform = sys.platform if platform == 'cygwin': platform = 'win32' def load_framework(self, path): raise RuntimeError("Can't load framework on this platform.") class MachOLibraryLoader(LibraryLoader): def __init__(self): if 'LD_LIBRARY_PATH' in os.environ: self.ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':') else: self.ld_library_path = [] if 'DYLD_LIBRARY_PATH' in os.environ: self.dyld_library_path = os.environ['DYLD_LIBRARY_PATH'].split(':') else: self.dyld_library_path = [] if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ: self.dyld_fallback_library_path = \ os.environ['DYLD_FALLBACK_LIBRARY_PATH'].split(':') else: self.dyld_fallback_library_path = [ os.path.expanduser('~/lib'), '/usr/local/lib', '/usr/lib'] def find_library(self, path): '''Implements the dylib search as specified in Apple documentation: http://developer.apple.com/documentation/DeveloperTools/Conceptual/DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html ''' libname = os.path.basename(path) if '/' in path: search_path = ( [os.path.join(p, libname) \ for p in self.dyld_library_path] + [path] + [os.path.join(p, libname) \ for p in self.dyld_fallback_library_path]) else: search_path = ( [os.path.join(p, libname) \ for p in self.ld_library_path] + [os.path.join(p, libname) \ for p in self.dyld_library_path] + [path] + [os.path.join(p, libname) \ for p in self.dyld_fallback_library_path]) for path in search_path: if os.path.exists(path): return path return None def find_framework(self, path): '''Implement runtime framework search as described by: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/FrameworkBinding.html ''' # e.g. path == '/System/Library/Frameworks/OpenGL.framework' # name == 'OpenGL' # return '/System/Library/Frameworks/OpenGL.framework/OpenGL' name = os.path.splitext(os.path.split(path)[1])[0] realpath = os.path.join(path, name) if os.path.exists(realpath): return realpath for dir in ('/Library/Frameworks', '/System/Library/Frameworks'): realpath = os.path.join(dir, '%s.framework' % name, name) if os.path.exists(realpath): return realpath return None def load_framework(self, path): realpath = self.find_framework(path) if realpath: return ctypes.cdll.LoadLibrary(realpath) raise ImportError("Can't find framework %s." 
% path) class LinuxLibraryLoader(LibraryLoader): _ld_so_cache = None def _create_ld_so_cache(self): # Recreate search path followed by ld.so. This is going to be # slow to build, and incorrect (ld.so uses ld.so.cache, which may # not be up-to-date). Used only as fallback for distros without # /sbin/ldconfig. # # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted. directories = [] try: directories.extend(os.environ['LD_LIBRARY_PATH'].split(':')) except KeyError: pass try: directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')]) except IOError: pass directories.extend(['/lib', '/usr/lib']) cache = {} lib_re = re.compile('lib(.*)\.so') for dir in directories: try: for file in os.listdir(dir): if '.so' not in file: continue # Index by filename path = os.path.join(dir, file) if file not in cache: cache[file] = path # Index by library name match = lib_re.match(file) if match: library = match.group(1) if library not in cache: cache[library] = path except OSError: pass self._ld_so_cache = cache def find_library(self, path): # ctypes tries ldconfig, gcc and objdump. If none of these are # present, we implement the ld-linux.so search path as described in # the man page. result = ctypes.util.find_library(path) if result: return result if self._ld_so_cache is None: self._create_ld_so_cache() return self._ld_so_cache.get(path) if sys.platform == 'darwin': loader = MachOLibraryLoader() elif sys.platform == 'linux2': loader = LinuxLibraryLoader() else: loader = LibraryLoader() load_library = loader.load_library
en
0.831032
Functions for loading dynamic libraries. These extend and correct ctypes functions. Find and load a library. More than one name can be specified, they will be tried in order. Platform-specific library names (given as kwargs) are tried first. Raises ImportError if library is not found. Implements the dylib search as specified in Apple documentation: http://developer.apple.com/documentation/DeveloperTools/Conceptual/DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html Implement runtime framework search as described by: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/FrameworkBinding.html # e.g. path == '/System/Library/Frameworks/OpenGL.framework' # name == 'OpenGL' # return '/System/Library/Frameworks/OpenGL.framework/OpenGL' # Recreate search path followed by ld.so. This is going to be # slow to build, and incorrect (ld.so uses ld.so.cache, which may # not be up-to-date). Used only as fallback for distros without # /sbin/ldconfig. # # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted. # Index by filename # Index by library name # ctypes tries ldconfig, gcc and objdump. If none of these are # present, we implement the ld-linux.so search path as described in # the man page.
3.05038
3
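An illustrative call to the load_library helper above: positional names are tried in order and platform-specific names can be passed as keyword arguments. The library names and framework path here are guesses, not guaranteed to resolve on a given machine, and this bundled copy of pyglet targets Python 2.

from pyglet.lib import load_library  # module path inferred from pyglet/lib.py

# Uses the win32 name on Windows, the framework on macOS, and lib<name>.so / <name> on Linux;
# raises ImportError if nothing can be loaded.
gl = load_library('GL',
                  win32='opengl32',
                  framework='/System/Library/Frameworks/OpenGL.framework')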
third_party/google/apputils/tests/datelib_unittest.py
lisagorewitdecker/immaculater
0
6631495
#!/usr/bin/env python # Copyright 2002 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for datelib.py module.""" import datetime import random import time import pytz from google.apputils import basetest from google.apputils import datelib class TimestampUnitTest(basetest.TestCase): seed = 1979 def testTzAwareSuccession(self): a = datelib.Timestamp.now() b = datelib.Timestamp.utcnow() self.assertLessEqual(a, b) def testTzRandomConversion(self): random.seed(self.seed) for unused_i in range(100): stz = pytz.timezone(random.choice(pytz.all_timezones)) a = datelib.Timestamp.FromString('2008-04-12T10:00:00', stz) b = a for unused_j in range(100): b = b.astimezone(pytz.timezone(random.choice(pytz.all_timezones))) self.assertEqual(a, b) random.seed() def testMicroTimestampConversion(self): """Test that f1(f2(a)) == a.""" def IsEq(x): self.assertEqual( x, datelib.Timestamp.FromMicroTimestamp(x).AsMicroTimestamp()) IsEq(0) IsEq(datelib.MAXIMUM_MICROSECOND_TIMESTAMP) random.seed(self.seed) for _ in range(100): IsEq(random.randint(0, datelib.MAXIMUM_MICROSECOND_TIMESTAMP)) def testMicroTimestampKnown(self): self.assertEqual(0, datelib.Timestamp.FromString( '1970-01-01T00:00:00', pytz.UTC).AsMicroTimestamp()) self.assertEqual( datelib.MAXIMUM_MICROSECOND_TIMESTAMP, datelib.MAXIMUM_MICROSECOND_TIMESTAMP_AS_TS.AsMicroTimestamp()) def testMicroTimestampOrdering(self): """Test that cmp(a, b) == cmp(f1(a), f1(b)).""" def IsEq(a, b): self.assertEqual( cmp(a, b), cmp(datelib.Timestamp.FromMicroTimestamp(a), datelib.Timestamp.FromMicroTimestamp(b))) random.seed(self.seed) for unused_i in range(100): IsEq( random.randint(0, datelib.MAXIMUM_MICROSECOND_TIMESTAMP), random.randint(0, datelib.MAXIMUM_MICROSECOND_TIMESTAMP)) def testCombine(self): for tz in (datelib.UTC, datelib.US_PACIFIC): self.assertEqual( tz.localize(datelib.Timestamp(1970, 1, 1, 0, 0, 0, 0)), datelib.Timestamp.combine( datelib.datetime.date(1970, 1, 1), datelib.datetime.time(0, 0, 0), tz)) self.assertEqual( tz.localize(datelib.Timestamp(9998, 12, 31, 23, 59, 59, 999999)), datelib.Timestamp.combine( datelib.datetime.date(9998, 12, 31), datelib.datetime.time(23, 59, 59, 999999), tz)) def testStrpTime(self): time_str = '20130829 23:43:19.206' time_fmt = '%Y%m%d %H:%M:%S.%f' expected = datelib.Timestamp(2013, 8, 29, 23, 43, 19, 206000) for tz in (datelib.UTC, datelib.US_PACIFIC): if tz == datelib.LocalTimezone: actual = datelib.Timestamp.strptime(time_str, time_fmt) else: actual = datelib.Timestamp.strptime(time_str, time_fmt, tz) self.assertEqual(tz.localize(expected), actual) def testFromString1(self): for string_zero in ( '1970-01-01 00:00:00', '19700101T000000', '1970-01-01T00:00:00' ): for testtz in (datelib.UTC, datelib.US_PACIFIC): self.assertEqual( datelib.Timestamp.FromString(string_zero, testtz), testtz.localize(datelib.Timestamp(1970, 1, 1, 0, 0, 0, 0))) self.assertEqual( datelib.Timestamp.FromString( '1970-01-01T00:00:00+0000', datelib.US_PACIFIC), datelib.UTC.localize(datelib.Timestamp(1970, 1, 1, 0, 0, 0, 
0))) startdate = datelib.US_PACIFIC.localize( datelib.Timestamp(2009, 1, 1, 3, 0, 0, 0)) for day in range(1, 366): self.assertEqual( datelib.Timestamp.FromString(startdate.isoformat()), startdate, 'FromString works for day %d since 2009-01-01' % day) startdate += datelib.datetime.timedelta(days=1) def testFromString2(self): """Test correctness of parsing the local time in a given timezone. The result shall always be the same as tz.localize(naive_time). """ baseday = datelib.datetime.date(2009, 1, 1).toordinal() for day_offset in range(0, 365): day = datelib.datetime.date.fromordinal(baseday + day_offset) naive_day = datelib.datetime.datetime.combine( day, datelib.datetime.time(0, 45, 9)) naive_day_str = naive_day.strftime('%Y-%m-%dT%H:%M:%S') self.assertEqual( datelib.US_PACIFIC.localize(naive_day), datelib.Timestamp.FromString(naive_day_str, tz=datelib.US_PACIFIC), 'FromString localizes time incorrectly') def testFromStringInterval(self): expected_date = datetime.datetime.utcnow() - datetime.timedelta(days=1) expected_s = time.mktime(expected_date.utctimetuple()) actual_date = datelib.Timestamp.FromString('1d') actual_s = time.mktime(actual_date.timetuple()) diff_seconds = actual_s - expected_s self.assertBetween(diff_seconds, 0, 1) self.assertRaises( datelib.TimeParseError, datelib.Timestamp.FromString, 'wat') def _EpochToDatetime(t, tz=None): if tz is not None: return datelib.datetime.datetime.fromtimestamp(t, tz) else: return datelib.datetime.datetime.utcfromtimestamp(t) class DatetimeConversionUnitTest(basetest.TestCase): def setUp(self): self.pst = pytz.timezone('US/Pacific') self.utc = pytz.utc self.now = time.time() def testDatetimeToUTCMicros(self): self.assertEqual( 0, datelib.DatetimeToUTCMicros(_EpochToDatetime(0))) self.assertEqual( 1001 * long(datelib._MICROSECONDS_PER_SECOND), datelib.DatetimeToUTCMicros(_EpochToDatetime(1001))) self.assertEqual(long(self.now * datelib._MICROSECONDS_PER_SECOND), datelib.DatetimeToUTCMicros(_EpochToDatetime(self.now))) # tzinfo shouldn't change the result self.assertEqual( 0, datelib.DatetimeToUTCMicros(_EpochToDatetime(0, tz=self.pst))) def testDatetimeToUTCMillis(self): self.assertEqual( 0, datelib.DatetimeToUTCMillis(_EpochToDatetime(0))) self.assertEqual( 1001 * 1000L, datelib.DatetimeToUTCMillis(_EpochToDatetime(1001))) self.assertEqual(long(self.now * 1000), datelib.DatetimeToUTCMillis(_EpochToDatetime(self.now))) # tzinfo shouldn't change the result self.assertEqual( 0, datelib.DatetimeToUTCMillis(_EpochToDatetime(0, tz=self.pst))) def testUTCMicrosToDatetime(self): self.assertEqual(_EpochToDatetime(0), datelib.UTCMicrosToDatetime(0)) self.assertEqual(_EpochToDatetime(1.000001), datelib.UTCMicrosToDatetime(1000001)) self.assertEqual(_EpochToDatetime(self.now), datelib.UTCMicrosToDatetime( long(self.now * datelib._MICROSECONDS_PER_SECOND))) # Check timezone-aware comparisons self.assertEqual(_EpochToDatetime(0, self.pst), datelib.UTCMicrosToDatetime(0, tz=self.pst)) self.assertEqual(_EpochToDatetime(0, self.pst), datelib.UTCMicrosToDatetime(0, tz=self.utc)) def testUTCMillisToDatetime(self): self.assertEqual(_EpochToDatetime(0), datelib.UTCMillisToDatetime(0)) self.assertEqual(_EpochToDatetime(1.001), datelib.UTCMillisToDatetime(1001)) t = time.time() dt = _EpochToDatetime(t) # truncate sub-milli time dt -= datelib.datetime.timedelta(microseconds=dt.microsecond % 1000) self.assertEqual(dt, datelib.UTCMillisToDatetime(long(t * 1000))) # Check timezone-aware comparisons self.assertEqual(_EpochToDatetime(0, self.pst), 
datelib.UTCMillisToDatetime(0, tz=self.pst)) self.assertEqual(_EpochToDatetime(0, self.pst), datelib.UTCMillisToDatetime(0, tz=self.utc)) class MicrosecondsToSecondsUnitTest(basetest.TestCase): def testConversionFromMicrosecondsToSeconds(self): self.assertEqual(0.0, datelib.MicrosecondsToSeconds(0)) self.assertEqual(7.0, datelib.MicrosecondsToSeconds(7000000)) self.assertEqual(1.234567, datelib.MicrosecondsToSeconds(1234567)) self.assertEqual(12345654321.123456, datelib.MicrosecondsToSeconds(12345654321123456)) if __name__ == '__main__': basetest.main()
#!/usr/bin/env python # Copyright 2002 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for datelib.py module.""" import datetime import random import time import pytz from google.apputils import basetest from google.apputils import datelib class TimestampUnitTest(basetest.TestCase): seed = 1979 def testTzAwareSuccession(self): a = datelib.Timestamp.now() b = datelib.Timestamp.utcnow() self.assertLessEqual(a, b) def testTzRandomConversion(self): random.seed(self.seed) for unused_i in range(100): stz = pytz.timezone(random.choice(pytz.all_timezones)) a = datelib.Timestamp.FromString('2008-04-12T10:00:00', stz) b = a for unused_j in range(100): b = b.astimezone(pytz.timezone(random.choice(pytz.all_timezones))) self.assertEqual(a, b) random.seed() def testMicroTimestampConversion(self): """Test that f1(f2(a)) == a.""" def IsEq(x): self.assertEqual( x, datelib.Timestamp.FromMicroTimestamp(x).AsMicroTimestamp()) IsEq(0) IsEq(datelib.MAXIMUM_MICROSECOND_TIMESTAMP) random.seed(self.seed) for _ in range(100): IsEq(random.randint(0, datelib.MAXIMUM_MICROSECOND_TIMESTAMP)) def testMicroTimestampKnown(self): self.assertEqual(0, datelib.Timestamp.FromString( '1970-01-01T00:00:00', pytz.UTC).AsMicroTimestamp()) self.assertEqual( datelib.MAXIMUM_MICROSECOND_TIMESTAMP, datelib.MAXIMUM_MICROSECOND_TIMESTAMP_AS_TS.AsMicroTimestamp()) def testMicroTimestampOrdering(self): """Test that cmp(a, b) == cmp(f1(a), f1(b)).""" def IsEq(a, b): self.assertEqual( cmp(a, b), cmp(datelib.Timestamp.FromMicroTimestamp(a), datelib.Timestamp.FromMicroTimestamp(b))) random.seed(self.seed) for unused_i in range(100): IsEq( random.randint(0, datelib.MAXIMUM_MICROSECOND_TIMESTAMP), random.randint(0, datelib.MAXIMUM_MICROSECOND_TIMESTAMP)) def testCombine(self): for tz in (datelib.UTC, datelib.US_PACIFIC): self.assertEqual( tz.localize(datelib.Timestamp(1970, 1, 1, 0, 0, 0, 0)), datelib.Timestamp.combine( datelib.datetime.date(1970, 1, 1), datelib.datetime.time(0, 0, 0), tz)) self.assertEqual( tz.localize(datelib.Timestamp(9998, 12, 31, 23, 59, 59, 999999)), datelib.Timestamp.combine( datelib.datetime.date(9998, 12, 31), datelib.datetime.time(23, 59, 59, 999999), tz)) def testStrpTime(self): time_str = '20130829 23:43:19.206' time_fmt = '%Y%m%d %H:%M:%S.%f' expected = datelib.Timestamp(2013, 8, 29, 23, 43, 19, 206000) for tz in (datelib.UTC, datelib.US_PACIFIC): if tz == datelib.LocalTimezone: actual = datelib.Timestamp.strptime(time_str, time_fmt) else: actual = datelib.Timestamp.strptime(time_str, time_fmt, tz) self.assertEqual(tz.localize(expected), actual) def testFromString1(self): for string_zero in ( '1970-01-01 00:00:00', '19700101T000000', '1970-01-01T00:00:00' ): for testtz in (datelib.UTC, datelib.US_PACIFIC): self.assertEqual( datelib.Timestamp.FromString(string_zero, testtz), testtz.localize(datelib.Timestamp(1970, 1, 1, 0, 0, 0, 0))) self.assertEqual( datelib.Timestamp.FromString( '1970-01-01T00:00:00+0000', datelib.US_PACIFIC), datelib.UTC.localize(datelib.Timestamp(1970, 1, 1, 0, 0, 0, 
0))) startdate = datelib.US_PACIFIC.localize( datelib.Timestamp(2009, 1, 1, 3, 0, 0, 0)) for day in range(1, 366): self.assertEqual( datelib.Timestamp.FromString(startdate.isoformat()), startdate, 'FromString works for day %d since 2009-01-01' % day) startdate += datelib.datetime.timedelta(days=1) def testFromString2(self): """Test correctness of parsing the local time in a given timezone. The result shall always be the same as tz.localize(naive_time). """ baseday = datelib.datetime.date(2009, 1, 1).toordinal() for day_offset in range(0, 365): day = datelib.datetime.date.fromordinal(baseday + day_offset) naive_day = datelib.datetime.datetime.combine( day, datelib.datetime.time(0, 45, 9)) naive_day_str = naive_day.strftime('%Y-%m-%dT%H:%M:%S') self.assertEqual( datelib.US_PACIFIC.localize(naive_day), datelib.Timestamp.FromString(naive_day_str, tz=datelib.US_PACIFIC), 'FromString localizes time incorrectly') def testFromStringInterval(self): expected_date = datetime.datetime.utcnow() - datetime.timedelta(days=1) expected_s = time.mktime(expected_date.utctimetuple()) actual_date = datelib.Timestamp.FromString('1d') actual_s = time.mktime(actual_date.timetuple()) diff_seconds = actual_s - expected_s self.assertBetween(diff_seconds, 0, 1) self.assertRaises( datelib.TimeParseError, datelib.Timestamp.FromString, 'wat') def _EpochToDatetime(t, tz=None): if tz is not None: return datelib.datetime.datetime.fromtimestamp(t, tz) else: return datelib.datetime.datetime.utcfromtimestamp(t) class DatetimeConversionUnitTest(basetest.TestCase): def setUp(self): self.pst = pytz.timezone('US/Pacific') self.utc = pytz.utc self.now = time.time() def testDatetimeToUTCMicros(self): self.assertEqual( 0, datelib.DatetimeToUTCMicros(_EpochToDatetime(0))) self.assertEqual( 1001 * long(datelib._MICROSECONDS_PER_SECOND), datelib.DatetimeToUTCMicros(_EpochToDatetime(1001))) self.assertEqual(long(self.now * datelib._MICROSECONDS_PER_SECOND), datelib.DatetimeToUTCMicros(_EpochToDatetime(self.now))) # tzinfo shouldn't change the result self.assertEqual( 0, datelib.DatetimeToUTCMicros(_EpochToDatetime(0, tz=self.pst))) def testDatetimeToUTCMillis(self): self.assertEqual( 0, datelib.DatetimeToUTCMillis(_EpochToDatetime(0))) self.assertEqual( 1001 * 1000L, datelib.DatetimeToUTCMillis(_EpochToDatetime(1001))) self.assertEqual(long(self.now * 1000), datelib.DatetimeToUTCMillis(_EpochToDatetime(self.now))) # tzinfo shouldn't change the result self.assertEqual( 0, datelib.DatetimeToUTCMillis(_EpochToDatetime(0, tz=self.pst))) def testUTCMicrosToDatetime(self): self.assertEqual(_EpochToDatetime(0), datelib.UTCMicrosToDatetime(0)) self.assertEqual(_EpochToDatetime(1.000001), datelib.UTCMicrosToDatetime(1000001)) self.assertEqual(_EpochToDatetime(self.now), datelib.UTCMicrosToDatetime( long(self.now * datelib._MICROSECONDS_PER_SECOND))) # Check timezone-aware comparisons self.assertEqual(_EpochToDatetime(0, self.pst), datelib.UTCMicrosToDatetime(0, tz=self.pst)) self.assertEqual(_EpochToDatetime(0, self.pst), datelib.UTCMicrosToDatetime(0, tz=self.utc)) def testUTCMillisToDatetime(self): self.assertEqual(_EpochToDatetime(0), datelib.UTCMillisToDatetime(0)) self.assertEqual(_EpochToDatetime(1.001), datelib.UTCMillisToDatetime(1001)) t = time.time() dt = _EpochToDatetime(t) # truncate sub-milli time dt -= datelib.datetime.timedelta(microseconds=dt.microsecond % 1000) self.assertEqual(dt, datelib.UTCMillisToDatetime(long(t * 1000))) # Check timezone-aware comparisons self.assertEqual(_EpochToDatetime(0, self.pst), 
datelib.UTCMillisToDatetime(0, tz=self.pst)) self.assertEqual(_EpochToDatetime(0, self.pst), datelib.UTCMillisToDatetime(0, tz=self.utc)) class MicrosecondsToSecondsUnitTest(basetest.TestCase): def testConversionFromMicrosecondsToSeconds(self): self.assertEqual(0.0, datelib.MicrosecondsToSeconds(0)) self.assertEqual(7.0, datelib.MicrosecondsToSeconds(7000000)) self.assertEqual(1.234567, datelib.MicrosecondsToSeconds(1234567)) self.assertEqual(12345654321.123456, datelib.MicrosecondsToSeconds(12345654321123456)) if __name__ == '__main__': basetest.main()
en
0.773007
#!/usr/bin/env python # Copyright 2002 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Unittest for datelib.py module. Test that f1(f2(a)) == a. Test that cmp(a, b) == cmp(f1(a), f1(b)). Test correctness of parsing the local time in a given timezone. The result shall always be the same as tz.localize(naive_time). # tzinfo shouldn't change the result # tzinfo shouldn't change the result # Check timezone-aware comparisons # truncate sub-milli time # Check timezone-aware comparisons
2.290255
2
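Grounded in testMicroTimestampKnown above, a tiny sketch of the datelib API the tests exercise; note that google-apputils targets Python 2 (long literals, cmp), so this is illustrative only.

import pytz
from google.apputils import datelib

ts = datelib.Timestamp.FromString('1970-01-01T00:00:00', pytz.UTC)
print(ts.AsMicroTimestamp())  # 0, per testMicroTimestampKnown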
scenario/quadcopter.py
HKUST-JM/iLQR_Traj_Trac
0
6631496
from math import sin import numpy as np import sympy as sp from .dynamic_model import DynamicModelBase from utils.Logger import logger import matplotlib.pyplot as plt import matplotlib.patches as patches import matplotlib as mpl import math class QuadCopter(DynamicModelBase): def __init__(self, is_with_constraints = True, T = 100): ##### Dynamic Function ######## n, m = 12, 4 # number of state = 12, number of action = 4, prediction horizon = 100 h_constant = 0.02 # sampling time x_u_var = sp.symbols('x_u:16') ueq = 1.962 # p_x p_y p_z # v_x v_y v_z # phi(6) theta(7) psi(8) # omega_x omega_y omega_z # f1 f2 f3 f4 Jx = 0.0244 Jy = 0.0244 Jz = 0.0436 mass = 0.8 g_constant = 9.81 L_constant = 0.165 # m c_constant = 0.002167 # m cos_phi = sp.cos(x_u_var[6]) sin_phi = sp.sin(x_u_var[6]) cos_theta = sp.cos(x_u_var[7]) sin_theta = sp.sin(x_u_var[7]) cos_psi = sp.cos(x_u_var[8]) sin_psi = sp.sin(x_u_var[8]) e_constant = np.asarray([0,0,1]).reshape(-1,1) R_matrix = sp.Matrix([[cos_theta*cos_psi, cos_theta*sin_psi, -sin_theta], [sin_phi*sin_theta*cos_psi-cos_phi*sin_psi, sin_phi*sin_theta*sin_psi+cos_phi*cos_psi, sin_phi*cos_theta], [cos_phi*sin_theta*cos_psi+sin_phi*sin_psi, cos_phi*sin_theta*sin_psi-sin_phi*cos_psi, cos_phi*cos_theta]]) W_matrix = sp.Matrix([[1.0, sin_phi*sin_theta/cos_theta, cos_phi*sin_theta/cos_theta], [0.0, cos_phi, -sin_phi], [0.0, sin_phi/cos_theta, cos_phi/cos_theta]]) J_matrix = np.diag([Jx, Jy, Jz]) pos = sp.Matrix([[x_u_var[0]], [x_u_var[1]], [x_u_var[2]]]) vel = sp.Matrix([[x_u_var[3]], [x_u_var[4]], [x_u_var[5]]]) ang = sp.Matrix([[x_u_var[6]], [x_u_var[7]], [x_u_var[8]]]) ang_vel = sp.Matrix([[x_u_var[9]], [x_u_var[10]], [x_u_var[11]]]) # Dynamics params pos_dot = R_matrix.T * vel vel_dot = -ang_vel.cross(vel) + R_matrix @ (g_constant * e_constant) ang_dot = W_matrix * ang_vel angvel_dot = np.linalg.inv(J_matrix) @ (-ang_vel.cross(J_matrix * ang_vel)) # Make constant Bc matrix Bc = np.zeros((12, 4)) Bc[5, 0] = -1.0/mass Bc[5, 1] = -1.0/mass Bc[5, 2] = -1.0/mass Bc[5, 3] = -1.0/mass Bc[9, 1] = -L_constant/Jx Bc[9, 3] = L_constant/Jx Bc[10, 0] = L_constant/Jy Bc[10, 2] = -L_constant/Jy Bc[11, 0] = -c_constant/Jz Bc[11, 1] = c_constant/Jz Bc[11, 2] = -c_constant/Jz Bc[11, 3] = c_constant/Jz dynamic_function = sp.Matrix([ pos + pos_dot * h_constant, vel + vel_dot * h_constant, ang + ang_dot * h_constant, ang_vel + angvel_dot * h_constant]) + h_constant * Bc * sp.Matrix([[x_u_var[12] + ueq], [x_u_var[13] + ueq], [x_u_var[14] + ueq], [x_u_var[15] + ueq]]) init_state = np.asarray([0,0,0,0,0,0,0,0,0,0,0,0],dtype=np.float64).reshape(-1,1) init_action = np.zeros((T,m,1)) if is_with_constraints: box_constr = np.asarray([ [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.pi/2, np.pi/2], [-np.pi/2, np.pi/2], [-np.pi, np.pi], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-2, 2], [-2, 2], [-2, 2], [-2, 2]]) other_constr = [-((x_u_var[0] - 0.3)**2 + (x_u_var[1] - 0.3)**2 + (x_u_var[2] - 0.3)**2 - 0.01), -((x_u_var[0] - 0.5)**2 + (x_u_var[1] - 0.5)**2 + (x_u_var[2] - 0.6)**2 - 0.01), -((x_u_var[0] - 0.7)**2 + (x_u_var[1] - 0.7)**2 + (x_u_var[2] - 0.7)**2 - 0.01)] else: box_constr = np.asarray([ [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) other_constr 
= [] ##### Objective Function ######## position_var = sp.symbols("p:3") # x and y add_param = np.hstack([np.ones(T).reshape(-1,1), np.ones(T).reshape(-1,1), np.ones(T).reshape(-1,1)]) C_matrix = np.diag(np.zeros(16)) C_matrix[0,0] = C_matrix[1,1] = C_matrix[2,2] = 10 C_matrix[3,3] = C_matrix[4,4] = C_matrix[5,5] = 1 r_vector = np.asarray([ position_var[0], position_var[1], position_var[2], 0.,0.,0., 0.,0.,0., 0.,0.,0., 0.,0.,0.,0.]) obj_fun = (x_u_var - r_vector)@C_matrix@(x_u_var - r_vector) super().__init__( dynamic_function=sp.Array(dynamic_function)[:,0], x_u_var = x_u_var, box_constr = box_constr, other_constr = other_constr, init_state = init_state, init_action = init_action, obj_fun = obj_fun, add_param_var= position_var, add_param= add_param) def rotation_matrix(self,angles): ct = math.cos(angles[0]) cp = math.cos(angles[1]) cg = math.cos(angles[2]) st = math.sin(angles[0]) sp = math.sin(angles[1]) sg = math.sin(angles[2]) R_x = np.array([[1,0,0],[0,ct,-st],[0,st,ct]]) R_y = np.array([[cp,0,sp],[0,1,0],[-sp,0,cp]]) R_z = np.array([[cg,-sg,0],[sg,cg,0],[0,0,1]]) R = np.dot(R_z, np.dot( R_y, R_x )) return R def update(self): for key in self.quads: R = self.rotation_matrix(self.quads[key]['orientation']) L = self.quads[key]['L'] points = np.array([ [-L,0,0], [L,0,0], [0,-L,0], [0,L,0], [0,0,0], [0,0,0] ]).T points = np.dot(R,points) points[0,:] += self.quads[key]['position'][0] points[1,:] += self.quads[key]['position'][1] points[2,:] += self.quads[key]['position'][2] self.quads[key]['l1'].set_data(points[0,0:2],points[1,0:2]) self.quads[key]['l1'].set_3d_properties(points[2,0:2]) self.quads[key]['l2'].set_data(points[0,2:4],points[1,2:4]) self.quads[key]['l2'].set_3d_properties(points[2,2:4]) self.quads[key]['hub'].set_data(points[0,5],points[1,5]) self.quads[key]['hub'].set_3d_properties(points[2,5]) plt.pause(0.000000000000001) def play(self, logger_folder=None, no_iter = -1): """ If logger_folder exists and the result file is saved, then the specific iteration can be chosen to play the animation. \\ Parameter ---------- logger_folder : string The name of the logger folder no_iter : int The number of iteration to play the animation """ fig, ax = super().create_plot(figsize=(8, 8), xlim=(0,1.2), ylim=(0,1.2), zlim=(0,1.2), is_3d=True, is_equal = False) def draw_sphere(xx,yy,zz,rr): u, v = np.mgrid[0:2*np.pi:10j, 0:np.pi:10j] x = xx + rr*np.cos(u)*np.sin(v) y = yy + rr*np.sin(u)*np.sin(v) z = zz + rr*np.cos(v) ax.plot_wireframe(x, y, z, color="silver", alpha = 0.6) draw_sphere(0.3, 0.3, 0.3, 0.1) draw_sphere(0.5, 0.5, 0.6, 0.1) draw_sphere(0.7, 0.7, 0.7, 0.1) self.quads = {'q1':{'position':[0,0,0],'orientation':[0,0,0],'L':0.1}} for key in self.quads: self.quads[key]['l1'], = ax.plot([],[],[],color='deepskyblue',linewidth=3,antialiased=False) self.quads[key]['l2'], = ax.plot([],[],[],color='skyblue',linewidth=3,antialiased=False) self.quads[key]['hub'], = ax.plot([],[],[],marker='o',color='orange', markersize = 10, antialiased=False) trajectory = np.asarray(logger.read_from_json(logger_folder, no_iter)["trajectory"]) ax.plot3D(trajectory[:,0,0], trajectory[:,1,0], trajectory[:,2,0], color = 'lightcoral') self._is_interrupted=False for i in range(self.T): # car.center = trajectory[i,0,0], trajectory[i,1,0] self.quads['q1']['position'] = [trajectory[i,0,0], trajectory[i,1,0], trajectory[i,2,0]] self.quads['q1']['orientation'] = [trajectory[i,6,0], trajectory[i,7,0], trajectory[i,8,0]] self.update() if self._is_interrupted: return self._is_interrupted = True
en
0.577721
##### Dynamic Function ######## # number of state = 12, number of action = 4, prediction horizon = 100 # sampling time # p_x p_y p_z # v_x v_y v_z # phi(6) theta(7) psi(8) # omega_x omega_y omega_z # f1 f2 f3 f4 # m # m # Dynamics params # Make constant Bc matrix ##### Objective Function ######## # x and y If logger_folder exists and the result file is saved, then the specific iteration can be chosen to play the animation. \\ Parameter ---------- logger_folder : string The name of the logger folder no_iter : int The number of iteration to play the animation # car.center = trajectory[i,0,0], trajectory[i,1,0]
2.49758
2
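The QuadCopter record above discretizes its continuous-time dynamics by forward Euler, assembling dynamic_function as state + h*f(state) plus h*Bc times the shifted input. What follows is a minimal sketch of that update rule only; euler_step, f, and the zero placeholders are illustrative stand-ins rather than the class's own symbols, while the h=0.02 and ueq=1.962 defaults mirror the constants in the record.

import numpy as np

def euler_step(x, u, f, Bc, h=0.02, ueq=1.962):
    """One forward-Euler step: x_{k+1} = x_k + h*f(x_k) + h*Bc@(u + ueq).

    x : (12, 1) state, u : (4, 1) rotor-thrust deviations,
    f : callable returning the drift term, Bc : (12, 4) input matrix.
    """
    return x + h * f(x) + h * Bc @ (u + ueq)

# Illustrative usage with zero drift and a zero input matrix.
x0 = np.zeros((12, 1))
u0 = np.zeros((4, 1))
x1 = euler_step(x0, u0, f=lambda x: np.zeros_like(x), Bc=np.zeros((12, 4)))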
tests/test_funcs.py
Alcampopiano/OpenQuestion
4
6631497
import os import threading import time from server_code.server_surveys import * import anvil.server import pytest import uuid from datetime import datetime import pandas as pd import io # basic survey schema={ "title": "simple survey", "settings": { "survey_color": "#2196F3", "thank_you_msg": "#Thank you!" }, "num_widgets": 2, "widgets": [ { "id": 0, "type": "section", "logic": None, "title": "section", "widgets": [ { "id": 1, "type": "text_box", "logic": None, "title": "what's your name?", "number": False, "mandatory": True, "placeholder": "placeholder here" } ] } ] } @pytest.fixture(scope="session", autouse=True) def set_up_and_tear_down(): """ Everything before "yield" is run before any tests Everything after "yield" is run after all tests have finished """ # kill process on port in case it is running os.system("fuser -k 3030/tcp") # func to call app server def start_server(): os.system("anvil-app-server --app ../OpenQuestion --uplink-key 42 --port 3030") # start app server on a thread, allowing the rest of the script to run threading.Thread(target=start_server).start() # give time for the web server to spin up before continuing time.sleep(60) # connect anvil.server.connect('42', url="ws://localhost:3030/_/uplink") yield True # disconnect from uplink anvil.server.disconnect() # kill process on that port os.system("fuser -k 3030/tcp") def test_delete_survey(): form_id=str(uuid.uuid4()) # add a survey app_tables.forms.add_row(form_id=form_id, last_modified=datetime.now(), schema=schema, title=schema['title']) # current number of surveys init_num_forms = len(app_tables.forms.search()) # add a survey to the database delete_survey(form_id) # a simple assertion that there is one more survey in the database assert init_num_forms > len(app_tables.forms.search()) def test_submit_data(): """ Test that data is stored into the data after a submission Test that columns are added to the dataframe as expected even when using query string parameters in the URL hash """ form_id=str(uuid.uuid4()) # add a survey app_tables.forms.add_row(form_id=form_id, last_modified=datetime.now(), schema=schema, title=schema['title']) # submit some initial data cols=['a', 'b', 'c'] data=[1,2,3] submit_data(cols.copy(), data.copy(), {'form_id': form_id}) # submit data with query parameters (meta data) cols_from_hash=['form_id', '1st_param', '2nd_param'] submit_data(cols.copy(), data.copy(), {cols_from_hash[0]: form_id, cols_from_hash[1]: 'foo', cols_from_hash[2]: 'bar'}) media=app_tables.forms.get(form_id=form_id)['submissions'] df = pd.read_csv(io.BytesIO(media.get_bytes()), index_col=0) should_be_cols=cols + cols_from_hash new_cols=list(df.columns) assert new_cols==should_be_cols def test_check_opening_closing_dates(): """ assert that a survey is deemed inactive if closing date has passed """ x = datetime(2017, 11, 16, 23, 45, 15, 0, anvil.tz.tzoffset(hours=3)) y = datetime(2018, 11, 16, 23, 45, 15, 0, anvil.tz.tzoffset(hours=3)) try: check_opening_closing_dates(x, y) except Exception as e: error_str=str(e) assert error_str == 'survey inactive' def test_get_form(): form_id=str(uuid.uuid4()) # add a survey app_tables.forms.add_row(form_id=form_id, last_modified=datetime.now(), schema=schema, title=schema['title']) the_schema=get_form({'form_id': form_id, 'preview': True}) assert the_schema==schema def test_save_schema(): # current number of surveys init_num_forms=len(app_tables.forms.search()) # add a survey to the database save_schema(None, schema) # a simple assertion that there is one more survey in the database 
assert init_num_forms < len(app_tables.forms.search()) def test_save_survey_settings(): form_id=str(uuid.uuid4()) # add a survey row=app_tables.forms.add_row(form_id=form_id, last_modified=datetime.now(), schema=schema, title=schema['title']) settings_in_schema={'survey_color': '#F0F0F0', 'thank_you_msg': '#Thank you!'} settings_in_datatable={'opening_date': None, 'closing_date': None} save_survey_settings(form_id, settings_in_schema, settings_in_datatable) assert schema['settings'] != row['schema']['settings']
en
0.846137
# basic survey Everything before "yield" is run before any tests Everything after "yield" is run after all tests have finished # kill process on port in case it is running # func to call app server # start app server on a thread, allowing the rest of the script to run # give time for the web server to spin up before continuing # connect # disconnect from uplink # kill process on that port # add a survey # current number of surveys # add a survey to the database # a simple assertion that there is one more survey in the database Test that data is stored into the data after a submission Test that columns are added to the dataframe as expected even when using query string parameters in the URL hash # add a survey # submit some initial data # submit data with query parameters (meta data) assert that a survey is deemed inactive if closing date has passed # add a survey # current number of surveys # add a survey to the database # a simple assertion that there is one more survey in the database # add a survey
2.287365
2
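The heart of the OpenQuestion test module above is a session-scoped, autouse pytest fixture that boots the anvil app server on a background thread, sleeps until it is up, yields to the tests, and tears everything down afterwards. Below is a minimal, self-contained sketch of that setup/teardown-around-yield pattern, with the real anvil-app-server and uplink calls replaced by a placeholder loop; the fixture name and stop_event are invented for illustration.

import threading
import time
import pytest

@pytest.fixture(scope="session", autouse=True)
def backend():
    stop_event = threading.Event()

    def run():
        # Placeholder for "anvil-app-server --app ... --port 3030".
        while not stop_event.is_set():
            time.sleep(0.1)

    worker = threading.Thread(target=run, daemon=True)
    worker.start()
    time.sleep(0.5)   # give the fake server a moment to come up
    yield             # tests run here
    stop_event.set()  # everything after yield is teardown
    worker.join()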
qiita_db/test/test_analysis.py
jlab/qiita
96
6631498
from unittest import TestCase, main from os import remove from os.path import exists, join, basename from shutil import move from biom import load_table from pandas.util.testing import assert_frame_equal from functools import partial from qiita_core.util import qiita_test_checker from qiita_core.testing import wait_for_processing_job from qiita_core.qiita_settings import qiita_config import qiita_db as qdb from json import dumps # ----------------------------------------------------------------------------- # Copyright (c) 2014--, The Qiita Development Team. # # Distributed under the terms of the BSD 3-clause License. # # The full license is in the file LICENSE, distributed with this software. # ----------------------------------------------------------------------------- @qiita_test_checker() class TestAnalysis(TestCase): def setUp(self): self.analysis = qdb.analysis.Analysis(1) self.portal = qiita_config.portal _, self.fp = qdb.util.get_mountpoint("analysis")[0] self.get_fp = partial(join, self.fp) self.biom_fp = self.get_fp("1_analysis_dt-18S_r-1_c-3.biom") self._old_portal = qiita_config.portal self.table_fp = None # fullpaths for testing self.duplicated_samples_not_merged = self.get_fp( "not_merged_samples.txt") self.map_exp_fp = self.get_fp("1_analysis_mapping_exp.txt") from glob import glob conf_files = glob(join(qiita_config.plugin_dir, "BIOM*.conf")) for i, fp in enumerate(conf_files): qdb.software.Software.from_file(fp, update=True) def tearDown(self): self.analysis.artifacts[0].visibility = 'private' qiita_config.portal = self.portal with open(self.biom_fp, 'w') as f: f.write("") fp = self.get_fp('testfile.txt') if exists(fp): remove(fp) if self.table_fp: mp = qdb.util.get_mountpoint("processed_data")[0][1] if exists(self.table_fp): move(self.table_fp, join(mp, "2_study_1001_closed_reference_otu_table.biom")) qiita_config.portal = self._old_portal def _wait_for_jobs(self, analysis): for j in analysis.jobs: wait_for_processing_job(j.id) if j.status == 'error': print(j.log.msg) def _create_analyses_with_samples(self, user='<EMAIL>', merge=False): """Aux function to create an analysis with samples Parameters ---------- user : qiita_db.user.User, optional The user email to attach to the analysis. 
Default: <EMAIL> merge : bool, optional Merge duplicated ids or not Returns ------- qiita_db.analysis.Analysis Notes ----- Replicates the samples contained in Analysis(1) at the moment of creation of this function (September 15, 2016) """ user = qdb.user.User(user) dflt_analysis = user.default_analysis dflt_analysis.add_samples( {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180']}) new = qdb.analysis.Analysis.create( user, "newAnalysis", "A New Analysis", from_default=True, merge_duplicated_sample_ids=merge) self._wait_for_jobs(new) return new def test_lock_samples(self): dflt = qdb.user.User('<EMAIL>').default_analysis # The default analysis can have samples added/removed dflt._lock_samples() QE = qdb.exceptions with self.assertRaises(QE.QiitaDBOperationNotPermittedError): qdb.analysis.Analysis(1)._lock_samples() def test_get_by_status(self): qiita_config.portal = 'QIITA' self.assertEqual( qdb.analysis.Analysis.get_by_status('public'), set([])) qiita_config.portal = 'EMP' self.assertEqual( qdb.analysis.Analysis.get_by_status('public'), set([])) qiita_config.portal = 'QIITA' self.analysis.artifacts[0].visibility = 'public' self.assertEqual(qdb.analysis.Analysis.get_by_status('public'), {self.analysis}) qiita_config.portal = 'EMP' self.assertEqual( qdb.analysis.Analysis.get_by_status('public'), set([])) def test_can_be_publicized(self): analysis = qdb.analysis.Analysis(1) self.assertEqual(analysis.can_be_publicized, (False, [4, 5, 6])) a4 = qdb.artifact.Artifact(4) a4.visibility = 'public' self.assertEqual(analysis.can_be_publicized, (True, [])) a4.visibility = 'private' self.assertEqual(analysis.can_be_publicized, (False, [4, 5, 6])) def test_add_artifact(self): obs = self._create_analyses_with_samples() exp = qdb.artifact.Artifact(4) obs.add_artifact(exp) self.assertIn(exp, obs.artifacts) def test_has_access_public(self): analysis = self._create_analyses_with_samples("<EMAIL>") analysis.artifacts[0].visibility = 'public' qiita_config.portal = 'QIITA' self.assertTrue( analysis.has_access(qdb.user.User("<EMAIL>"))) qiita_config.portal = 'EMP' self.assertFalse( analysis.has_access(qdb.user.User("<EMAIL>"))) def test_has_access_shared(self): self.assertTrue( self.analysis.has_access(qdb.user.User("<EMAIL>"))) def test_has_access_private(self): self.assertTrue( self.analysis.has_access(qdb.user.User("<EMAIL>"))) def test_has_access_admin(self): qiita_config.portal = 'QIITA' self.assertTrue( self.analysis.has_access(qdb.user.User("<EMAIL>"))) qiita_config.portal = 'EMP' with self.assertRaises(qdb.exceptions.QiitaDBError): qdb.analysis.Analysis(1).has_access(qdb.user.User("<EMAIL>")) def test_has_access_no_access(self): self.assertFalse( self.analysis.has_access(qdb.user.User("<EMAIL>"))) def test_can_edit(self): a = qdb.analysis.Analysis(1) self.assertTrue(a.can_edit(qdb.user.User('<EMAIL>'))) self.assertTrue(a.can_edit(qdb.user.User('<EMAIL>'))) self.assertTrue(a.can_edit(qdb.user.User('<EMAIL>'))) self.assertFalse(a.can_edit(qdb.user.User('<EMAIL>'))) def test_create_nonqiita_portal(self): qiita_config.portal = "EMP" obs = qdb.analysis.Analysis.create( qdb.user.User("<EMAIL>"), "newAnalysis", "A New Analysis") # make sure portal is associated self.assertCountEqual(obs._portals, ["QIITA", "EMP"]) def test_create_from_default(self): with qdb.sql_connection.TRN: sql = "SELECT NOW()" qdb.sql_connection.TRN.add(sql) time1 = qdb.sql_connection.TRN.execute_fetchlast() owner = qdb.user.User("<EMAIL>") obs = qdb.analysis.Analysis.create( owner, "newAnalysis", "A 
New Analysis", from_default=True) self.assertEqual(obs.owner, owner) self.assertEqual(obs.name, "newAnalysis") self.assertEqual(obs._portals, ["QIITA"]) self.assertLess(time1, obs.timestamp) self.assertEqual(obs.description, "A New Analysis") self.assertCountEqual(obs.samples, [4]) self.assertCountEqual( obs.samples[4], ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180']) self.assertEqual(obs.data_types, ['18S']) self.assertEqual(obs.shared_with, []) self.assertEqual(obs.mapping_file, None) self.assertEqual(obs.tgz, None) self.assertNotEqual(obs.jobs, []) self.assertEqual(obs.pmid, None) def test_exists(self): qiita_config.portal = 'QIITA' self.assertTrue(qdb.analysis.Analysis.exists(1)) self.assertFalse(qdb.analysis.Analysis.exists(1000)) qiita_config.portal = 'EMP' self.assertFalse(qdb.analysis.Analysis.exists(1)) self.assertFalse(qdb.analysis.Analysis.exists(1000)) def test_delete(self): # successful delete new = qdb.analysis.Analysis.create( qdb.user.User('<EMAIL>'), "newAnalysis", "A New Analysis") self.assertTrue(qdb.analysis.Analysis.exists(new.id)) qdb.analysis.Analysis.delete(new.id) self.assertFalse(qdb.analysis.Analysis.exists(new.id)) # no possible to delete QE = qdb.exceptions with self.assertRaises(QE.QiitaDBUnknownIDError): qdb.analysis.Analysis.delete(new.id) # Analysis with artifacts with self.assertRaises(QE.QiitaDBOperationNotPermittedError): qdb.analysis.Analysis.delete(1) def test_retrieve_owner(self): self.assertEqual(self.analysis.owner, qdb.user.User("<EMAIL>")) def test_retrieve_name(self): self.assertEqual(self.analysis.name, "SomeAnalysis") def test_retrieve_description(self): self.assertEqual(self.analysis.description, "A test analysis") def test_set_description(self): self.analysis.description = "New description" self.assertEqual(self.analysis.description, "New description") def test_retrieve_samples(self): exp = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180'], 5: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180'], 6: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180']} self.assertCountEqual(self.analysis.samples, exp) def test_retrieve_portal(self): self.assertEqual(self.analysis._portals, ["QIITA"]) def test_retrieve_data_types(self): exp = ['18S', '16S'] self.assertCountEqual(self.analysis.data_types, exp) def test_retrieve_shared_with(self): self.assertEqual(self.analysis.shared_with, [qdb.user.User("<EMAIL>")]) def test_retrieve_jobs(self): self.assertEqual(self.analysis.jobs, []) def test_retrieve_pmid(self): self.assertEqual(self.analysis.pmid, "121112") def test_set_pmid(self): new = self._create_analyses_with_samples("<EMAIL>") self.assertIsNone(new.pmid) new.pmid = "11211221212213" self.assertEqual(new.pmid, "11211221212213") def test_retrieve_mapping_file(self): exp = join(self.fp, "1_analysis_mapping.txt") obs = self.analysis.mapping_file self.assertIsNotNone(obs) self.assertEqual( qdb.util.get_filepath_information(obs)['fullpath'], exp) self.assertTrue(exists(exp)) def test_retrieve_tgz(self): # generating here as the tgz is only generated once the analysis runs # to completion (un)successfully analysis = self._create_analyses_with_samples("<EMAIL>") fp = self.get_fp('test.tgz') with open(fp, 'w') as f: f.write('') analysis._add_file(fp, 'tgz') self.assertEqual(analysis.tgz, fp) def test_retrieve_tgz_none(self): self.assertIsNone(self.analysis.tgz) def test_summary_data(self): obs = self.analysis.summary_data() exp = 
{'studies': 1, 'artifacts': 3, 'samples': 5} self.assertEqual(obs, exp) def test_add_remove_samples(self): analysis = qdb.user.User('<EMAIL>').default_analysis exp = {4: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180', '1.SKB8.640193'], 5: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180', '1.SKB8.640193'], 6: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180', '1.SKB8.640193']} analysis.add_samples(exp) obs = analysis.samples self.assertCountEqual(list(obs.keys()), exp.keys()) for k in obs: self.assertCountEqual(obs[k], exp[k]) analysis.remove_samples(artifacts=(qdb.artifact.Artifact(4), ), samples=('1.SKB8.640193', )) exp = {4: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180'], 5: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180', '1.SKB8.640193'], 6: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180', '1.SKB8.640193']} obs = analysis.samples self.assertCountEqual(list(obs.keys()), exp.keys()) for k in obs: self.assertCountEqual(obs[k], exp[k]) analysis.remove_samples(samples=('1.SKD8.640184', )) exp = {4: ['1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180'], 5: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180'], 6: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180']} self.assertCountEqual(analysis.samples, exp) analysis.remove_samples( artifacts=(qdb.artifact.Artifact(4), qdb.artifact.Artifact(5))) exp = {6: {'1.SKB7.640196', '1.SKB8.640193', '1.SKM4.640180', '1.SKM9.640192'}} self.assertCountEqual(analysis.samples, exp) def test_share_unshare(self): analysis = self._create_analyses_with_samples() user = qdb.user.User("<EMAIL>") self.assertEqual(analysis.shared_with, []) analysis.share(user) exp = [user] self.assertEqual(analysis.shared_with, exp) analysis.unshare(user) self.assertEqual(analysis.shared_with, []) def test_build_mapping_file(self): analysis = self._create_analyses_with_samples() samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']} analysis._build_mapping_file(samples) obs = qdb.util.get_filepath_information( analysis.mapping_file)['fullpath'] exp = self.get_fp("%s_analysis_mapping.txt" % analysis.id) self.assertEqual(obs, exp) obs = qdb.metadata_template.util.load_template_to_dataframe( obs, index='#SampleID') exp = qdb.metadata_template.util.load_template_to_dataframe( self.map_exp_fp, index='#SampleID') # assert_frame_equal assumes same order on the rows, thus sorting # frames by index obs.sort_index(inplace=True) exp.sort_index(inplace=True) # then sorting columns obs = obs.reindex(sorted(obs.columns), axis=1) exp = exp.reindex(sorted(exp.columns), axis=1) assert_frame_equal(obs, exp, check_like=True) def test_build_mapping_file_duplicated_samples_no_merge(self): analysis = self._create_analyses_with_samples() samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'], 3: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']} analysis._build_mapping_file(samples, True) mapping_fp = qdb.util.get_filepath_information( analysis.mapping_file)['fullpath'] obs = qdb.metadata_template.util.load_template_to_dataframe( mapping_fp, index='#SampleID') exp = qdb.metadata_template.util.load_template_to_dataframe( self.duplicated_samples_not_merged, index='#SampleID') # assert_frame_equal assumes same order on the rows, thus sorting # frames by index obs.sort_index(inplace=True) exp.sort_index(inplace=True) # then sorting columns obs = obs.reindex(sorted(obs.columns), axis=1) exp = exp.reindex(sorted(exp.columns), axis=1) 
assert_frame_equal(obs, exp, check_like=True) def test_build_mapping_file_duplicated_samples_merge(self): analysis = self._create_analyses_with_samples() samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'], 3: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']} analysis._build_mapping_file(samples) mapping_fp = qdb.util.get_filepath_information( analysis.mapping_file)['fullpath'] obs = qdb.metadata_template.util.load_template_to_dataframe( mapping_fp, index='#SampleID') exp = qdb.metadata_template.util.load_template_to_dataframe( self.map_exp_fp, index='#SampleID') # assert_frame_equal assumes same order on the rows, thus sorting # frames by index obs.sort_index(inplace=True) exp.sort_index(inplace=True) # then sorting columns obs = obs.reindex(sorted(obs.columns), axis=1) exp = exp.reindex(sorted(exp.columns), axis=1) assert_frame_equal(obs, exp, check_like=True) def test_build_biom_tables(self): analysis = self._create_analyses_with_samples() grouped_samples = { '18S || algorithm': [ (4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]} obs_bioms = analysis._build_biom_tables(grouped_samples) biom_fp = self.get_fp( "%s_analysis_18S_algorithm.biom" % analysis.id) obs = [(a, basename(b)) for a, b, _ in obs_bioms] self.assertEqual(obs, [('18S', basename(biom_fp))]) table = load_table(obs_bioms[0][1]) obs = set(table.ids(axis='sample')) exp = {'1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'} self.assertEqual(obs, exp) def test_build_biom_tables_with_references(self): analysis = self._create_analyses_with_samples() analysis_id = analysis.id grouped_samples = { ('18S || Pick closed-reference OTUs (reference: 1) | ' 'Split libraries FASTQ'): [ (4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']), (5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])], ('18S || Pick closed-reference OTUs (reference: 1) | ' 'Trim (lenght: 150)'): [ (4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']), (5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])], ('16S || Pick closed-reference OTUs (reference: 2) | ' 'Trim (lenght: 100)'): [ (4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']), (5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]} obs_bioms = analysis._build_biom_tables(grouped_samples) obs = [(a, basename(b)) for a, b, _ in obs_bioms] exp = [ ('16S', '%s_analysis_16S_PickclosedreferenceOTUsreference2' 'Trimlenght100.biom' % analysis_id), ('18S', '%s_analysis_18S_PickclosedreferenceOTUsreference1' 'SplitlibrariesFASTQ.biom' % analysis_id), ('18S', '%s_analysis_18S_PickclosedreferenceOTUsreference1' 'Trimlenght150.biom' % analysis_id)] self.assertCountEqual(obs, exp) exp = {'1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'} for dt, fp, _ in obs_bioms: table = load_table(fp) obs = set(table.ids(axis='sample')) self.assertEqual(obs, exp) def test_build_biom_tables_duplicated_samples_not_merge(self): analysis = self._create_analyses_with_samples() grouped_samples = { '18S || algorithm': [ (4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']), (5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]} obs_bioms = analysis._build_biom_tables(grouped_samples, True) obs = [(a, basename(b)) for a, b, _ in obs_bioms] biom_fp = ( "%s_analysis_18S_algorithm.biom" % analysis.id) self.assertEqual(obs, [('18S', biom_fp)]) table = load_table(obs_bioms[0][1]) obs = set(table.ids(axis='sample')) exp = {'4.1.SKD8.640184', '4.1.SKB7.640196', '4.1.SKB8.640193', '5.1.SKB8.640193', '5.1.SKB7.640196', '5.1.SKD8.640184'} self.assertCountEqual(obs, exp) def 
test_build_biom_tables_raise_error_due_to_sample_selection(self): grouped_samples = { '18S || algorithm': [ (4, ['sample_name_1', 'sample_name_2', 'sample_name_3'])]} with self.assertRaises(RuntimeError): self.analysis._build_biom_tables(grouped_samples) def test_build_files(self): analysis = self._create_analyses_with_samples() biom_tables = analysis.build_files(True) # testing that the generated files have the same sample ids biom_fp = biom_tables[0][1] biom_ids = load_table(biom_fp).ids(axis='sample') mapping_fp = qdb.util.get_filepath_information( analysis.mapping_file)['fullpath'] mf_ids = qdb.metadata_template.util.load_template_to_dataframe( mapping_fp, index='#SampleID').index self.assertCountEqual(biom_ids, mf_ids) # now that the samples have been prefixed exp = ['1.SKM9.640192', '1.SKM4.640180', '1.SKD8.640184', '1.SKB8.640193', '1.SKB7.640196'] self.assertCountEqual(biom_ids, exp) def test_build_files_post_processing_cmd(self): tmp = qdb.artifact.Artifact(4).processing_parameters.command cmd_id = tmp.id # set a known artifact's additional processing command # to a known value. Then test for it. # qiita_db/test/support_files/worker.py will work w/py2.7 & 3.6 envs. results = {} results['script_env'] = 'source deactivate; source activate qiita;' results['script_path'] = 'qiita_db/test/support_files/worker.py' # no additional parameters are needed for worker.py # fp_biom and fp_archive will be generated by build_files() results['script_params'] = {} # convert to json representation and store in PostgreSQL results = dumps(results) sql = """UPDATE qiita.software_command SET post_processing_cmd = %s WHERE command_id = %s""" qdb.sql_connection.perform_as_transaction(sql, [results, cmd_id]) # create a sample analysis and run build_files on it. analysis = self._create_analyses_with_samples() biom_files = analysis.build_files(False) # if build_files used additional processing commands, it will # return a couple of tuples, where the third element contains # output archive-artifact data. 
self.assertEqual(2, len(biom_files)) aid = analysis.id exp = [('%d_analysis_18S_PickclosedreferenceOTUsSplitlibraries' 'FASTQ.biom' % aid, None), ('%d_analysis_18S_PickclosedreferenceOTUsSplitlibraries' 'FASTQ.biom' % aid, 'archive_%d.tre' % aid)] obs = [(basename(fp1), basename(fp2) if fp2 is not None else None) for _, fp1, fp2 in biom_files] self.assertEqual(obs, exp) # cleanup (assume command was NULL previously) sql = """UPDATE qiita.software_command SET post_processing_cmd = NULL WHERE command_id = %s""" qdb.sql_connection.perform_as_transaction(sql, [cmd_id]) def test_build_files_merge_duplicated_sample_ids(self): user = qdb.user.User("<EMAIL>") dflt_analysis = user.default_analysis dflt_analysis.add_samples( {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180'], 5: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180', '1.SKD8.640184'], 6: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180']}) new = qdb.analysis.Analysis.create( user, "newAnalysis", "A New Analysis", from_default=True, merge_duplicated_sample_ids=True) self._wait_for_jobs(new) biom_tables = new.build_files(False) # testing that the generated files have the same sample ids biom_ids = [] for _, fp, _ in biom_tables: biom_ids.extend(load_table(fp).ids(axis='sample')) mapping_fp = qdb.util.get_filepath_information( new.mapping_file)['fullpath'] mf_ids = qdb.metadata_template.util.load_template_to_dataframe( mapping_fp, index='#SampleID').index self.assertCountEqual(biom_ids, mf_ids) # now that the samples have been prefixed exp = ['4.1.SKM9.640192', '4.1.SKM4.640180', '4.1.SKD8.640184', '4.1.SKB8.640193', '4.1.SKB7.640196', '5.1.SKM9.640192', '5.1.SKM4.640180', '5.1.SKD8.640184', '5.1.SKB8.640193', '5.1.SKB7.640196', '6.1.SKM9.640192', '6.1.SKM4.640180', '6.1.SKD8.640184', '6.1.SKB8.640193', '6.1.SKB7.640196'] self.assertCountEqual(biom_ids, exp) def test_add_file(self): # Tested indirectly through build_files pass def test_is_public_make_public(self): analysis = self._create_analyses_with_samples() self.assertFalse(analysis.is_public) # testing errors with self.assertRaises(ValueError): analysis.make_public() # testing successfully making public # 4 is the only artifact being used in _create_analyses_with_samples qdb.artifact.Artifact(4).visibility = 'public' analysis.make_public() self.assertTrue(analysis.is_public) if __name__ == "__main__": main()
en
0.813054
# ----------------------------------------------------------------------------- # Copyright (c) 2014--, The Qiita Development Team. # # Distributed under the terms of the BSD 3-clause License. # # The full license is in the file LICENSE, distributed with this software. # ----------------------------------------------------------------------------- # fullpaths for testing Aux function to create an analysis with samples Parameters ---------- user : qiita_db.user.User, optional The user email to attach to the analysis. Default: <EMAIL> merge : bool, optional Merge duplicated ids or not Returns ------- qiita_db.analysis.Analysis Notes ----- Replicates the samples contained in Analysis(1) at the moment of creation of this function (September 15, 2016) # The default analysis can have samples added/removed # make sure portal is associated # successful delete # no possible to delete # Analysis with artifacts # generating here as the tgz is only generated once the analysis runs # to completion (un)successfully # assert_frame_equal assumes same order on the rows, thus sorting # frames by index # then sorting columns # assert_frame_equal assumes same order on the rows, thus sorting # frames by index # then sorting columns # assert_frame_equal assumes same order on the rows, thus sorting # frames by index # then sorting columns # testing that the generated files have the same sample ids # now that the samples have been prefixed # set a known artifact's additional processing command # to a known value. Then test for it. # qiita_db/test/support_files/worker.py will work w/py2.7 & 3.6 envs. # no additional parameters are needed for worker.py # fp_biom and fp_archive will be generated by build_files() # convert to json representation and store in PostgreSQL UPDATE qiita.software_command SET post_processing_cmd = %s WHERE command_id = %s # create a sample analysis and run build_files on it. # if build_files used additional processing commands, it will # return a couple of tuples, where the third element contains # output archive-artifact data. # cleanup (assume command was NULL previously) UPDATE qiita.software_command SET post_processing_cmd = NULL WHERE command_id = %s # testing that the generated files have the same sample ids # now that the samples have been prefixed # Tested indirectly through build_files # testing errors # testing successfully making public # 4 is the only artifact being used in _create_analyses_with_samples
2.028823
2
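The merge_duplicated_sample_ids test in the record above expects every BIOM sample ID to come back prefixed with the artifact ID it was drawn from (e.g. '4.1.SKB8.640193'). A minimal illustration of that prefixing rule in plain Python — a sketch of the expected values only, not the actual Qiita implementation:

# hypothetical illustration of the artifact-ID prefixing seen in the expected IDs above
artifact_samples = {4: ['1.SKB8.640193', '1.SKD8.640184'],
                    5: ['1.SKB8.640193']}
prefixed = ['%d.%s' % (aid, sid)
            for aid, sids in artifact_samples.items()
            for sid in sids]
# -> ['4.1.SKB8.640193', '4.1.SKD8.640184', '5.1.SKB8.640193']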
test/run/t89.py
timmartin/skulpt
2,671
6631499
print {(1,3):'OK'}[(1,3)]
print {(1,3):'OK'}[(1,3)]
none
1
1.672862
2
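t89.py above checks dictionary lookup with a tuple key using Python 2 print syntax (the skulpt test targets Python 2). For reference only, the Python 3 spelling of the same check would be:

print({(1, 3): 'OK'}[(1, 3)])  # prints OK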
activity_service_comm/main.py
shivan1b/kivy_osc
0
6631500
''' Activity ''' from kivy.app import App from kivy.utils import platform from kivy.lib import osc from kivy.clock import Clock from kivy.uix.button import Button activity_port = 8008 service_port = 8000 class MyActivity(App): ''' Defines the actions to be performed by Activity ''' def build(self): if platform == 'android': from android import AndroidService service = AndroidService('My Activity', 'running') service.start('service started') self.service = service osc.init() osc_id = osc.listen(ipAddr='127.0.0.1', port=activity_port) osc.bind(osc_id, self.my_callback, '/message') # Listen for messages regularly Clock.schedule_interval(lambda *x: osc.readQueue(osc_id), 0) btn = Button(text='Push me to see OSC working') btn.bind(on_release=self.send_msg_to_service) return btn def my_callback(self, message, *args): print "Activity callback message ", message[2] def send_msg_to_service(self, instance): ''' Send message to the service ''' osc.sendMsg('/message', ['Activity: Hey there', ], port=service_port, typehint=None) if __name__ == '__main__': MyActivity().run()
''' Activity ''' from kivy.app import App from kivy.utils import platform from kivy.lib import osc from kivy.clock import Clock from kivy.uix.button import Button activity_port = 8008 service_port = 8000 class MyActivity(App): ''' Defines the actions to be performed by Activity ''' def build(self): if platform == 'android': from android import AndroidService service = AndroidService('My Activity', 'running') service.start('service started') self.service = service osc.init() osc_id = osc.listen(ipAddr='127.0.0.1', port=activity_port) osc.bind(osc_id, self.my_callback, '/message') # Listen for messages regularly Clock.schedule_interval(lambda *x: osc.readQueue(osc_id), 0) btn = Button(text='Push me to see OSC working') btn.bind(on_release=self.send_msg_to_service) return btn def my_callback(self, message, *args): print "Activity callback message ", message[2] def send_msg_to_service(self, instance): ''' Send message to the service ''' osc.sendMsg('/message', ['Activity: Hey there', ], port=service_port, typehint=None) if __name__ == '__main__': MyActivity().run()
en
0.853102
Activity Defines the actions to be performed by Activity # Listen for messages regularly Send message to the service
3.034075
3
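main.py above is only the activity half of the OSC pair: it listens on port 8008 and sends '/message' packets to a service on port 8000. A rough sketch of what the matching service side could look like, reusing only the osc calls already shown in the record — this is an assumption for illustration, not the repository's actual service file:

# assumed counterpart service: listens on service_port, replies to the activity
from kivy.lib import osc
from time import sleep

activity_port = 8008
service_port = 8000

def service_callback(message, *args):
    print "Service callback message ", message[2]
    # answer the activity on its own port
    osc.sendMsg('/message', ['Service: Hey back', ], port=activity_port,
                typehint=None)

if __name__ == '__main__':
    osc.init()
    osc_id = osc.listen(ipAddr='127.0.0.1', port=service_port)
    osc.bind(osc_id, service_callback, '/message')
    while True:
        osc.readQueue(osc_id)
        sleep(.1)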
Python/leetcode/letterCombinationsOfPhoneNumber.py
darrencheng0817/AlgorithmLearning
2
6631501
<reponame>darrencheng0817/AlgorithmLearning<filename>Python/leetcode/letterCombinationsOfPhoneNumber.py ''' Created on 2016年1月11日 @author: Darren ''' ''' Given a digit string, return all possible letter combinations that the number could represent. A mapping of digit to letters (just like on the telephone buttons) is given below. Input:Digit string "23" Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]. ''' class Solution(object): def letterCombinations(self, digits): """ :type digits: str :rtype: List[str] """ if not digits: return [] dicts=["abc","def","ghi","jkl","mno","pqrs","tuv","wxyz"] res=[""] for digit in digits: newRes=[pre+newChar for pre in res for newChar in dicts[int(digit)-2]] res=newRes return res so=Solution() digits="23" print(so.letterCombinations(digits))
''' Created on 2016年1月11日 @author: Darren ''' ''' Given a digit string, return all possible letter combinations that the number could represent. A mapping of digit to letters (just like on the telephone buttons) is given below. Input:Digit string "23" Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]. ''' class Solution(object): def letterCombinations(self, digits): """ :type digits: str :rtype: List[str] """ if not digits: return [] dicts=["abc","def","ghi","jkl","mno","pqrs","tuv","wxyz"] res=[""] for digit in digits: newRes=[pre+newChar for pre in res for newChar in dicts[int(digit)-2]] res=newRes return res so=Solution() digits="23" print(so.letterCombinations(digits))
en
0.577041
Created on 2016年1月11日 @author: Darren Given a digit string, return all possible letter combinations that the number could represent. A mapping of digit to letters (just like on the telephone buttons) is given below. Input:Digit string "23" Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]. :type digits: str :rtype: List[str]
4.137035
4
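The loop in the record above grows the combinations one digit at a time; the same digit-to-letter mapping can also be expressed with itertools.product, shown here purely as an illustration and not as part of the repository:

# equivalent formulation with itertools.product (illustration only)
from itertools import product

dicts = ["abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"]

def letter_combinations(digits):
    if not digits:
        return []
    return [''.join(p) for p in product(*(dicts[int(d) - 2] for d in digits))]

print(letter_combinations("23"))  # ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']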
0144 Binary Tree Preorder Traversal.py
MdAbedin/leetcode
4
6631502
<gh_stars>1-10 # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def preorderTraversal(self, root: TreeNode) -> List[int]: if not root or not root.val: return [] lefts, rights = deque(), deque() lefts.append(root) ans = [] while lefts or rights: if lefts: cur = lefts.popleft() if cur: ans.append(cur.val) if cur.left: lefts.appendleft(cur.left) if cur.right: rights.appendleft(cur.right) else: cur = rights.popleft() if cur: ans.append(cur.val) if cur.left: lefts.appendleft(cur.left) if cur.right: rights.appendleft(cur.right) return ans
# Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def preorderTraversal(self, root: TreeNode) -> List[int]: if not root or not root.val: return [] lefts, rights = deque(), deque() lefts.append(root) ans = [] while lefts or rights: if lefts: cur = lefts.popleft() if cur: ans.append(cur.val) if cur.left: lefts.appendleft(cur.left) if cur.right: rights.appendleft(cur.right) else: cur = rights.popleft() if cur: ans.append(cur.val) if cur.left: lefts.appendleft(cur.left) if cur.right: rights.appendleft(cur.right) return ans
en
0.60307
# Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None
3.801752
4
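The solution above leans on LeetCode's implicit environment (TreeNode, List from typing, deque from collections), and its guard `not root.val` would also bail out on a root whose value is 0. For comparison, a conventional single-stack iterative preorder, written as a self-contained sketch that assumes the same TreeNode shape:

def preorder(root):
    # iterative preorder: visit the node, push right before left so left pops first
    if not root:
        return []
    stack, ans = [root], []
    while stack:
        node = stack.pop()
        ans.append(node.val)
        if node.right:
            stack.append(node.right)
        if node.left:
            stack.append(node.left)
    return ans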
lib/upy2/typesetting/__init__.py
friedrichromstedt/upy
3
6631503
<reponame>friedrichromstedt/upy from upy2.typesetting.scientific import ScientificTypesetter
from upy2.typesetting.scientific import ScientificTypesetter
none
1
1.082565
1
pelicanconf.py
yang2lalang/blog
0
6631504
<reponame>yang2lalang/blog #!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals from datetime import datetime AUTHOR = u'<NAME>' SITENAME = u'Franklin is Blogging' SITEURL = 'http://localhost:8000' SITETITLE = AUTHOR SITESUBTITLE = 'Robotics Software Engineer, Part time Trader, Consultant' SITEDESCRIPTION = '%s\'s My thoughts and a few tutorials' % AUTHOR SITELOGO = './images/cartoon_ik.png' PATH = 'content' RELATIVE_URLS = True BROWSER_COLOR = '#333333' PYGMENTS_STYLE = 'monokai' ROBOTS = 'index, follow' TIMEZONE = 'Europe/Paris' I18N_TEMPLATES_LANG = 'en' DEFAULT_LANG = 'en' OG_LOCALE = 'en_US' LOCALE = 'en_US' DATE_FORMATS = { 'en': '%B %d, %Y', } USE_FOLDER_AS_CATEGORY = False MAIN_MENU = True HOME_HIDE_TAGS = True # Feed Items FEED_MAX_ITEMS = 15 FEED_ALL_ATOM = 'feeds/all.atom.xml' CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml' LINKS = (('Home', 'http://yang2lalang.com'),('Researchgate', 'https://www.researchgate.net/profile/Franklin_Okoli'),) SOCIAL = (('linkedin', 'http://www.linkedin.com/in/franklinokoli/'), ('github', 'https://github.com/yang2lalang'), ("rss", FEED_ALL_ATOM), ) MENUITEMS = (('Archives', '/archives.html'), ('Categories', '/categories.html'), ('Tags', '/tags.html'),) CC_LICENSE = { 'name': 'Creative Commons Attribution-ShareAlike', 'version': '4.0', 'slug': 'by-sa' } COPYRIGHT_YEAR = datetime.now().year DEFAULT_PAGINATION = 10 OUTPUT_PATH = 'docs/' # Uncomment following line if you want document-relative URLs when developing # MARKUP = ('md') STATIC_PATHS = ['images', 'extra','articles',"extra/CNAME","extra/custom.css"] PLUGIN_PATHS = ['./pelican-plugins'] PLUGINS = ['sitemap', 'post_stats','i18n_subsites'] IGNORE_FILES = ['.ipynb_checkpoints'] THEME = './Flex' EXTRA_PATH_METADATA = { "extra/CNAME": {"path": "CNAME"}, } CUSTOM_CSS = "extra/custom.css" THEME_COLOR_AUTO_DETECT_BROWSER_PREFERENCE = True THEME_COLOR_ENABLE_USER_OVERRIDE = True JINJA_ENVIRONMENT = {'extensions': ['jinja2.ext.i18n']} SITEMAP = { 'format': 'xml', 'priorities': { 'articles': 0.6, 'indexes': 0.6, 'pages': 0.5, }, 'changefreqs': { 'articles': 'monthly', 'indexes': 'daily', 'pages': 'monthly', } } DISQUS_SITENAME = "http-yang2lalang-github-io" ADD_THIS_ID = 'ra-5bac97e477d3d598' GOOGLE_ANALYTICS = "UA-126577907-1" GOOGLE_ADSENSE = { 'ca_id': 'ca-pub-9421131783612830', 'page_level_ads': True, 'ads': { 'aside': '5340595560', 'main_menu': '', 'index_top': '', 'index_bottom': '9584371569', 'article_top': '', 'article_bottom': '7257980762', } }
#!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals from datetime import datetime AUTHOR = u'<NAME>' SITENAME = u'Franklin is Blogging' SITEURL = 'http://localhost:8000' SITETITLE = AUTHOR SITESUBTITLE = 'Robotics Software Engineer, Part time Trader, Consultant' SITEDESCRIPTION = '%s\'s My thoughts and a few tutorials' % AUTHOR SITELOGO = './images/cartoon_ik.png' PATH = 'content' RELATIVE_URLS = True BROWSER_COLOR = '#333333' PYGMENTS_STYLE = 'monokai' ROBOTS = 'index, follow' TIMEZONE = 'Europe/Paris' I18N_TEMPLATES_LANG = 'en' DEFAULT_LANG = 'en' OG_LOCALE = 'en_US' LOCALE = 'en_US' DATE_FORMATS = { 'en': '%B %d, %Y', } USE_FOLDER_AS_CATEGORY = False MAIN_MENU = True HOME_HIDE_TAGS = True # Feed Items FEED_MAX_ITEMS = 15 FEED_ALL_ATOM = 'feeds/all.atom.xml' CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml' LINKS = (('Home', 'http://yang2lalang.com'),('Researchgate', 'https://www.researchgate.net/profile/Franklin_Okoli'),) SOCIAL = (('linkedin', 'http://www.linkedin.com/in/franklinokoli/'), ('github', 'https://github.com/yang2lalang'), ("rss", FEED_ALL_ATOM), ) MENUITEMS = (('Archives', '/archives.html'), ('Categories', '/categories.html'), ('Tags', '/tags.html'),) CC_LICENSE = { 'name': 'Creative Commons Attribution-ShareAlike', 'version': '4.0', 'slug': 'by-sa' } COPYRIGHT_YEAR = datetime.now().year DEFAULT_PAGINATION = 10 OUTPUT_PATH = 'docs/' # Uncomment following line if you want document-relative URLs when developing # MARKUP = ('md') STATIC_PATHS = ['images', 'extra','articles',"extra/CNAME","extra/custom.css"] PLUGIN_PATHS = ['./pelican-plugins'] PLUGINS = ['sitemap', 'post_stats','i18n_subsites'] IGNORE_FILES = ['.ipynb_checkpoints'] THEME = './Flex' EXTRA_PATH_METADATA = { "extra/CNAME": {"path": "CNAME"}, } CUSTOM_CSS = "extra/custom.css" THEME_COLOR_AUTO_DETECT_BROWSER_PREFERENCE = True THEME_COLOR_ENABLE_USER_OVERRIDE = True JINJA_ENVIRONMENT = {'extensions': ['jinja2.ext.i18n']} SITEMAP = { 'format': 'xml', 'priorities': { 'articles': 0.6, 'indexes': 0.6, 'pages': 0.5, }, 'changefreqs': { 'articles': 'monthly', 'indexes': 'daily', 'pages': 'monthly', } } DISQUS_SITENAME = "http-yang2lalang-github-io" ADD_THIS_ID = 'ra-5bac97e477d3d598' GOOGLE_ANALYTICS = "UA-126577907-1" GOOGLE_ADSENSE = { 'ca_id': 'ca-pub-9421131783612830', 'page_level_ads': True, 'ads': { 'aside': '5340595560', 'main_menu': '', 'index_top': '', 'index_bottom': '9584371569', 'article_top': '', 'article_bottom': '7257980762', } }
en
0.728706
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Feed Items # Uncomment following line if you want document-relative URLs when developing #
1.613186
2
socutils/mongo_connector/database.py
Infosecurity-LLC/socutils
1
6631505
<reponame>Infosecurity-LLC/socutils<gh_stars>1-10 from .client import Client class Database(object): __instance = None def __init__(self, client: Client, label: str = None): """ Database object helps creating instance of mongo connector :param client: mongo_client.Client instance that used for getting config options :param label: Label of the database connected to """ if client is None: raise ValueError('Client instance should be provided to database') self._client = client if label is None: label = client.default_db_label self._label = label @classmethod def create(cls, label: str, host: str, port: int, *args, **kwargs): """ Creates mongo_client.Database instance from specified params :param label: Label of the database connected to :param host: MongoDB host, that'll be used for database connector creating :param port: MongoDB port, that'll be used for database connector creating :param args: additional parameters for mongo_client.Client instance creation :param kwargs: additional parameters for mongo_client.Client instance creation :return: Database instance """ return cls(Client(host=host, port=port, *args, **kwargs), label) @property def label(self) -> str: """ Label of the database connected to :return: Database label """ return self._label @label.setter def label(self, label: str): """ Setter for mongo_client.Database.label :param label: New database label """ self.__instance = None self._label = label @property def client(self) -> Client: """ mongo_client.Client instance that used for getting config options :return: mongo_client.Client instance """ return self._client @client.setter def client(self, client: Client): """ Setter for mongo_client.Database.client :param client: new mongo_client.Database.client """ self.__instance = None self._client = client def set_new_client(self, host: str, port: int, *args, **kwargs): """ Creates new client from specified params and set it to mongo_client.Database.client :param host: MongoDB host, that'll be used for database connector creating :param port: MongoDB port, that'll be used for database connector creating :param args: additional parameters for mongo_client.Client instance creation :param kwargs: additional parameters for mongo_client.Client instance creation """ self.__instance = None self._client = Client(host=host, port=port, *args, **kwargs) def get_instance(self, label: str = None, *args, **kwargs): """ Getting pymongo.MongoClient instance with provided connection parameters :param label: Label of the database connected to, default value is self.label :return: pymongo.MongoClient instance """ from pymongo import MongoClient if label is None: label = self.label if label is None: raise ValueError('Database label should be provided') if self.__instance is None: self.__instance = MongoClient(self.client.mongo_uri)[label] instance = self.__instance else: instance = MongoClient(self.client.mongo_uri, *args, **kwargs)[label] return instance
from .client import Client class Database(object): __instance = None def __init__(self, client: Client, label: str = None): """ Database object helps creating instance of mongo connector :param client: mongo_client.Client instance that used for getting config options :param label: Label of the database connected to """ if client is None: raise ValueError('Client instance should be provided to database') self._client = client if label is None: label = client.default_db_label self._label = label @classmethod def create(cls, label: str, host: str, port: int, *args, **kwargs): """ Creates mongo_client.Database instance from specified params :param label: Label of the database connected to :param host: MongoDB host, that'll be used for database connector creating :param port: MongoDB port, that'll be used for database connector creating :param args: additional parameters for mongo_client.Client instance creation :param kwargs: additional parameters for mongo_client.Client instance creation :return: Database instance """ return cls(Client(host=host, port=port, *args, **kwargs), label) @property def label(self) -> str: """ Label of the database connected to :return: Database label """ return self._label @label.setter def label(self, label: str): """ Setter for mongo_client.Database.label :param label: New database label """ self.__instance = None self._label = label @property def client(self) -> Client: """ mongo_client.Client instance that used for getting config options :return: mongo_client.Client instance """ return self._client @client.setter def client(self, client: Client): """ Setter for mongo_client.Database.client :param client: new mongo_client.Database.client """ self.__instance = None self._client = client def set_new_client(self, host: str, port: int, *args, **kwargs): """ Creates new client from specified params and set it to mongo_client.Database.client :param host: MongoDB host, that'll be used for database connector creating :param port: MongoDB port, that'll be used for database connector creating :param args: additional parameters for mongo_client.Client instance creation :param kwargs: additional parameters for mongo_client.Client instance creation """ self.__instance = None self._client = Client(host=host, port=port, *args, **kwargs) def get_instance(self, label: str = None, *args, **kwargs): """ Getting pymongo.MongoClient instance with provided connection parameters :param label: Label of the database connected to, default value is self.label :return: pymongo.MongoClient instance """ from pymongo import MongoClient if label is None: label = self.label if label is None: raise ValueError('Database label should be provided') if self.__instance is None: self.__instance = MongoClient(self.client.mongo_uri)[label] instance = self.__instance else: instance = MongoClient(self.client.mongo_uri, *args, **kwargs)[label] return instance
en
0.617333
Database object helps creating instance of mongo connector :param client: mongo_client.Client instance that used for getting config options :param label: Label of the database connected to Creates mongo_client.Database instance from specified params :param label: Label of the database connected to :param host: MongoDB host, that'll be used for database connector creating :param port: MongoDB port, that'll be used for database connector creating :param args: additional parameters for mongo_client.Client instance creation :param kwargs: additional parameters for mongo_client.Client instance creation :return: Database instance Label of the database connected to :return: Database label Setter for mongo_client.Database.label :param label: New database label mongo_client.Client instance that used for getting config options :return: mongo_client.Client instance Setter for mongo_client.Database.client :param client: new mongo_client.Database.client Creates new client from specified params and set it to mongo_client.Database.client :param host: MongoDB host, that'll be used for database connector creating :param port: MongoDB port, that'll be used for database connector creating :param args: additional parameters for mongo_client.Client instance creation :param kwargs: additional parameters for mongo_client.Client instance creation Getting pymongo.MongoClient instance with provided connection parameters :param label: Label of the database connected to, default value is self.label :return: pymongo.MongoClient instance
3.363742
3
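A short usage sketch based only on the docstrings and methods in the record above; the import path is inferred from the file location, and host, port, label and collection name are placeholders:

# assumed import path; values are placeholders
from socutils.mongo_connector.database import Database

db = Database.create(label='events', host='127.0.0.1', port=27017)
collection = db.get_instance()['raw_events']    # pymongo database, indexed by collection name
db.set_new_client(host='10.0.0.5', port=27017)  # swaps the client and drops the cached instance
fresh = db.get_instance()                       # rebuilt against the new client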
tests/test_middleware.py
camuthig/django-user-api-key
0
6631506
from datetime import timedelta from django.contrib.auth import get_user_model from django.test import RequestFactory from django.test import TestCase from django.test.utils import override_settings from django.utils import timezone from django_pat.middleware import PatAuthenticationMiddleware from django_pat.models import PersonalAccessToken User = get_user_model() class TestPatAuthenticationMiddleware(TestCase): def setUp(self): self.request_factory = RequestFactory() def test_it_parses_default_header(self): user = User.objects.create_user("testuser", "<EMAIL>", "random-insecure-text") token, token_val = PersonalAccessToken.objects.create_token(user, "name") def handle(request): self.assertTrue(request.user.is_authenticated) self.assertEqual(user, request.user) m = PatAuthenticationMiddleware(handle) req = self.request_factory.get("path", HTTP_AUTHORIZATION=f"Access-Token {token_val}") m(req) @override_settings(PAT_CUSTOM_HEADER="X-Custom-Key") def test_it_supports_a_custom_header(self): user = User.objects.create_user("testuser", "<EMAIL>", "random-insecure-text") token, token_val = PersonalAccessToken.objects.create_token(user, "name") def handle(request): self.assertEqual(user, request.user) m = PatAuthenticationMiddleware(handle) req = self.request_factory.get("path", HTTP_X_CUSTOM_KEY=f"Access-Token {token_val}") m(req) @override_settings(PAT_CUSTOM_HEADER_PREFIX="Custom-Key") def test_it_supports_a_custom_prefix(self): user = User.objects.create_user("testuser", "<EMAIL>", "random-insecure-text") token, token_val = PersonalAccessToken.objects.create_token(user, "name") def handle(request): self.assertEqual(user, request.user) m = PatAuthenticationMiddleware(handle) req = self.request_factory.get("path", HTTP_AUTHORIZATION=f"Custom-Key {token_val}") m(req) def test_it_sets_last_used_at(self): user = User.objects.create_user("testuser", "<EMAIL>", "random-insecure-text") token, token_val = PersonalAccessToken.objects.create_token(user, "name") last_used_at = timezone.now() - timedelta(days=1) token.last_used_at = last_used_at token.save() def handle(request): token.refresh_from_db() self.assertEqual(last_used_at, token.last_used_at) # Access the request's user to evaluate the lazy object self.assertEqual(user, request.user) token.refresh_from_db() self.assertGreater(token.last_used_at, timezone.now() - timedelta(minutes=1)) m = PatAuthenticationMiddleware(handle) req = self.request_factory.get("path", HTTP_AUTHORIZATION=f"Access-Token {token_val}") m(req) def test_it_does_not_use_revoked_tokens(self): user = User.objects.create_user("testuser", "<EMAIL>", "random-insecure-text") token, token_val = PersonalAccessToken.objects.create_token(user, "name") token.revoked_at = timezone.now() token.save() def handle(request): self.assertFalse(request.user.is_authenticated) m = PatAuthenticationMiddleware(handle) req = self.request_factory.get("path", HTTP_AUTHORIZATION=f"Access-Token {token_val}") m(req)
from datetime import timedelta from django.contrib.auth import get_user_model from django.test import RequestFactory from django.test import TestCase from django.test.utils import override_settings from django.utils import timezone from django_pat.middleware import PatAuthenticationMiddleware from django_pat.models import PersonalAccessToken User = get_user_model() class TestPatAuthenticationMiddleware(TestCase): def setUp(self): self.request_factory = RequestFactory() def test_it_parses_default_header(self): user = User.objects.create_user("testuser", "<EMAIL>", "random-insecure-text") token, token_val = PersonalAccessToken.objects.create_token(user, "name") def handle(request): self.assertTrue(request.user.is_authenticated) self.assertEqual(user, request.user) m = PatAuthenticationMiddleware(handle) req = self.request_factory.get("path", HTTP_AUTHORIZATION=f"Access-Token {token_val}") m(req) @override_settings(PAT_CUSTOM_HEADER="X-Custom-Key") def test_it_supports_a_custom_header(self): user = User.objects.create_user("testuser", "<EMAIL>", "random-insecure-text") token, token_val = PersonalAccessToken.objects.create_token(user, "name") def handle(request): self.assertEqual(user, request.user) m = PatAuthenticationMiddleware(handle) req = self.request_factory.get("path", HTTP_X_CUSTOM_KEY=f"Access-Token {token_val}") m(req) @override_settings(PAT_CUSTOM_HEADER_PREFIX="Custom-Key") def test_it_supports_a_custom_prefix(self): user = User.objects.create_user("testuser", "<EMAIL>", "random-insecure-text") token, token_val = PersonalAccessToken.objects.create_token(user, "name") def handle(request): self.assertEqual(user, request.user) m = PatAuthenticationMiddleware(handle) req = self.request_factory.get("path", HTTP_AUTHORIZATION=f"Custom-Key {token_val}") m(req) def test_it_sets_last_used_at(self): user = User.objects.create_user("testuser", "<EMAIL>", "random-insecure-text") token, token_val = PersonalAccessToken.objects.create_token(user, "name") last_used_at = timezone.now() - timedelta(days=1) token.last_used_at = last_used_at token.save() def handle(request): token.refresh_from_db() self.assertEqual(last_used_at, token.last_used_at) # Access the request's user to evaluate the lazy object self.assertEqual(user, request.user) token.refresh_from_db() self.assertGreater(token.last_used_at, timezone.now() - timedelta(minutes=1)) m = PatAuthenticationMiddleware(handle) req = self.request_factory.get("path", HTTP_AUTHORIZATION=f"Access-Token {token_val}") m(req) def test_it_does_not_use_revoked_tokens(self): user = User.objects.create_user("testuser", "<EMAIL>", "random-insecure-text") token, token_val = PersonalAccessToken.objects.create_token(user, "name") token.revoked_at = timezone.now() token.save() def handle(request): self.assertFalse(request.user.is_authenticated) m = PatAuthenticationMiddleware(handle) req = self.request_factory.get("path", HTTP_AUTHORIZATION=f"Access-Token {token_val}") m(req)
en
0.82564
# Access the request's user to evaluate the lazy object
2.207871
2
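The tests above exercise a default "Authorization: Access-Token <token>" header plus two optional settings. A sketch of the settings wiring they imply — the middleware path comes from the import at the top of the test file, while the rest should be read as an assumption rather than the package's documented setup:

# assumed Django settings wiring for the middleware under test
MIDDLEWARE = [
    # ... Django's standard middleware ...
    "django_pat.middleware.PatAuthenticationMiddleware",
]

# optional overrides, mirroring the override_settings calls in the tests
PAT_CUSTOM_HEADER = "X-Custom-Key"       # header to read instead of Authorization
PAT_CUSTOM_HEADER_PREFIX = "Custom-Key"  # replaces the default "Access-Token" prefix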
features.py
yctao7/Mobile-Phone-Opening-Prediction
0
6631507
import pandas as pd from prep import get_ratio from datetime import datetime, timedelta stime, etime = 'formatted_start_time', 'formatted_end_time' dformat = '%Y-%m-%d %H:%M:%S' def get_between_with_duration(df0, target, merge_way, s, e, duration=None): df = df0.copy() df[stime] = pd.to_datetime(df[stime]) df[etime] = pd.to_datetime(df[etime]) df_avg = None if duration is None: duration = 'manual_duration' df[duration] = (df[etime] - df[stime]).apply(lambda x: x.total_seconds()) df = df[['timestamps', stime, etime, 'enSN', target, duration]] df_avg = pd.DataFrame(columns=df.columns) df1 = df[(df[etime] >= s) & (df[stime] < e)].reset_index(drop=True).copy() if len(df1) == 0: df1 = pd.DataFrame({'timestamps': df0['timestamps'].iat[0], 'enSN': df0['enSN'].iat[0], target: 0.0, duration: 0.0}, index=[0]) else: ratios = df1.apply(lambda r: get_ratio(r[stime], r[etime], s, e), axis=1) df1[duration] *= ratios if merge_way == 'mean': df1[target] = df1[target] * df1[duration] df1 = df1.groupby(['timestamps', 'enSN'])[target, duration].sum().reset_index() df1[target] = df1[target] / df1[duration] if merge_way == 'sum': df1[target] *= ratios df1 = df1.groupby(['timestamps', 'enSN'])[target, duration].sum().reset_index() df1[stime], df1[etime]= s, e df_avg = df_avg.append(df1, ignore_index=True) df_avg[target] = pd.to_numeric(df_avg[target]) #df_avg.drop(columns=[duration], inplace=True) df_avg[duration] = pd.to_numeric(df_avg[duration]) #df_avg[stime] = df_avg[stime].apply(lambda s: datetime.strftime(s, dformat)) #df_avg[etime] = df_avg[etime].apply(lambda s: datetime.strftime(s, dformat)) return df_avg[target].iat[0], df_avg[duration].iat[0] def get_between(df0, target, merge_way, s, e, duration=None): targ, _ = get_between_with_duration(df0, target, merge_way, s, e, duration) return targ def get_point(df0, target, t, default_value=0): df = df0.copy() df[stime] = pd.to_datetime(df[stime]) df[etime] = pd.to_datetime(df[etime]) df1 = df[(df[etime] >= t) & (df[stime] <= t)] if len(df1) == 0: return default_value else: return df1[target].iat[0] def get_mode(df, target, t, default_value=1): d = {1: [1, 0, 0, 0], 2: [0, 1, 0, 0], 4: [0, 0, 1, 0], 5: [0, 0, 0, 1]} if int(get_point(df, target, t, default_value)) == 0: print(target, t) return d[int(get_point(df, target, t, default_value))] def get_bright_session_num(df, t, duration=10): t_before = t - timedelta(minutes=duration) return len(df[(df['last_bright_start_time'] <= t) & (df['last_bright_start_time'] >= t_before)]) def get_app_num(df_disp, t, duration=10): df_disp = df_disp.copy() t_before = t - timedelta(minutes=duration) df_disp[stime] = pd.to_datetime(df_disp[stime]) df_disp[etime] = pd.to_datetime(df_disp[etime]) df = df_disp[(df_disp[etime] >= t_before) & (df_disp[stime] <= t)] return len(df['name'].unique()) def get_apps_between_with_duration(df, target, merge_way, s, e, apps, duration=None): if len(apps) == 0: return 0, 0 df0 = df[df['name'].isin(apps)].copy() return get_between_with_duration(df0, target, merge_way, s, e, duration) if __name__ == '__main__': # df = pd.read_csv('./data/db_brightness_detail.csv') # df_user = df[df['enSN'] == 'ELS000040'].reset_index(drop=True) # target, duration = 'brightness', 'duration' # s = datetime(2020, 5, 17, 9, 36, 19) # e = datetime(2020, 5, 17, 9, 38, 20) # df_user_norm = get_between(df_user, target, 'sum', s, e, duration) df1 = pd.read_csv('./data/db_ambient_light_detail.csv') df_user1 = df1[df1['enSN'] == 'ELS000040'].reset_index(drop=True) target = 'level' t = datetime(2020, 5, 17, 11, 26, 32) point 
= get_point(df_user1, target, t)
import pandas as pd from prep import get_ratio from datetime import datetime, timedelta stime, etime = 'formatted_start_time', 'formatted_end_time' dformat = '%Y-%m-%d %H:%M:%S' def get_between_with_duration(df0, target, merge_way, s, e, duration=None): df = df0.copy() df[stime] = pd.to_datetime(df[stime]) df[etime] = pd.to_datetime(df[etime]) df_avg = None if duration is None: duration = 'manual_duration' df[duration] = (df[etime] - df[stime]).apply(lambda x: x.total_seconds()) df = df[['timestamps', stime, etime, 'enSN', target, duration]] df_avg = pd.DataFrame(columns=df.columns) df1 = df[(df[etime] >= s) & (df[stime] < e)].reset_index(drop=True).copy() if len(df1) == 0: df1 = pd.DataFrame({'timestamps': df0['timestamps'].iat[0], 'enSN': df0['enSN'].iat[0], target: 0.0, duration: 0.0}, index=[0]) else: ratios = df1.apply(lambda r: get_ratio(r[stime], r[etime], s, e), axis=1) df1[duration] *= ratios if merge_way == 'mean': df1[target] = df1[target] * df1[duration] df1 = df1.groupby(['timestamps', 'enSN'])[target, duration].sum().reset_index() df1[target] = df1[target] / df1[duration] if merge_way == 'sum': df1[target] *= ratios df1 = df1.groupby(['timestamps', 'enSN'])[target, duration].sum().reset_index() df1[stime], df1[etime]= s, e df_avg = df_avg.append(df1, ignore_index=True) df_avg[target] = pd.to_numeric(df_avg[target]) #df_avg.drop(columns=[duration], inplace=True) df_avg[duration] = pd.to_numeric(df_avg[duration]) #df_avg[stime] = df_avg[stime].apply(lambda s: datetime.strftime(s, dformat)) #df_avg[etime] = df_avg[etime].apply(lambda s: datetime.strftime(s, dformat)) return df_avg[target].iat[0], df_avg[duration].iat[0] def get_between(df0, target, merge_way, s, e, duration=None): targ, _ = get_between_with_duration(df0, target, merge_way, s, e, duration) return targ def get_point(df0, target, t, default_value=0): df = df0.copy() df[stime] = pd.to_datetime(df[stime]) df[etime] = pd.to_datetime(df[etime]) df1 = df[(df[etime] >= t) & (df[stime] <= t)] if len(df1) == 0: return default_value else: return df1[target].iat[0] def get_mode(df, target, t, default_value=1): d = {1: [1, 0, 0, 0], 2: [0, 1, 0, 0], 4: [0, 0, 1, 0], 5: [0, 0, 0, 1]} if int(get_point(df, target, t, default_value)) == 0: print(target, t) return d[int(get_point(df, target, t, default_value))] def get_bright_session_num(df, t, duration=10): t_before = t - timedelta(minutes=duration) return len(df[(df['last_bright_start_time'] <= t) & (df['last_bright_start_time'] >= t_before)]) def get_app_num(df_disp, t, duration=10): df_disp = df_disp.copy() t_before = t - timedelta(minutes=duration) df_disp[stime] = pd.to_datetime(df_disp[stime]) df_disp[etime] = pd.to_datetime(df_disp[etime]) df = df_disp[(df_disp[etime] >= t_before) & (df_disp[stime] <= t)] return len(df['name'].unique()) def get_apps_between_with_duration(df, target, merge_way, s, e, apps, duration=None): if len(apps) == 0: return 0, 0 df0 = df[df['name'].isin(apps)].copy() return get_between_with_duration(df0, target, merge_way, s, e, duration) if __name__ == '__main__': # df = pd.read_csv('./data/db_brightness_detail.csv') # df_user = df[df['enSN'] == 'ELS000040'].reset_index(drop=True) # target, duration = 'brightness', 'duration' # s = datetime(2020, 5, 17, 9, 36, 19) # e = datetime(2020, 5, 17, 9, 38, 20) # df_user_norm = get_between(df_user, target, 'sum', s, e, duration) df1 = pd.read_csv('./data/db_ambient_light_detail.csv') df_user1 = df1[df1['enSN'] == 'ELS000040'].reset_index(drop=True) target = 'level' t = datetime(2020, 5, 17, 11, 26, 32) point 
= get_point(df_user1, target, t)
en
0.376354
#df_avg.drop(columns=[duration], inplace=True) #df_avg[stime] = df_avg[stime].apply(lambda s: datetime.strftime(s, dformat)) #df_avg[etime] = df_avg[etime].apply(lambda s: datetime.strftime(s, dformat)) # df = pd.read_csv('./data/db_brightness_detail.csv') # df_user = df[df['enSN'] == 'ELS000040'].reset_index(drop=True) # target, duration = 'brightness', 'duration' # s = datetime(2020, 5, 17, 9, 36, 19) # e = datetime(2020, 5, 17, 9, 38, 20) # df_user_norm = get_between(df_user, target, 'sum', s, e, duration)
2.923072
3
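A tiny, hand-checked illustration of get_point from the record above; it assumes features.py (and its prep dependency) is importable, and the column names match the stime/etime constants defined at the top of that file:

import pandas as pd
from datetime import datetime
from features import get_point  # assumes the module above is on the path

df = pd.DataFrame({
    'formatted_start_time': ['2020-05-17 11:26:00'],
    'formatted_end_time':   ['2020-05-17 11:27:00'],
    'level': [42],
})
# 11:26:32 falls inside the single interval, so that row's level is returned
print(get_point(df, 'level', datetime(2020, 5, 17, 11, 26, 32)))  # -> 42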
benchs/tao/bench_learned_termination.py
uoynac/faiss-Tao
0
6631508
<filename>benchs/tao/bench_learned_termination.py #!/usr/bin/env python2 import os import sys import time import numpy as np import re import pickle import argparse import math from multiprocessing.dummy import Pool as ThreadPool sys.path.append('/home/wanghongya/faiss-learned-termination-master/python/') import faiss import util ################################################################# # Bookkeeping ################################################################# # Where the dataset base, query, learn files are stored. DB_DIR = '/home/wanghongya/sift1B/' # Where the ground truth files are stored. GT_DIR = 'ground_truth/' # Where the *_trained.index files are stored. TRAINED_IDX_DIR = 'trained_index/' # Where the *_populated.index files and cluster indices are stored. # NOTE that the *_populated.index files can be as large as tens of GBs. POPULATED_IDX_DIR = 'populated_index/' # Where the trained prediction model and training logs are stored. MODEL_DIR = 'training_model/' # Where the training and testing data files are stored. TRAINING_DIR = 'training_data/' if not os.path.isdir(POPULATED_IDX_DIR): print("%s does not exist, creating it" % POPULATED_IDX_DIR) os.mkdir(POPULATED_IDX_DIR) if not os.path.isdir(TRAINING_DIR): print ("%s does not exist, creating it" % TRAINING_DIR) os.mkdir(TRAINING_DIR) parser = argparse.ArgumentParser(description='learned termination benchmark') parser.add_argument('-mode', '--searchmode', help='search mode', required=True) parser.add_argument('-batch', '--batchsize', help='batch size', default='1') parser.add_argument('-train', '--trainsize', help='train size', default='0') parser.add_argument('-cluster', '--numcluster', help='number of clusters', default='1') parser.add_argument('-thread', '--numthread', help='number of threads', default='1') parser.add_argument('-thresh', '--predthresh', help='prediction thresholds', default='1') parser.add_argument('-bsearch', '--binarysearch', help='binary search parameters', default='0,0,0') parser.add_argument('-db', '--dbname', help='database name', required=True) parser.add_argument('-idx', '--indexkey', help='index key', required=True) parser.add_argument('-param', '--parametersets', help='parameter sets', required=True, nargs='+') args = vars(parser.parse_args()) # -2 = generate training data. # -1 = generate testing data. # 0 = baseline (fixed nprobe or fixed efSearch). # 1 = decision tree-based early termination. search_mode = int(args['searchmode']) batch_size = int(args['batchsize']) # batch size train_size = int(args['trainsize']) # how many training vectors (in millions) num_cluster = int(args['numcluster']) # number of cluster for IVF index num_thread = int(args['numthread']) # When to make prediction/generate training/testing data during search. # This is related to the intermediate search result features. pred_thresh = [int(x) for x in args['predthresh'].split(',')] # Binary search to find minimum fixed configuration (for baseline) or minimum # prediction multiplier (for early termination) to reach a certain accuracy # target. binary_search = int(args['binarysearch'].split(',')[0]) binary_range = [int(args['binarysearch'].split(',')[1]), int(args['binarysearch'].split(',')[2])] dbname = args['dbname'] # e.g.: SIFT1M index_key = args['indexkey'] # e.g.: IVF1000 parametersets = args['parametersets'] # e.g.: nprobe={1,2} # Number of iterations over all queries (to get stable performance number). 
num_iter = 4 # When multi-threading is enabled, it indicates that latency measurement # is not the purpose of the experiment. Thus we only run one iteration. if num_thread > 1: num_iter = 1 ################################################################# # Prepare dataset ################################################################# print('Preparing dataset {}'.format(dbname)) if dbname.startswith('SIFT'): dbsize = int(dbname[4:-1]) xb = util.mmap_bvecs('{}1milliard.p1.siftbin'.format(DB_DIR)) xq = util.mmap_bvecs('{}bigann_query.bvecs'.format(DB_DIR)) xt = util.mmap_bvecs('{}bigann_learn.bvecs'.format(DB_DIR)) # trim xb to correct size xb = xb[:dbsize * 1000 * 1000] gt = util.read_tsv('{}gtSIFT{}Mtest.tsv'.format(GT_DIR, dbsize)) if search_mode == 0 and train_size > 0 and binary_search == 1: # Take a sample from the training vector to find the minimum fixed # termination condition to reach different accuracy targets. This is # needed to choose the intermediate search result features when # generating training data. xq = xt[:10000] gt = util.read_tsv('{}gtSIFT{}Mtrain{}M.tsv'.format(GT_DIR, dbsize, train_size))[:10000] if search_mode == -2: xq = xt[:train_size * 1000 * 1000] gt = util.read_tsv('{}gtSIFT{}Mtrain{}M.tsv'.format(GT_DIR, dbsize, train_size)) elif dbname.startswith('DEEP'): DB_DIR = '/home/wanghongya/deep1b/' dbsize = int(dbname[4:-1]) xb = util.mmap_fvecs('{}deep1B_base.fvecs'.format(DB_DIR)) xq = util.mmap_fvecs('{}deep1B_queries.fvecs'.format(DB_DIR)) xt = util.mmap_fvecs('{}deep1B_learn.fvecs'.format(DB_DIR)) # trim xb to correct size xb = xb[:dbsize * 1000 * 1000] gt = util.read_tsv('{}gtDEEP{}Mtest.tsv'.format(GT_DIR, dbsize)) if search_mode == 0 and train_size > 0 and binary_search == 1: # Take a sample from the training vector to find the minimum fixed # termination condition to reach different accuracy targets. This is # needed to choose the intermediate search result features when # generating training data. xq = xt[:10000] gt = util.read_tsv('{}gtDEEP{}Mtrain{}M.tsv'.format(GT_DIR, dbsize, train_size))[:10000] if search_mode == -2: xq = xt[:train_size * 1000 * 1000] gt = util.read_tsv('{}gtDEEP{}Mtrain{}M.tsv'.format(GT_DIR, dbsize, train_size)) elif dbname.startswith('GIST'): DB_DIR = '/home/wanghongya/gist/' xb = util.mmap_fvecs('{}gist_base.fvecs'.format(DB_DIR)) xq = util.mmap_fvecs('{}gist_query.fvecs'.format(DB_DIR)) xt = util.mmap_fvecs('{}gist_learn.fvecs'.format(DB_DIR)) gt = util.read_tsv('{}gtGIST1Mtest.tsv'.format(GT_DIR)) if search_mode == 0 and train_size > 0 and binary_search == 1: # Take a sample from the training vector to find the minimum fixed # termination condition to reach different accuracy targets. This is # needed to choose the intermediate search result features when # generating training data. 
xq = xt[:10000] gt = util.read_tsv('{}gtGIST1Mtrain500K.tsv'.format(GT_DIR))[:10000] if search_mode == -2: xq = xt gt = util.read_tsv('{}gtGIST1Mtrain500K.tsv'.format(GT_DIR)) elif dbname.startswith('GLOV'): DB_DIR = '/home/wanghongya/dataset/glove/' xb = util.mmap_fvecs('{}glove_base.fvecs'.format(DB_DIR))[110000:] xq = util.mmap_fvecs('{}glove_base.fvecs'.format(DB_DIR))[100000:110000] xt = util.mmap_fvecs('{}glove_base.fvecs'.format(DB_DIR))[:100000] gt = util.read_tsv('{}gtglove1Mtest.tsv'.format(GT_DIR)) if search_mode == 0 and train_size > 0 and binary_search == 1: xq = xt[:10000] gt = util.read_tsv('{}gtglove{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1))[:10000] if search_mode == -2: xq = xt gt = util.read_tsv('{}gtglove{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1)) elif dbname.startswith('IMGN'): DB_DIR = '/home/wanghongya/dataset/imageNet/' GT_DIR = 'gt/' xb = util.mmap_fvecs('{}imageNet_base.fvecs'.format(DB_DIR))[210000:] xq = util.mmap_fvecs('{}imageNet_base.fvecs'.format(DB_DIR))[200000:210000] xt = util.mmap_fvecs('{}imageNet_base.fvecs'.format(DB_DIR))[:200000] gt = util.read_tsv('{}gtimageNet10Mtest.tsv'.format(GT_DIR)) gt1 = np.array(util.read_tsv('{}gtimageNet10Mtest1.tsv'.format(GT_DIR))) if search_mode == 0 and train_size > 0 and binary_search == 1: xq = xt[:10000] gt = util.read_tsv('{}gtimageNet{}Mtrain{}M.tsv'.format(GT_DIR, 10, 1))[:10000] gt1 = np.array(util.read_tsv('{}gtimageNet{}Mtrain{}M1.tsv'.format(GT_DIR, 10, 1))[:10000]) if search_mode == -2: xq = xt gt = util.read_tsv('{}gtimageNet{}Mtrain{}M.tsv'.format(GT_DIR, 10, 1)) gt1 = np.array(util.read_tsv('{}gtimageNet{}Mtrain{}M1.tsv'.format(GT_DIR, 10, 1))) elif dbname.startswith('SONG'): DB_DIR = '/home/wanghongya/dataset/msong/' GT_DIR = 'ground_truth/' xb = util.mmap_fvecs('{}millionSong_base.fvecs'.format(DB_DIR))[110000:] xq = util.mmap_fvecs('{}millionSong_base.fvecs'.format(DB_DIR))[100000:110000] xt = util.mmap_fvecs('{}millionSong_base.fvecs'.format(DB_DIR))[:100000] gt = util.read_tsv('{}gtmsong1Mtest.tsv'.format(GT_DIR)) if search_mode == 0 and train_size > 0 and binary_search == 1: xq = xt[:10000] gt = util.read_tsv('{}gtmsong{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1))[:10000] if search_mode == -2: xq = xt gt = util.read_tsv('{}gtmsong{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1)) elif dbname.startswith('TREV'): DB_DIR = '/home/wanghongya/dataset/trevi/' GT_DIR = 'ground_truth/' xb = util.mmap_fvecs('{}trevi_base.fvecs'.format(DB_DIR))[21000:] xq = util.mmap_fvecs('{}trevi_base.fvecs'.format(DB_DIR))[:1000] xt = util.mmap_fvecs('{}trevi_base.fvecs'.format(DB_DIR))[1000:21000] gt = util.read_tsv('{}gttrevi1Mtest.tsv'.format(GT_DIR)) if search_mode == 0 and train_size > 0 and binary_search == 1: xq = xt[:10000] gt = util.read_tsv('{}gttrevi{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1))[:10000] if search_mode == -2: xq = xt gt = util.read_tsv('{}gttrevi{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1)) else: print >> sys.stderr, 'unknown dataset', dbname sys.exit(1) print("sizes: B {} Q {} T {} gt {}".format(xb.shape, xq.shape, xt.shape, len(gt))) nq, d = xq.shape nb, d = xb.shape ################################################################# # Training ################################################################# def choose_train_size(index_key): # some training vectors for PQ and the PCA n_train = 256 * 1000 if "IVF" in index_key: matches = re.findall('IVF([0-9]+)', index_key) ncentroids = int(matches[0]) n_train = max(n_train, 100 * ncentroids) elif "IMI" in index_key: matches = re.findall('IMI2x([0-9]+)', index_key) nbit = int(matches[0]) 
n_train = max(n_train, 256 * (1 << nbit)) return n_train def get_trained_index(): filename = "%s%s_%s_trained.index" % ( TRAINED_IDX_DIR, dbname, index_key) if not os.path.exists(filename): index = faiss.index_factory(d, index_key) n_train = choose_train_size(index_key) xtsub = xt[:n_train] print("Keeping {} train vectors".format(xtsub.shape[0])) # make sure the data is actually in RAM and in float xtsub = xtsub.astype('float32').copy() index.verbose = True t0 = time.time() index.train(xtsub) index.verbose = False print("train done in {} s".format(time.time() - t0)) print("storing {}".format(filename)) faiss.write_index(index, filename) else: print("loading {}".format(filename)) index = faiss.read_index(filename) return index ################################################################# # Adding vectors to dataset ################################################################# def rate_limited_imap(f, l): 'a thread pre-processes the next element' pool = ThreadPool(1) res = None for i in l: res_next = pool.apply_async(f, (i[0], i[1], )) if res: yield res.get() res = res_next yield res.get() def matrix_slice_iterator(x, bs): " iterate over the lines of x in blocks of size bs" nb = x.shape[0] block_ranges = [(i0, min(nb, i0 + bs)) for i0 in range(0, nb, bs)] return rate_limited_imap( lambda i0, i1: x[i0:i1].astype('float32').copy(), block_ranges) def get_populated_index(): filename = "%s%s_%s_populated.index" % ( POPULATED_IDX_DIR, dbname, index_key) filenameC = "%s%s_C%d" % ( POPULATED_IDX_DIR, dbname, num_cluster) if not os.path.exists(filename): index = get_trained_index() index.verbose = True i0 = 0 t0 = time.time() for xs in matrix_slice_iterator(xb, 10000000): i1 = i0 + xs.shape[0] print('\radd {}:{}, {} s'.format(i0, i1, time.time() - t0)), sys.stdout.flush() index.add(xs) i0 = i1 print print("Add done in {} s".format(time.time() - t0)) print("storing {}".format(filename)) index.verbose = False faiss.write_index(index, filename) faiss.write_cluster_id(index, filenameC) else: print("loading {}".format(filename)) index = faiss.read_index(filename) return index ################################################################# # Perform searches ################################################################# def compute_recall(result, ground, ncandidate): count = 0 for i in range(len(result)): for j in range(ncandidate): if result[i][j] in ground[i]: count += 1 break return count # Binary search to find minimum fixed configuration (for baseline) or minimum # prediction multiplier (for early termination) to reach a certain accuracy # target. def find_config(target, d, k): ret = float('inf') l, r = binary_range[0], binary_range[1] for key, value in d.items(): if value < target: l = max(l, key+1) else: r = min(r, key) while l <= r: mid = l + int(math.floor((r-l)/2)) # change the config to mid if index_key[:4] == 'HNSW': pa = 'efSearch={}'.format(mid) else: pa = 'nprobe={}'.format(mid) sys.stdout.flush() ps.set_index_parameters(index, pa) totalR = 0.0 for i in range(0, nq, batch_size): query = xq[i:i+batch_size,:] D, I = index.search(query, k) totalR += compute_recall(I[:, :100], gt[i:i+batch_size], 100) totalR = totalR / float(nq) d[mid] = totalR print('{}, accuracy = {}'.format(pa, totalR)) if totalR >= target: ret = min(ret, mid) r = mid-1 else: l = mid+1 if ret != float('inf'): return ret, target else: return find_config(max(d.values()), d, k) # Read (and build if necessary) the search index. index = get_populated_index() # Load the prediction model for HNSW index. 
if search_mode == 1 and index_key[:4] == 'HNSW': for t in pred_thresh: modelname = '{}{}_{}_model_thresh{}_Log_Full.txt'.format(MODEL_DIR, dbname, index_key, t) faiss.load_model(index, modelname) # Load the prediction model for IVF index. if search_mode == 1 and index_key[:4] != 'HNSW': for t in pred_thresh: modelname = '{}{}_{}_model_thresh{}_Full.txt'.format(MODEL_DIR, dbname, index_key, t) if index_key[:3] == 'OPQ' and int(dbname[4:-1]) == 1000: modelname = '{}{}_{}_model_thresh{}_Log_Full.txt'.format(MODEL_DIR, dbname, index_key, t) faiss.load_model(index, modelname) # Load the pred_thresh into the search index. if search_mode != 0: faiss.load_thresh(index, -1) for t in pred_thresh: faiss.load_thresh(index, t) ps = faiss.ParameterSpace() ps.initialize(index) print("ParameterSpace initialize done") # make sure queries are in RAM xq = xq.astype('float32').copy() # Where the training/testing data will be stored before written to files. if search_mode < 0: if index_key[:4] == 'HNSW': data = [] else: data = [] param_list = [] for param in parametersets: param_list.append(param) recall_list = [0.0]*len(parametersets) latency_list = [0.0]*len(parametersets) faiss.omp_set_num_threads(num_thread) k = 100 # To get the cluster indices where the ground truth nearest neighbor(s) reside # for the IVF case. We need this to determine the minimum termination # condition. This is achieved by combining two things: 1) Using the # write_cluster_id() we wrote the cluster index of all database vectors into # files. 2) In a computeGT.py we performed exhaustive search to find which # database vectors are ground truth nearest neighbor(s). if search_mode < 0 and index_key[:4] != 'HNSW': if index_key[:3] == 'OPQ': pkl_filename = '{}{}_C{}_gtcluster{}_opq.pkl'.format(GT_DIR, dbname, num_cluster, -1*search_mode) else: pkl_filename = '{}{}_C{}_gtcluster{}.pkl'.format(GT_DIR, dbname, num_cluster, -1*search_mode) if not os.path.exists(pkl_filename): if index_key[:3] == 'OPQ': clusterid = (np.fromfile('{}{}_C{}_clusterid_quantized.tsv'.format( POPULATED_IDX_DIR, dbname, num_cluster), dtype='uint64', sep='\t')).reshape(-1, 2) else: clusterid = (np.fromfile('{}/{}_C{}_clusterid.tsv'.format( POPULATED_IDX_DIR, dbname, num_cluster), dtype='uint64', sep='\t')).reshape(-1, 2) gt_clusters = {} # If a database vector is a ground truth nearest neighbor to a query, # insert its index as a key into gt_clusters dict. for i in range(nq): for j in range(len(gt[i])): gt_clusters[gt[i][j]] = 0 # Then for each key, insert its cluster index as the value into # gt_clusters dict. 
for i in range(len(clusterid)): if clusterid[i][0] in gt_clusters: gt_clusters[clusterid[i][0]] = clusterid[i][1] output = open(pkl_filename, 'wb') pickle.dump(gt_clusters, output) output.close() else: pkl_file = open(pkl_filename, 'rb') gt_clusters = pickle.load(pkl_file) pkl_file.close() faiss.copy_array_to_vector(np.array(util.read_tsv('/home/wanghongya/tmp/glove_pred_efs1_lgb.tsv'), dtype='float32').ravel(), index.lid_pred_vec) hnsw_stats = faiss.cvar.hnsw_stats if binary_search == 0: for it in range(num_iter): print('iteration {}'.format(it)) if k < 10: print(' '*(len(parametersets[-1])+1)+'R@1 R@10 R@100 time(ms)') elif k < 100: print(' '*(len(parametersets[-1])+1)+'R@1 R@10 R@100 time(ms)') else: print(' '*(len(parametersets[-1])+1)+'R@1 R@10 R@100 ndis time(ms)') for param in range(len(parametersets)): sys.stdout.flush() ps.set_index_parameters(index, parametersets[param]) total_recall_at1 = 0.0 total_recall_at10 = 0.0 total_recall_at100 = 0.0 total_latency = 0.0 for i in range(0, nq, batch_size): # When generating training/testing data for the IVF case, # load the cluster indices where the ground truth nearest # neighbor(s) reside. if search_mode < 0 and index_key[:4] != 'HNSW': faiss.load_gt(index, -1) for j in range(batch_size): faiss.load_gt(index, -2) for l in range(len(gt[i+j])): faiss.load_gt(index, int(gt_clusters[gt[i+j][l]])) # When generating training/testing data for the HNSW case, # load the database vector indices of the ground truth # nearest neighbor(s). if search_mode < 0 and index_key[:4] == 'HNSW': faiss.load_gt(index, -1) for j in range(batch_size): faiss.load_gt(index, -2) for l in range(len(gt[i+j])): faiss.load_gt(index, int(gt[i+j][l])) query = xq[i:i+batch_size,:] hnsw_stats.reset() t0 = time.time() D, I = index.search(query, k) t1 = time.time() total_latency += t1-t0 total_recall_at1 += compute_recall(I[:, :1], gt[i:i+batch_size], 1) total_recall_at10 += compute_recall(I[:, :10], gt[i:i+batch_size], 10) total_recall_at100 += compute_recall(I[:, :100], gt[i:i+batch_size], 100) if search_mode < 0: # When generating training/testing data, read the returned # search results (since this is where we stored the # features and targe values). if index_key[:4] == 'HNSW': for j in range(len(I)): line = [] line.append(int(D[j][0])) line.append(i+j) for l in range(1+4*len(pred_thresh)): line.append(D[j][l+1]) data.append(line) else: for j in range(len(I)): line = [] line.append(int(D[j][0])) line.append(i+j) for l in range(10+4*len(pred_thresh)): line.append(D[j][l+1]) data.append(line) tr1 = total_recall_at1 / float(nq) tr10 = total_recall_at10 / float(nq) tr100 = total_recall_at100 / float(nq) tt = total_latency * 1000.0 / nq print(parametersets[param]+ ' '*(len(parametersets[-1])+1-len(parametersets[param]))+ '{:.4f} {:.4f} {:.4f} {:.2f} {:.4f}'.format( round(tr1,4), round(tr10,4), round(tr100,4), hnsw_stats.ndis/nq, round(tt,4))) if it > 0 or num_iter == 1: recall_list[param] += total_recall_at100 latency_list[param] += total_latency # Write the training/testing data files. 
if search_mode == -1: util.write_tsv(data, '{}{}_{}_test.tsv'.format(TRAINING_DIR, dbname, index_key)) if search_mode == -2: util.write_tsv(data, '{}{}_{}_train.tsv'.format(TRAINING_DIR, dbname, index_key)) denom = float(nq*max(num_iter-1, 1)) recall_list = [x/denom for x in recall_list] latency_list = [round(x*1000.0/denom, 4) for x in latency_list] print('param_list = {}'.format(param_list)) print('recall target = {}'.format(recall_list)) print('average latency(ms) = {}'.format(latency_list)) print('result_{}_{} = {}'.format(dbname, index_key, [latency_list, recall_list])) else: # Binary search to find minimum fixed configuration (for baseline) or minimum # prediction multiplier (for early termination) to reach a certain accuracy # target. target = [0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.96, 0.97, 0.98, 0.99, 1] if dbname.startswith('DEEP10M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.97, 0.98, 0.99, 0.999] if dbname.startswith('GIST1M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.97, 0.98, 0.99, 0.997] if dbname.startswith('GLOV1M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.9668] if dbname.startswith('IMGN1M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.97, 0.98, 0.99, 0.9982] if dbname.startswith('SONG1M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.97, 0.98, 0.99, 1] if dbname.startswith('TREV1M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.97, 0.98, 0.99, 0.997] # For billion-scale, stop at 0.995 because it takes too long to reach 1.0. if int(dbname[4:-1]) == 1000: target = [0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.96, 0.97, 0.98, 0.99, 0.995] res = [] d = {} sys.stdout.flush() ps.set_index_parameters(index, parametersets[0]) for t in target: ret, act_t = find_config(t, d, k) print('To reach recall target {} the min. config/multiplier is {}.'.format(act_t, ret)) res.append(ret) if act_t != t: break print('List of min. config/multiplier = {}'.format(res))
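compute_recall in the benchmark above counts, per query, whether any of the first ncandidate returned IDs appears in that query's ground-truth list; divided by nq this gives the recall that find_config drives toward its target. A standalone toy check of that counting, included here as an illustration only:

# standalone copy of the recall counting used by the benchmark, for a toy check
def compute_recall(result, ground, ncandidate):
    count = 0
    for i in range(len(result)):
        for j in range(ncandidate):
            if result[i][j] in ground[i]:
                count += 1
                break
    return count

# query 0 finds a true neighbour (1), query 1 finds none -> 1 of 2 queries counted
print(compute_recall([[5, 9, 1], [7, 8, 6]], [[1, 2, 3], [4, 0, 2]], 3))  # -> 1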
<filename>benchs/tao/bench_learned_termination.py #!/usr/bin/env python2 import os import sys import time import numpy as np import re import pickle import argparse import math from multiprocessing.dummy import Pool as ThreadPool sys.path.append('/home/wanghongya/faiss-learned-termination-master/python/') import faiss import util ################################################################# # Bookkeeping ################################################################# # Where the dataset base, query, learn files are stored. DB_DIR = '/home/wanghongya/sift1B/' # Where the ground truth files are stored. GT_DIR = 'ground_truth/' # Where the *_trained.index files are stored. TRAINED_IDX_DIR = 'trained_index/' # Where the *_populated.index files and cluster indices are stored. # NOTE that the *_populated.index files can be as large as tens of GBs. POPULATED_IDX_DIR = 'populated_index/' # Where the trained prediction model and training logs are stored. MODEL_DIR = 'training_model/' # Where the training and testing data files are stored. TRAINING_DIR = 'training_data/' if not os.path.isdir(POPULATED_IDX_DIR): print("%s does not exist, creating it" % POPULATED_IDX_DIR) os.mkdir(POPULATED_IDX_DIR) if not os.path.isdir(TRAINING_DIR): print ("%s does not exist, creating it" % TRAINING_DIR) os.mkdir(TRAINING_DIR) parser = argparse.ArgumentParser(description='learned termination benchmark') parser.add_argument('-mode', '--searchmode', help='search mode', required=True) parser.add_argument('-batch', '--batchsize', help='batch size', default='1') parser.add_argument('-train', '--trainsize', help='train size', default='0') parser.add_argument('-cluster', '--numcluster', help='number of clusters', default='1') parser.add_argument('-thread', '--numthread', help='number of threads', default='1') parser.add_argument('-thresh', '--predthresh', help='prediction thresholds', default='1') parser.add_argument('-bsearch', '--binarysearch', help='binary search parameters', default='0,0,0') parser.add_argument('-db', '--dbname', help='database name', required=True) parser.add_argument('-idx', '--indexkey', help='index key', required=True) parser.add_argument('-param', '--parametersets', help='parameter sets', required=True, nargs='+') args = vars(parser.parse_args()) # -2 = generate training data. # -1 = generate testing data. # 0 = baseline (fixed nprobe or fixed efSearch). # 1 = decision tree-based early termination. search_mode = int(args['searchmode']) batch_size = int(args['batchsize']) # batch size train_size = int(args['trainsize']) # how many training vectors (in millions) num_cluster = int(args['numcluster']) # number of cluster for IVF index num_thread = int(args['numthread']) # When to make prediction/generate training/testing data during search. # This is related to the intermediate search result features. pred_thresh = [int(x) for x in args['predthresh'].split(',')] # Binary search to find minimum fixed configuration (for baseline) or minimum # prediction multiplier (for early termination) to reach a certain accuracy # target. binary_search = int(args['binarysearch'].split(',')[0]) binary_range = [int(args['binarysearch'].split(',')[1]), int(args['binarysearch'].split(',')[2])] dbname = args['dbname'] # e.g.: SIFT1M index_key = args['indexkey'] # e.g.: IVF1000 parametersets = args['parametersets'] # e.g.: nprobe={1,2} # Number of iterations over all queries (to get stable performance number). 
num_iter = 4 # When multi-threading is enabled, it indicates that latency measurement # is not the purpose of the experiment. Thus we only run one iteration. if num_thread > 1: num_iter = 1 ################################################################# # Prepare dataset ################################################################# print('Preparing dataset {}'.format(dbname)) if dbname.startswith('SIFT'): dbsize = int(dbname[4:-1]) xb = util.mmap_bvecs('{}1milliard.p1.siftbin'.format(DB_DIR)) xq = util.mmap_bvecs('{}bigann_query.bvecs'.format(DB_DIR)) xt = util.mmap_bvecs('{}bigann_learn.bvecs'.format(DB_DIR)) # trim xb to correct size xb = xb[:dbsize * 1000 * 1000] gt = util.read_tsv('{}gtSIFT{}Mtest.tsv'.format(GT_DIR, dbsize)) if search_mode == 0 and train_size > 0 and binary_search == 1: # Take a sample from the training vector to find the minimum fixed # termination condition to reach different accuracy targets. This is # needed to choose the intermediate search result features when # generating training data. xq = xt[:10000] gt = util.read_tsv('{}gtSIFT{}Mtrain{}M.tsv'.format(GT_DIR, dbsize, train_size))[:10000] if search_mode == -2: xq = xt[:train_size * 1000 * 1000] gt = util.read_tsv('{}gtSIFT{}Mtrain{}M.tsv'.format(GT_DIR, dbsize, train_size)) elif dbname.startswith('DEEP'): DB_DIR = '/home/wanghongya/deep1b/' dbsize = int(dbname[4:-1]) xb = util.mmap_fvecs('{}deep1B_base.fvecs'.format(DB_DIR)) xq = util.mmap_fvecs('{}deep1B_queries.fvecs'.format(DB_DIR)) xt = util.mmap_fvecs('{}deep1B_learn.fvecs'.format(DB_DIR)) # trim xb to correct size xb = xb[:dbsize * 1000 * 1000] gt = util.read_tsv('{}gtDEEP{}Mtest.tsv'.format(GT_DIR, dbsize)) if search_mode == 0 and train_size > 0 and binary_search == 1: # Take a sample from the training vector to find the minimum fixed # termination condition to reach different accuracy targets. This is # needed to choose the intermediate search result features when # generating training data. xq = xt[:10000] gt = util.read_tsv('{}gtDEEP{}Mtrain{}M.tsv'.format(GT_DIR, dbsize, train_size))[:10000] if search_mode == -2: xq = xt[:train_size * 1000 * 1000] gt = util.read_tsv('{}gtDEEP{}Mtrain{}M.tsv'.format(GT_DIR, dbsize, train_size)) elif dbname.startswith('GIST'): DB_DIR = '/home/wanghongya/gist/' xb = util.mmap_fvecs('{}gist_base.fvecs'.format(DB_DIR)) xq = util.mmap_fvecs('{}gist_query.fvecs'.format(DB_DIR)) xt = util.mmap_fvecs('{}gist_learn.fvecs'.format(DB_DIR)) gt = util.read_tsv('{}gtGIST1Mtest.tsv'.format(GT_DIR)) if search_mode == 0 and train_size > 0 and binary_search == 1: # Take a sample from the training vector to find the minimum fixed # termination condition to reach different accuracy targets. This is # needed to choose the intermediate search result features when # generating training data. 
xq = xt[:10000] gt = util.read_tsv('{}gtGIST1Mtrain500K.tsv'.format(GT_DIR))[:10000] if search_mode == -2: xq = xt gt = util.read_tsv('{}gtGIST1Mtrain500K.tsv'.format(GT_DIR)) elif dbname.startswith('GLOV'): DB_DIR = '/home/wanghongya/dataset/glove/' xb = util.mmap_fvecs('{}glove_base.fvecs'.format(DB_DIR))[110000:] xq = util.mmap_fvecs('{}glove_base.fvecs'.format(DB_DIR))[100000:110000] xt = util.mmap_fvecs('{}glove_base.fvecs'.format(DB_DIR))[:100000] gt = util.read_tsv('{}gtglove1Mtest.tsv'.format(GT_DIR)) if search_mode == 0 and train_size > 0 and binary_search == 1: xq = xt[:10000] gt = util.read_tsv('{}gtglove{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1))[:10000] if search_mode == -2: xq = xt gt = util.read_tsv('{}gtglove{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1)) elif dbname.startswith('IMGN'): DB_DIR = '/home/wanghongya/dataset/imageNet/' GT_DIR = 'gt/' xb = util.mmap_fvecs('{}imageNet_base.fvecs'.format(DB_DIR))[210000:] xq = util.mmap_fvecs('{}imageNet_base.fvecs'.format(DB_DIR))[200000:210000] xt = util.mmap_fvecs('{}imageNet_base.fvecs'.format(DB_DIR))[:200000] gt = util.read_tsv('{}gtimageNet10Mtest.tsv'.format(GT_DIR)) gt1 = np.array(util.read_tsv('{}gtimageNet10Mtest1.tsv'.format(GT_DIR))) if search_mode == 0 and train_size > 0 and binary_search == 1: xq = xt[:10000] gt = util.read_tsv('{}gtimageNet{}Mtrain{}M.tsv'.format(GT_DIR, 10, 1))[:10000] gt1 = np.array(util.read_tsv('{}gtimageNet{}Mtrain{}M1.tsv'.format(GT_DIR, 10, 1))[:10000]) if search_mode == -2: xq = xt gt = util.read_tsv('{}gtimageNet{}Mtrain{}M.tsv'.format(GT_DIR, 10, 1)) gt1 = np.array(util.read_tsv('{}gtimageNet{}Mtrain{}M1.tsv'.format(GT_DIR, 10, 1))) elif dbname.startswith('SONG'): DB_DIR = '/home/wanghongya/dataset/msong/' GT_DIR = 'ground_truth/' xb = util.mmap_fvecs('{}millionSong_base.fvecs'.format(DB_DIR))[110000:] xq = util.mmap_fvecs('{}millionSong_base.fvecs'.format(DB_DIR))[100000:110000] xt = util.mmap_fvecs('{}millionSong_base.fvecs'.format(DB_DIR))[:100000] gt = util.read_tsv('{}gtmsong1Mtest.tsv'.format(GT_DIR)) if search_mode == 0 and train_size > 0 and binary_search == 1: xq = xt[:10000] gt = util.read_tsv('{}gtmsong{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1))[:10000] if search_mode == -2: xq = xt gt = util.read_tsv('{}gtmsong{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1)) elif dbname.startswith('TREV'): DB_DIR = '/home/wanghongya/dataset/trevi/' GT_DIR = 'ground_truth/' xb = util.mmap_fvecs('{}trevi_base.fvecs'.format(DB_DIR))[21000:] xq = util.mmap_fvecs('{}trevi_base.fvecs'.format(DB_DIR))[:1000] xt = util.mmap_fvecs('{}trevi_base.fvecs'.format(DB_DIR))[1000:21000] gt = util.read_tsv('{}gttrevi1Mtest.tsv'.format(GT_DIR)) if search_mode == 0 and train_size > 0 and binary_search == 1: xq = xt[:10000] gt = util.read_tsv('{}gttrevi{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1))[:10000] if search_mode == -2: xq = xt gt = util.read_tsv('{}gttrevi{}Mtrain{}M.tsv'.format(GT_DIR, 1, 1)) else: print >> sys.stderr, 'unknown dataset', dbname sys.exit(1) print("sizes: B {} Q {} T {} gt {}".format(xb.shape, xq.shape, xt.shape, len(gt))) nq, d = xq.shape nb, d = xb.shape ################################################################# # Training ################################################################# def choose_train_size(index_key): # some training vectors for PQ and the PCA n_train = 256 * 1000 if "IVF" in index_key: matches = re.findall('IVF([0-9]+)', index_key) ncentroids = int(matches[0]) n_train = max(n_train, 100 * ncentroids) elif "IMI" in index_key: matches = re.findall('IMI2x([0-9]+)', index_key) nbit = int(matches[0]) 
n_train = max(n_train, 256 * (1 << nbit)) return n_train def get_trained_index(): filename = "%s%s_%s_trained.index" % ( TRAINED_IDX_DIR, dbname, index_key) if not os.path.exists(filename): index = faiss.index_factory(d, index_key) n_train = choose_train_size(index_key) xtsub = xt[:n_train] print("Keeping {} train vectors".format(xtsub.shape[0])) # make sure the data is actually in RAM and in float xtsub = xtsub.astype('float32').copy() index.verbose = True t0 = time.time() index.train(xtsub) index.verbose = False print("train done in {} s".format(time.time() - t0)) print("storing {}".format(filename)) faiss.write_index(index, filename) else: print("loading {}".format(filename)) index = faiss.read_index(filename) return index ################################################################# # Adding vectors to dataset ################################################################# def rate_limited_imap(f, l): 'a thread pre-processes the next element' pool = ThreadPool(1) res = None for i in l: res_next = pool.apply_async(f, (i[0], i[1], )) if res: yield res.get() res = res_next yield res.get() def matrix_slice_iterator(x, bs): " iterate over the lines of x in blocks of size bs" nb = x.shape[0] block_ranges = [(i0, min(nb, i0 + bs)) for i0 in range(0, nb, bs)] return rate_limited_imap( lambda i0, i1: x[i0:i1].astype('float32').copy(), block_ranges) def get_populated_index(): filename = "%s%s_%s_populated.index" % ( POPULATED_IDX_DIR, dbname, index_key) filenameC = "%s%s_C%d" % ( POPULATED_IDX_DIR, dbname, num_cluster) if not os.path.exists(filename): index = get_trained_index() index.verbose = True i0 = 0 t0 = time.time() for xs in matrix_slice_iterator(xb, 10000000): i1 = i0 + xs.shape[0] print('\radd {}:{}, {} s'.format(i0, i1, time.time() - t0)), sys.stdout.flush() index.add(xs) i0 = i1 print print("Add done in {} s".format(time.time() - t0)) print("storing {}".format(filename)) index.verbose = False faiss.write_index(index, filename) faiss.write_cluster_id(index, filenameC) else: print("loading {}".format(filename)) index = faiss.read_index(filename) return index ################################################################# # Perform searches ################################################################# def compute_recall(result, ground, ncandidate): count = 0 for i in range(len(result)): for j in range(ncandidate): if result[i][j] in ground[i]: count += 1 break return count # Binary search to find minimum fixed configuration (for baseline) or minimum # prediction multiplier (for early termination) to reach a certain accuracy # target. def find_config(target, d, k): ret = float('inf') l, r = binary_range[0], binary_range[1] for key, value in d.items(): if value < target: l = max(l, key+1) else: r = min(r, key) while l <= r: mid = l + int(math.floor((r-l)/2)) # change the config to mid if index_key[:4] == 'HNSW': pa = 'efSearch={}'.format(mid) else: pa = 'nprobe={}'.format(mid) sys.stdout.flush() ps.set_index_parameters(index, pa) totalR = 0.0 for i in range(0, nq, batch_size): query = xq[i:i+batch_size,:] D, I = index.search(query, k) totalR += compute_recall(I[:, :100], gt[i:i+batch_size], 100) totalR = totalR / float(nq) d[mid] = totalR print('{}, accuracy = {}'.format(pa, totalR)) if totalR >= target: ret = min(ret, mid) r = mid-1 else: l = mid+1 if ret != float('inf'): return ret, target else: return find_config(max(d.values()), d, k) # Read (and build if necessary) the search index. index = get_populated_index() # Load the prediction model for HNSW index. 
if search_mode == 1 and index_key[:4] == 'HNSW': for t in pred_thresh: modelname = '{}{}_{}_model_thresh{}_Log_Full.txt'.format(MODEL_DIR, dbname, index_key, t) faiss.load_model(index, modelname) # Load the prediction model for IVF index. if search_mode == 1 and index_key[:4] != 'HNSW': for t in pred_thresh: modelname = '{}{}_{}_model_thresh{}_Full.txt'.format(MODEL_DIR, dbname, index_key, t) if index_key[:3] == 'OPQ' and int(dbname[4:-1]) == 1000: modelname = '{}{}_{}_model_thresh{}_Log_Full.txt'.format(MODEL_DIR, dbname, index_key, t) faiss.load_model(index, modelname) # Load the pred_thresh into the search index. if search_mode != 0: faiss.load_thresh(index, -1) for t in pred_thresh: faiss.load_thresh(index, t) ps = faiss.ParameterSpace() ps.initialize(index) print("ParameterSpace initialize done") # make sure queries are in RAM xq = xq.astype('float32').copy() # Where the training/testing data will be stored before written to files. if search_mode < 0: if index_key[:4] == 'HNSW': data = [] else: data = [] param_list = [] for param in parametersets: param_list.append(param) recall_list = [0.0]*len(parametersets) latency_list = [0.0]*len(parametersets) faiss.omp_set_num_threads(num_thread) k = 100 # To get the cluster indices where the ground truth nearest neighbor(s) reside # for the IVF case. We need this to determine the minimum termination # condition. This is achieved by combining two things: 1) Using the # write_cluster_id() we wrote the cluster index of all database vectors into # files. 2) In a computeGT.py we performed exhaustive search to find which # database vectors are ground truth nearest neighbor(s). if search_mode < 0 and index_key[:4] != 'HNSW': if index_key[:3] == 'OPQ': pkl_filename = '{}{}_C{}_gtcluster{}_opq.pkl'.format(GT_DIR, dbname, num_cluster, -1*search_mode) else: pkl_filename = '{}{}_C{}_gtcluster{}.pkl'.format(GT_DIR, dbname, num_cluster, -1*search_mode) if not os.path.exists(pkl_filename): if index_key[:3] == 'OPQ': clusterid = (np.fromfile('{}{}_C{}_clusterid_quantized.tsv'.format( POPULATED_IDX_DIR, dbname, num_cluster), dtype='uint64', sep='\t')).reshape(-1, 2) else: clusterid = (np.fromfile('{}/{}_C{}_clusterid.tsv'.format( POPULATED_IDX_DIR, dbname, num_cluster), dtype='uint64', sep='\t')).reshape(-1, 2) gt_clusters = {} # If a database vector is a ground truth nearest neighbor to a query, # insert its index as a key into gt_clusters dict. for i in range(nq): for j in range(len(gt[i])): gt_clusters[gt[i][j]] = 0 # Then for each key, insert its cluster index as the value into # gt_clusters dict. 
for i in range(len(clusterid)): if clusterid[i][0] in gt_clusters: gt_clusters[clusterid[i][0]] = clusterid[i][1] output = open(pkl_filename, 'wb') pickle.dump(gt_clusters, output) output.close() else: pkl_file = open(pkl_filename, 'rb') gt_clusters = pickle.load(pkl_file) pkl_file.close() faiss.copy_array_to_vector(np.array(util.read_tsv('/home/wanghongya/tmp/glove_pred_efs1_lgb.tsv'), dtype='float32').ravel(), index.lid_pred_vec) hnsw_stats = faiss.cvar.hnsw_stats if binary_search == 0: for it in range(num_iter): print('iteration {}'.format(it)) if k < 10: print(' '*(len(parametersets[-1])+1)+'R@1 R@10 R@100 time(ms)') elif k < 100: print(' '*(len(parametersets[-1])+1)+'R@1 R@10 R@100 time(ms)') else: print(' '*(len(parametersets[-1])+1)+'R@1 R@10 R@100 ndis time(ms)') for param in range(len(parametersets)): sys.stdout.flush() ps.set_index_parameters(index, parametersets[param]) total_recall_at1 = 0.0 total_recall_at10 = 0.0 total_recall_at100 = 0.0 total_latency = 0.0 for i in range(0, nq, batch_size): # When generating training/testing data for the IVF case, # load the cluster indices where the ground truth nearest # neighbor(s) reside. if search_mode < 0 and index_key[:4] != 'HNSW': faiss.load_gt(index, -1) for j in range(batch_size): faiss.load_gt(index, -2) for l in range(len(gt[i+j])): faiss.load_gt(index, int(gt_clusters[gt[i+j][l]])) # When generating training/testing data for the HNSW case, # load the database vector indices of the ground truth # nearest neighbor(s). if search_mode < 0 and index_key[:4] == 'HNSW': faiss.load_gt(index, -1) for j in range(batch_size): faiss.load_gt(index, -2) for l in range(len(gt[i+j])): faiss.load_gt(index, int(gt[i+j][l])) query = xq[i:i+batch_size,:] hnsw_stats.reset() t0 = time.time() D, I = index.search(query, k) t1 = time.time() total_latency += t1-t0 total_recall_at1 += compute_recall(I[:, :1], gt[i:i+batch_size], 1) total_recall_at10 += compute_recall(I[:, :10], gt[i:i+batch_size], 10) total_recall_at100 += compute_recall(I[:, :100], gt[i:i+batch_size], 100) if search_mode < 0: # When generating training/testing data, read the returned # search results (since this is where we stored the # features and targe values). if index_key[:4] == 'HNSW': for j in range(len(I)): line = [] line.append(int(D[j][0])) line.append(i+j) for l in range(1+4*len(pred_thresh)): line.append(D[j][l+1]) data.append(line) else: for j in range(len(I)): line = [] line.append(int(D[j][0])) line.append(i+j) for l in range(10+4*len(pred_thresh)): line.append(D[j][l+1]) data.append(line) tr1 = total_recall_at1 / float(nq) tr10 = total_recall_at10 / float(nq) tr100 = total_recall_at100 / float(nq) tt = total_latency * 1000.0 / nq print(parametersets[param]+ ' '*(len(parametersets[-1])+1-len(parametersets[param]))+ '{:.4f} {:.4f} {:.4f} {:.2f} {:.4f}'.format( round(tr1,4), round(tr10,4), round(tr100,4), hnsw_stats.ndis/nq, round(tt,4))) if it > 0 or num_iter == 1: recall_list[param] += total_recall_at100 latency_list[param] += total_latency # Write the training/testing data files. 
if search_mode == -1: util.write_tsv(data, '{}{}_{}_test.tsv'.format(TRAINING_DIR, dbname, index_key)) if search_mode == -2: util.write_tsv(data, '{}{}_{}_train.tsv'.format(TRAINING_DIR, dbname, index_key)) denom = float(nq*max(num_iter-1, 1)) recall_list = [x/denom for x in recall_list] latency_list = [round(x*1000.0/denom, 4) for x in latency_list] print('param_list = {}'.format(param_list)) print('recall target = {}'.format(recall_list)) print('average latency(ms) = {}'.format(latency_list)) print('result_{}_{} = {}'.format(dbname, index_key, [latency_list, recall_list])) else: # Binary search to find minimum fixed configuration (for baseline) or minimum # prediction multiplier (for early termination) to reach a certain accuracy # target. target = [0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.96, 0.97, 0.98, 0.99, 1] if dbname.startswith('DEEP10M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.97, 0.98, 0.99, 0.999] if dbname.startswith('GIST1M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.97, 0.98, 0.99, 0.997] if dbname.startswith('GLOV1M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.9668] if dbname.startswith('IMGN1M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.97, 0.98, 0.99, 0.9982] if dbname.startswith('SONG1M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.97, 0.98, 0.99, 1] if dbname.startswith('TREV1M') and index_key[:4] == 'HNSW': target = [0.95, 0.96, 0.97, 0.98, 0.99, 0.997] # For billion-scale, stop at 0.995 because it takes too long to reach 1.0. if int(dbname[4:-1]) == 1000: target = [0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.96, 0.97, 0.98, 0.99, 0.995] res = [] d = {} sys.stdout.flush() ps.set_index_parameters(index, parametersets[0]) for t in target: ret, act_t = find_config(t, d, k) print('To reach recall target {} the min. config/multiplier is {}.'.format(act_t, ret)) res.append(ret) if act_t != t: break print('List of min. config/multiplier = {}'.format(res))
en
0.739875
#!/usr/bin/env python2 ################################################################# # Bookkeeping ################################################################# # Where the dataset base, query, learn files are stored. # Where the ground truth files are stored. # Where the *_trained.index files are stored. # Where the *_populated.index files and cluster indices are stored. # NOTE that the *_populated.index files can be as large as tens of GBs. # Where the trained prediction model and training logs are stored. # Where the training and testing data files are stored. # -2 = generate training data. # -1 = generate testing data. # 0 = baseline (fixed nprobe or fixed efSearch). # 1 = decision tree-based early termination. # batch size # how many training vectors (in millions) # number of cluster for IVF index # When to make prediction/generate training/testing data during search. # This is related to the intermediate search result features. # Binary search to find minimum fixed configuration (for baseline) or minimum # prediction multiplier (for early termination) to reach a certain accuracy # target. # e.g.: SIFT1M # e.g.: IVF1000 # e.g.: nprobe={1,2} # Number of iterations over all queries (to get stable performance number). # When multi-threading is enabled, it indicates that latency measurement # is not the purpose of the experiment. Thus we only run one iteration. ################################################################# # Prepare dataset ################################################################# # trim xb to correct size # Take a sample from the training vector to find the minimum fixed # termination condition to reach different accuracy targets. This is # needed to choose the intermediate search result features when # generating training data. # trim xb to correct size # Take a sample from the training vector to find the minimum fixed # termination condition to reach different accuracy targets. This is # needed to choose the intermediate search result features when # generating training data. # Take a sample from the training vector to find the minimum fixed # termination condition to reach different accuracy targets. This is # needed to choose the intermediate search result features when # generating training data. ################################################################# # Training ################################################################# # some training vectors for PQ and the PCA # make sure the data is actually in RAM and in float ################################################################# # Adding vectors to dataset ################################################################# ################################################################# # Perform searches ################################################################# # Binary search to find minimum fixed configuration (for baseline) or minimum # prediction multiplier (for early termination) to reach a certain accuracy # target. # change the config to mid # Read (and build if necessary) the search index. # Load the prediction model for HNSW index. # Load the prediction model for IVF index. # Load the pred_thresh into the search index. # make sure queries are in RAM # Where the training/testing data will be stored before written to files. # To get the cluster indices where the ground truth nearest neighbor(s) reside # for the IVF case. We need this to determine the minimum termination # condition. 
This is achieved by combining two things: 1) Using the # write_cluster_id() we wrote the cluster index of all database vectors into # files. 2) In a computeGT.py we performed exhaustive search to find which # database vectors are ground truth nearest neighbor(s). # If a database vector is a ground truth nearest neighbor to a query, # insert its index as a key into gt_clusters dict. # Then for each key, insert its cluster index as the value into # gt_clusters dict. # When generating training/testing data for the IVF case, # load the cluster indices where the ground truth nearest # neighbor(s) reside. # When generating training/testing data for the HNSW case, # load the database vector indices of the ground truth # nearest neighbor(s). # When generating training/testing data, read the returned # search results (since this is where we stored the # features and target values). # Write the training/testing data files. # Binary search to find minimum fixed configuration (for baseline) or minimum # prediction multiplier (for early termination) to reach a certain accuracy # target. # For billion-scale, stop at 0.995 because it takes too long to reach 1.0.
1.884543
2
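The record above measures recall@k and binary-searches for the smallest fixed nprobe/efSearch that reaches an accuracy target. A minimal standalone sketch of those two ideas, assuming a generic evaluate(config) -> recall callback instead of the Faiss ParameterSpace calls used in the file; the callback is assumed to be monotonically non-decreasing in config.

def compute_recall_at_k(results, ground_truth, k):
    """Fraction of queries whose top-k results contain at least one true neighbor."""
    hits = 0
    for res, gt in zip(results, ground_truth):
        if any(r in gt for r in res[:k]):
            hits += 1
    return hits / float(len(results))

def min_config_for_target(evaluate, target, lo, hi):
    """Binary-search the smallest integer config whose recall meets `target`.

    `evaluate(config)` returns the measured recall for that config and is assumed
    to be non-decreasing in `config` (more probes -> higher recall).
    Returns None if even `hi` cannot reach the target.
    """
    best = None
    while lo <= hi:
        mid = (lo + hi) // 2
        if evaluate(mid) >= target:
            best = mid          # feasible; try a smaller configuration
            hi = mid - 1
        else:
            lo = mid + 1        # infeasible; need more probes
    return best

A caller would wrap one search pass (set nprobe, run the queries, compute recall@100) inside evaluate and then call min_config_for_target once per accuracy target, which is essentially what find_config in the record does.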
Search_based_Planning/Search_2D/bfs.py
gtianyi/metaReasoningAnimation
0
6631509
<reponame>gtianyi/metaReasoningAnimation """ Breadth-first Searching_2D (BFS) @author: <NAME> """ import math import heapq import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../Search_based_Planning/") from Search_2D import plotting from Search_2D.Astar import AStar # from collections import deque class BFS(AStar): """BFS add the new visited node in the end of the openset """ def searching(self): """ Breadth-first Searching. :return: path, visited order """ self.PARENT[self.s_start] = self.s_start self.g[self.s_start] = 0 self.g[self.s_goal] = math.inf heapq.heappush(self.OPEN, (0, self.s_start)) while self.OPEN: _, s = heapq.heappop(self.OPEN) self.CLOSED.append(s) if s == self.s_goal: break for s_n in self.get_neighbor(s): new_cost = self.g[s] + self.cost(s, s_n) if s_n not in self.g: self.g[s_n] = math.inf if new_cost < self.g[s_n]: # conditions for updating Cost self.g[s_n] = new_cost self.PARENT[s_n] = s # bfs, add new node to the end of the openset prior = self.OPEN[-1][0]+1 if len(self.OPEN)>0 else 0 heapq.heappush(self.OPEN, (prior, s_n)) return self.extract_path(self.PARENT), self.CLOSED def main(): s_start = (5, 5) s_goal = (45, 25) bfs = BFS(s_start, s_goal, 'None') plot = plotting.Plotting(s_start, s_goal) path, visited = bfs.searching() plot.animation(path, visited, "Breadth-first Searching (BFS)") if __name__ == '__main__': main()
""" Breadth-first Searching_2D (BFS) @author: <NAME> """ import math import heapq import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../Search_based_Planning/") from Search_2D import plotting from Search_2D.Astar import AStar # from collections import deque class BFS(AStar): """BFS add the new visited node in the end of the openset """ def searching(self): """ Breadth-first Searching. :return: path, visited order """ self.PARENT[self.s_start] = self.s_start self.g[self.s_start] = 0 self.g[self.s_goal] = math.inf heapq.heappush(self.OPEN, (0, self.s_start)) while self.OPEN: _, s = heapq.heappop(self.OPEN) self.CLOSED.append(s) if s == self.s_goal: break for s_n in self.get_neighbor(s): new_cost = self.g[s] + self.cost(s, s_n) if s_n not in self.g: self.g[s_n] = math.inf if new_cost < self.g[s_n]: # conditions for updating Cost self.g[s_n] = new_cost self.PARENT[s_n] = s # bfs, add new node to the end of the openset prior = self.OPEN[-1][0]+1 if len(self.OPEN)>0 else 0 heapq.heappush(self.OPEN, (prior, s_n)) return self.extract_path(self.PARENT), self.CLOSED def main(): s_start = (5, 5) s_goal = (45, 25) bfs = BFS(s_start, s_goal, 'None') plot = plotting.Plotting(s_start, s_goal) path, visited = bfs.searching() plot.animation(path, visited, "Breadth-first Searching (BFS)") if __name__ == '__main__': main()
en
0.797436
Breadth-first Searching_2D (BFS) @author: <NAME> # from collections import deque BFS add the new visited node in the end of the openset Breadth-first Searching. :return: path, visited order # conditions for updating Cost # bfs, add new node to the end of the openset
3.494199
3
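The record above emulates FIFO order inside a heap by giving each newly discovered node a strictly increasing priority. For contrast, a minimal sketch of conventional breadth-first search over a generic adjacency-list graph with collections.deque; this is a stand-in illustration, not the project's Search_2D classes.

from collections import deque

def bfs_path(graph, start, goal):
    """Return a start->goal path in an unweighted graph, or None if unreachable.

    `graph` maps each node to an iterable of neighbours.
    """
    parent = {start: None}
    queue = deque([start])
    while queue:
        node = queue.popleft()            # FIFO pop: the defining property of BFS
        if node == goal:
            path = []
            while node is not None:       # walk parents back to the start
                path.append(node)
                node = parent[node]
            return path[::-1]
        for nb in graph.get(node, ()):
            if nb not in parent:          # first visit fixes the fewest-edge path
                parent[nb] = node
                queue.append(nb)
    return None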
Moving Averages/TF_Adaptive_Moving_Average.py
sofienkaabar/Trend-Following-Strategies-in-Python
18
6631510
# Base Parameters assets = asset_list('FX') # Trading Parameters horizon = 'H1' pair = 0 # Mass Imports my_data = mass_import(pair, horizon) # Indicator Parameters lookback = 100 def kama(Data, what, where, lookback): Data = adder(Data, 10) # lookback from previous period for i in range(len(Data)): Data[i, where] = abs(Data[i, what] - Data[i - 1, what]) Data[0, where] = 0 # Sum of lookbacks for i in range(len(Data)): Data[i, where + 1] = (Data[i - lookback + 1:i + 1, where].sum()) # Volatility for i in range(len(Data)): Data[i, where + 2] = abs(Data[i, what] - Data[i - lookback, what]) Data = Data[lookback + 1:, ] # Efficiency Ratio Data[:, where + 3] = Data[:, where + 2] / Data[:, where + 1] for i in range(len(Data)): Data[i, where + 4] = np.square(Data[i, where + 3] * 0.6666666666666666667) for i in range(len(Data)): Data[i, where + 5] = Data[i - 1, where + 5] + (Data[i, where + 4] * (Data[i, what] - Data[i - 1, where + 5])) Data[11, where + 5] = 0 Data = deleter(Data, where, 5) Data = jump(Data, lookback * 2) return Data my_data = kama(my_data, 3, 4, lookback) ohlc_plot_bars(my_data, 500) plt.plot(my_data[-500:, 4], label = '100-period Adaptive Moving Average') plt.legend()
# Base Parameters assets = asset_list('FX') # Trading Parameters horizon = 'H1' pair = 0 # Mass Imports my_data = mass_import(pair, horizon) # Indicator Parameters lookback = 100 def kama(Data, what, where, lookback): Data = adder(Data, 10) # lookback from previous period for i in range(len(Data)): Data[i, where] = abs(Data[i, what] - Data[i - 1, what]) Data[0, where] = 0 # Sum of lookbacks for i in range(len(Data)): Data[i, where + 1] = (Data[i - lookback + 1:i + 1, where].sum()) # Volatility for i in range(len(Data)): Data[i, where + 2] = abs(Data[i, what] - Data[i - lookback, what]) Data = Data[lookback + 1:, ] # Efficiency Ratio Data[:, where + 3] = Data[:, where + 2] / Data[:, where + 1] for i in range(len(Data)): Data[i, where + 4] = np.square(Data[i, where + 3] * 0.6666666666666666667) for i in range(len(Data)): Data[i, where + 5] = Data[i - 1, where + 5] + (Data[i, where + 4] * (Data[i, what] - Data[i - 1, where + 5])) Data[11, where + 5] = 0 Data = deleter(Data, where, 5) Data = jump(Data, lookback * 2) return Data my_data = kama(my_data, 3, 4, lookback) ohlc_plot_bars(my_data, 500) plt.plot(my_data[-500:, 4], label = '100-period Adaptive Moving Average') plt.legend()
en
0.642944
# Base Parameters # Trading Parameters # Mass Imports # Indicator Parameters # lookback from previous period # Sum of lookbacks # Volatility # Efficiency Ratio
2.369885
2
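The record above computes Kaufman's Adaptive Moving Average over a custom array layout (adder, deleter and jump are project helpers not shown here). A self-contained numpy sketch of the usual KAMA recursion follows; note it uses the textbook fast/slow smoothing constants rather than the fixed 2/3 factor in the record, so the outputs will differ.

import numpy as np

def kama(prices, er_lookback=10, fast=2, slow=30):
    """Kaufman Adaptive Moving Average of a 1-D price series."""
    prices = np.asarray(prices, dtype=float)
    out = np.full_like(prices, np.nan)
    if prices.size <= er_lookback:
        return out
    fast_sc = 2.0 / (fast + 1)
    slow_sc = 2.0 / (slow + 1)
    out[er_lookback] = prices[er_lookback]                      # seed the recursion
    for i in range(er_lookback + 1, prices.size):
        change = abs(prices[i] - prices[i - er_lookback])
        volatility = np.sum(np.abs(np.diff(prices[i - er_lookback:i + 1])))
        er = change / volatility if volatility > 0 else 0.0     # efficiency ratio in [0, 1]
        sc = (er * (fast_sc - slow_sc) + slow_sc) ** 2          # squared adaptive smoothing constant
        out[i] = out[i - 1] + sc * (prices[i] - out[i - 1])
    return out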
skyscanner.py
gen2127/TK_1919
0
6631511
<filename>skyscanner.py import requests import json import urllib import webbrowser url = "https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/pricing/v1.0" APIKey = "<KEY>" headers={ "X-RapidAPI-Host": "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com", "X-RapidAPI-Key": APIKey, "Content-Type": "application/x-www-form-urlencoded" } params={ "country": "US", "currency": "USD", "locale": "en-US", "originPlace": "SFO-sky", "destinationPlace": "LHR-sky", "outboundDate": "2019-12-01", "adults": 1 } req = requests.post(url,data = params,headers=headers) r = req.headers r = r['Location'] webbrowser.open(r,1) #urllib.request.urlopen(r) #r = req.headers #print(r) #print(r['Location']) #headers={ # "X-RapidAPI-Host": "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com", # "X-RapidAPI-Key": APIKey, # "Content-Type": "application/json" #} #q = requests.post(r['Location'],headers = headers) #print(q)
<filename>skyscanner.py import requests import json import urllib import webbrowser url = "https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/pricing/v1.0" APIKey = "<KEY>" headers={ "X-RapidAPI-Host": "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com", "X-RapidAPI-Key": APIKey, "Content-Type": "application/x-www-form-urlencoded" } params={ "country": "US", "currency": "USD", "locale": "en-US", "originPlace": "SFO-sky", "destinationPlace": "LHR-sky", "outboundDate": "2019-12-01", "adults": 1 } req = requests.post(url,data = params,headers=headers) r = req.headers r = r['Location'] webbrowser.open(r,1) #urllib.request.urlopen(r) #r = req.headers #print(r) #print(r['Location']) #headers={ # "X-RapidAPI-Host": "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com", # "X-RapidAPI-Key": APIKey, # "Content-Type": "application/json" #} #q = requests.post(r['Location'],headers = headers) #print(q)
en
0.43853
#urllib.request.urlopen(r) #r = req.headers #print(r) #print(r['Location']) #headers={ # "X-RapidAPI-Host": "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com", # "X-RapidAPI-Key": APIKey, # "Content-Type": "application/json" #} #q = requests.post(r['Location'],headers = headers) #print(q)
3.542863
4
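The record above creates a pricing session and opens the URL returned in the Location header in a browser. A generic sketch of the same create-then-poll pattern with requests is shown below; the header name, polling cadence and status handling are assumptions about a typical session API, not the documented Skyscanner contract.

import time
import requests

def create_and_poll(create_url, payload, headers, attempts=10, delay=2.0):
    """POST to create a server-side session, then poll the URL from `Location`.

    Assumes the service returns the results URL in the Location response header
    and serves JSON results once they are ready; adjust to the real API contract.
    """
    resp = requests.post(create_url, data=payload, headers=headers)
    resp.raise_for_status()
    results_url = resp.headers["Location"]        # where the session results live
    for _ in range(attempts):
        poll = requests.get(results_url, headers=headers)
        if poll.status_code == 200:
            return poll.json()
        time.sleep(delay)                         # back off while results are still building
    raise TimeoutError("polling did not complete in time")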
tests/test_parallel_pool.py
srafehi/taskall
0
6631512
<gh_stars>0 from unittest import TestCase import taskall from taskall import parallel import multiprocessing class TestParallelPool(TestCase): @staticmethod def multiply(a, b): return a * b def test_pool_size_default(self): pool = parallel.pool(pool_size=None) self.assertEquals(pool.pool_size, multiprocessing.cpu_count()) def test_pool_size_custom(self): pool = parallel.pool(pool_size=8) self.assertEquals(pool.pool_size, 8) def test_pool_size_zero(self): with self.assertRaises(ValueError): parallel.pool(pool_size=0) def test_pool_size_negative(self): with self.assertRaises(ValueError): parallel.pool(pool_size=-1) def test_pool_size_string(self): with self.assertRaises(ValueError): parallel.pool(pool_size='invalid') def test_pool_parent(self): pool = parallel.pool(pool_size=2) pooled_multiply = pool.poolify(self.multiply) self.assertIs(pooled_multiply._parent_pool, pool) def test_pool_original_func(self): pool = parallel.pool(pool_size=2) pooled_multiply = pool.poolify(self.multiply) self.assertIs(pooled_multiply._original_func, self.multiply) def test_pool_task_size_total(self): pool = parallel.pool(pool_size=2) pooled_multiply = pool.poolify(self.multiply) f1 = pooled_multiply(1, 2) f2 = pooled_multiply(2, 3) f3 = pooled_multiply(3, 4) self.assertEquals(len(pool), 3) taskall.future.FutureCollection((f1, f2, f3)).run_until_completion() self.assertEquals(len(pool), 0) def test_pool_task_size_individual(self): pool = parallel.pool(pool_size=2) pooled_multiply = pool.poolify(self.multiply) f1 = pooled_multiply(1, 2) f2 = pooled_multiply(2, 3) f3 = pooled_multiply(3, 4) self.assertEquals(len(pool.taskers[0]), 2) self.assertEquals(len(pool.taskers[1]), 1) taskall.future.FutureCollection((f1, f2, f3)).run_until_completion() self.assertEquals(len(pool.taskers[0]), 0) self.assertEquals(len(pool.taskers[1]), 0) def test_pool_map_args(self): pool = parallel.pool(pool_size=2) futures = pool.map(self.multiply, [1, 2, 3], [1, 2, 3]) self.assertEquals(futures[0]._func_args, (1, 1)) self.assertEquals(futures[1]._func_args, (2, 2)) self.assertEquals(futures[2]._func_args, (3, 3)) def test_pool_map_task_count(self): pool = parallel.pool(pool_size=2) futures = pool.map(self.multiply, [1, 2, 3], [1, 2, 3]) self.assertEquals(len(pool), len(futures)) def test_pool_map_results(self): pool = parallel.pool(pool_size=2) futures = pool.map(self.multiply, [1, 2, 3], [1, 2, 3]) self.assertEquals( set(futures.results), { self.multiply(1, 1), self.multiply(2, 2), self.multiply(3, 3) }) def test_pool_map_results_iter(self): pool = parallel.pool(pool_size=2) futures = pool.map(self.multiply, [1, 2, 3], [1, 2, 3]) results = set(futures) self.assertEquals(results, { self.multiply(1, 1), self.multiply(2, 2), self.multiply(3, 3) }) def test_pool_terminate_exc_1(self): pool = parallel.pool(pool_size=2) pool.terminate() with self.assertRaises(IOError): pool.add_task(self.multiply, (1,), (2,)) def test_pool_terminate_exc_2(self): pool = parallel.pool(pool_size=2) pool.terminate() with self.assertRaises(IOError): pool.poolify(self.multiply) def test_pool_terminate_exc_3(self): pool = parallel.pool(pool_size=2) futures = pool.map(self.multiply, (1,), (2,)) pool.terminate() with self.assertRaises(IOError): list(futures)
from unittest import TestCase import taskall from taskall import parallel import multiprocessing class TestParallelPool(TestCase): @staticmethod def multiply(a, b): return a * b def test_pool_size_default(self): pool = parallel.pool(pool_size=None) self.assertEquals(pool.pool_size, multiprocessing.cpu_count()) def test_pool_size_custom(self): pool = parallel.pool(pool_size=8) self.assertEquals(pool.pool_size, 8) def test_pool_size_zero(self): with self.assertRaises(ValueError): parallel.pool(pool_size=0) def test_pool_size_negative(self): with self.assertRaises(ValueError): parallel.pool(pool_size=-1) def test_pool_size_string(self): with self.assertRaises(ValueError): parallel.pool(pool_size='invalid') def test_pool_parent(self): pool = parallel.pool(pool_size=2) pooled_multiply = pool.poolify(self.multiply) self.assertIs(pooled_multiply._parent_pool, pool) def test_pool_original_func(self): pool = parallel.pool(pool_size=2) pooled_multiply = pool.poolify(self.multiply) self.assertIs(pooled_multiply._original_func, self.multiply) def test_pool_task_size_total(self): pool = parallel.pool(pool_size=2) pooled_multiply = pool.poolify(self.multiply) f1 = pooled_multiply(1, 2) f2 = pooled_multiply(2, 3) f3 = pooled_multiply(3, 4) self.assertEquals(len(pool), 3) taskall.future.FutureCollection((f1, f2, f3)).run_until_completion() self.assertEquals(len(pool), 0) def test_pool_task_size_individual(self): pool = parallel.pool(pool_size=2) pooled_multiply = pool.poolify(self.multiply) f1 = pooled_multiply(1, 2) f2 = pooled_multiply(2, 3) f3 = pooled_multiply(3, 4) self.assertEquals(len(pool.taskers[0]), 2) self.assertEquals(len(pool.taskers[1]), 1) taskall.future.FutureCollection((f1, f2, f3)).run_until_completion() self.assertEquals(len(pool.taskers[0]), 0) self.assertEquals(len(pool.taskers[1]), 0) def test_pool_map_args(self): pool = parallel.pool(pool_size=2) futures = pool.map(self.multiply, [1, 2, 3], [1, 2, 3]) self.assertEquals(futures[0]._func_args, (1, 1)) self.assertEquals(futures[1]._func_args, (2, 2)) self.assertEquals(futures[2]._func_args, (3, 3)) def test_pool_map_task_count(self): pool = parallel.pool(pool_size=2) futures = pool.map(self.multiply, [1, 2, 3], [1, 2, 3]) self.assertEquals(len(pool), len(futures)) def test_pool_map_results(self): pool = parallel.pool(pool_size=2) futures = pool.map(self.multiply, [1, 2, 3], [1, 2, 3]) self.assertEquals( set(futures.results), { self.multiply(1, 1), self.multiply(2, 2), self.multiply(3, 3) }) def test_pool_map_results_iter(self): pool = parallel.pool(pool_size=2) futures = pool.map(self.multiply, [1, 2, 3], [1, 2, 3]) results = set(futures) self.assertEquals(results, { self.multiply(1, 1), self.multiply(2, 2), self.multiply(3, 3) }) def test_pool_terminate_exc_1(self): pool = parallel.pool(pool_size=2) pool.terminate() with self.assertRaises(IOError): pool.add_task(self.multiply, (1,), (2,)) def test_pool_terminate_exc_2(self): pool = parallel.pool(pool_size=2) pool.terminate() with self.assertRaises(IOError): pool.poolify(self.multiply) def test_pool_terminate_exc_3(self): pool = parallel.pool(pool_size=2) futures = pool.map(self.multiply, (1,), (2,)) pool.terminate() with self.assertRaises(IOError): list(futures)
none
1
2.996573
3
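The tests above exercise taskall's pool, poolify and map API. For readers without that library, a minimal sketch of the equivalent fan-out with the standard library's concurrent.futures; this is a stand-in for comparison, not taskall's implementation.

from concurrent.futures import ProcessPoolExecutor

def multiply(a, b):
    return a * b

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=2) as pool:
        # map() pairs the argument iterables element-wise, like pool.map in the tests above
        results = list(pool.map(multiply, [1, 2, 3], [1, 2, 3]))
    print(results)   # [1, 4, 9]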
ilexconf/tests/examples/test_quick_start.py
vduseev/holly-config
9
6631513
import os import pytest from ilexconf import Config, from_json, to_json # from ilexconf.tests.debug import debug @pytest.fixture(scope="module") def resulting_dict(): return { "database": { "connection": { "host": "test.local", "port": 8080, "user": "root", "password": "<PASSWORD>", } } } def test_quick_start( settings_json_dict, settings_json_file_path, resulting_dict, tmp_path ): # os.putenv("AWS_DEFAULT_REGION", "us-east-1") os.environ["AWS_DEFAULT_REGION"] = "us-east-1" # [create] from ilexconf import Config, from_json, from_env, to_json # Empty config config = Config() assert dict(config) == {} # Create config from json and merge it into our initial config # Let settings_json_file_path = "settings.json" where inside the file we have # { "database": { "connection": { "host": "localhost", "port": 5432 } } } config.merge(from_json(settings_json_file_path)) assert dict(config) == { "database": {"connection": {"host": "localhost", "port": 5432}} } # Merge dict into config config.merge({"database": {"connection": {"host": "test.local"}}}) assert dict(config) == { "database": {"connection": {"host": "test.local", "port": 5432}} } # Merge environment variables into config config.merge(from_env(prefix="AWS_", separator="__").lower(inplace=True)) assert dict(config) == { "database": {"connection": {"host": "test.local", "port": 5432}}, "default_region": "us-east-1", } # Merge keyword arguments config.set("my__keyword__argument", True, key_sep="__") assert dict(config) == { "database": {"connection": {"host": "test.local", "port": 5432}}, "default_region": "us-east-1", "my": {"keyword": {"argument": True}}, } # Clear values, just like with dict config.clear() assert dict(config) == {} # Or, better yet, do this all in one step, since Config() constructor # accepts any number of mapping objects and keyword arguments as # initialization parameters. However, order of parameters matters. # Last mappings are merged on top of others. And keywords override even that. config = Config( from_json(settings_json_file_path), {"database": {"connection": {"host": "test.local"}}}, database__connection__port=4000, ) assert dict(config) == { "database": {"connection": {"host": "test.local", "port": 4000}} } # [create] # from ilexconf import ( # from_json, # # from_yaml, # # from_toml, # from_ini, # # from_python, # # from_dotenv, # from_env, # ) # [read] cfg1 = from_json(settings_json_file_path) assert dict(cfg1) == { "database": {"connection": {"host": "localhost", "port": 5432}} } # [read] # cfg2 = Config( # from_yaml("settings.yaml"), # from_toml("settings.toml") # ) # cfg3 = Config( # from_ini("settings.ini"), # from_python("settings.py"), # from_dotenv(".env"), # from_env() # ) # [access] # Classic way assert config["database"]["connection"]["host"] == "test.local" # Dotted key notation assert config["database.connection.host"] == "test.local" # Via attributes assert config.database.connection.host == "test.local" # Any combination of the above assert config["database"].connection.host == "test.local" assert config.database["connection.host"] == "test.local" assert config.database["connection"].host == "test.local" assert config.database.connection["host"] == "test.local" # [access] # [upsert] # Change value that already exists in the dictionary # just like you would do with simple dict config["database"]["connection"]["port"] = 8080 assert config["database"]["connection"]["port"] == 8080 # Create new value using 'dotted notation'. Notice that # 'user' field did not exist before. 
config["database.connection.user"] = "root" assert config["database.connection.user"] == "root" # Create new value using. 'password' field did not exist # before we assigned a value to it and was created automatically. config.database.connection.password = "<PASSWORD>" assert config.database.connection.password == "<PASSWORD>" # [upsert] # [merge] # Config correctly merges nested values. Notice how it overrides # the value of the 'password' key in the nested 'connection' config # from 'secret stuff' to 'different secret' config.database.connection.merge({"password": "<PASSWORD>"}) assert config.database.connection.password == "<PASSWORD>" # [merge] # [smart-merge] merged = Config( {"a1": {"c1": 1, "c2": 2, "c3": 3}}, {"a1": {"c3": "other"}} ) # Instead of overriding the value of the "a1" key completely, `merge` method # will recursively look inside and merge nested values. assert dict(merged) == {"a1": {"c1": 1, "c2": 2, "c3": "other"}} # [smart-merge] # [as-dict] assert dict(config) == { "database": { "connection": { "host": "test.local", "port": 8080, "user": "root", "password": "<PASSWORD>", } } } # [as-dict] # [write] # Temporary path p = tmp_path / "settings.json" # Save config to_json(config, p) # Verify written file is correct assert dict(from_json(p)) == { "database": { "connection": { "host": "test.local", "port": 8080, "user": "root", "password": "<PASSWORD>", } } } # [write] # [subclass] class MyConfig(Config): def __init__(self, do_stuff=False): # Initialize your custom config using json settings file super().__init__(from_json(settings_json_file_path)) # Add some custom value depending on some logic if do_stuff: # Here, we create new nested key that did not exist # before and assign a value to it. self.my.custom.key = "Yes, do stuff" # Merge one more mapping on top self.merge({"Horizon": "Up"}) # [subclass] # [test-subclass] # Now you can use your custom defined Config. Given the `setting.json` file that # contains { "database": { "connection": { "host": "localhost", "port": 5432 } } } # MyConfig will have the following values: config = MyConfig(do_stuff=True) assert dict(config) == { "database": { "connection": { "host": "localhost", "port": 5432, }, }, "Horizon": "Up", "my": {"custom": {"key": "Yes, do stuff"}}, } # [test-subclass]
import os import pytest from ilexconf import Config, from_json, to_json # from ilexconf.tests.debug import debug @pytest.fixture(scope="module") def resulting_dict(): return { "database": { "connection": { "host": "test.local", "port": 8080, "user": "root", "password": "<PASSWORD>", } } } def test_quick_start( settings_json_dict, settings_json_file_path, resulting_dict, tmp_path ): # os.putenv("AWS_DEFAULT_REGION", "us-east-1") os.environ["AWS_DEFAULT_REGION"] = "us-east-1" # [create] from ilexconf import Config, from_json, from_env, to_json # Empty config config = Config() assert dict(config) == {} # Create config from json and merge it into our initial config # Let settings_json_file_path = "settings.json" where inside the file we have # { "database": { "connection": { "host": "localhost", "port": 5432 } } } config.merge(from_json(settings_json_file_path)) assert dict(config) == { "database": {"connection": {"host": "localhost", "port": 5432}} } # Merge dict into config config.merge({"database": {"connection": {"host": "test.local"}}}) assert dict(config) == { "database": {"connection": {"host": "test.local", "port": 5432}} } # Merge environment variables into config config.merge(from_env(prefix="AWS_", separator="__").lower(inplace=True)) assert dict(config) == { "database": {"connection": {"host": "test.local", "port": 5432}}, "default_region": "us-east-1", } # Merge keyword arguments config.set("my__keyword__argument", True, key_sep="__") assert dict(config) == { "database": {"connection": {"host": "test.local", "port": 5432}}, "default_region": "us-east-1", "my": {"keyword": {"argument": True}}, } # Clear values, just like with dict config.clear() assert dict(config) == {} # Or, better yet, do this all in one step, since Config() constructor # accepts any number of mapping objects and keyword arguments as # initialization parameters. However, order of parameters matters. # Last mappings are merged on top of others. And keywords override even that. config = Config( from_json(settings_json_file_path), {"database": {"connection": {"host": "test.local"}}}, database__connection__port=4000, ) assert dict(config) == { "database": {"connection": {"host": "test.local", "port": 4000}} } # [create] # from ilexconf import ( # from_json, # # from_yaml, # # from_toml, # from_ini, # # from_python, # # from_dotenv, # from_env, # ) # [read] cfg1 = from_json(settings_json_file_path) assert dict(cfg1) == { "database": {"connection": {"host": "localhost", "port": 5432}} } # [read] # cfg2 = Config( # from_yaml("settings.yaml"), # from_toml("settings.toml") # ) # cfg3 = Config( # from_ini("settings.ini"), # from_python("settings.py"), # from_dotenv(".env"), # from_env() # ) # [access] # Classic way assert config["database"]["connection"]["host"] == "test.local" # Dotted key notation assert config["database.connection.host"] == "test.local" # Via attributes assert config.database.connection.host == "test.local" # Any combination of the above assert config["database"].connection.host == "test.local" assert config.database["connection.host"] == "test.local" assert config.database["connection"].host == "test.local" assert config.database.connection["host"] == "test.local" # [access] # [upsert] # Change value that already exists in the dictionary # just like you would do with simple dict config["database"]["connection"]["port"] = 8080 assert config["database"]["connection"]["port"] == 8080 # Create new value using 'dotted notation'. Notice that # 'user' field did not exist before. 
config["database.connection.user"] = "root" assert config["database.connection.user"] == "root" # Create new value using. 'password' field did not exist # before we assigned a value to it and was created automatically. config.database.connection.password = "<PASSWORD>" assert config.database.connection.password == "<PASSWORD>" # [upsert] # [merge] # Config correctly merges nested values. Notice how it overrides # the value of the 'password' key in the nested 'connection' config # from 'secret stuff' to 'different secret' config.database.connection.merge({"password": "<PASSWORD>"}) assert config.database.connection.password == "<PASSWORD>" # [merge] # [smart-merge] merged = Config( {"a1": {"c1": 1, "c2": 2, "c3": 3}}, {"a1": {"c3": "other"}} ) # Instead of overriding the value of the "a1" key completely, `merge` method # will recursively look inside and merge nested values. assert dict(merged) == {"a1": {"c1": 1, "c2": 2, "c3": "other"}} # [smart-merge] # [as-dict] assert dict(config) == { "database": { "connection": { "host": "test.local", "port": 8080, "user": "root", "password": "<PASSWORD>", } } } # [as-dict] # [write] # Temporary path p = tmp_path / "settings.json" # Save config to_json(config, p) # Verify written file is correct assert dict(from_json(p)) == { "database": { "connection": { "host": "test.local", "port": 8080, "user": "root", "password": "<PASSWORD>", } } } # [write] # [subclass] class MyConfig(Config): def __init__(self, do_stuff=False): # Initialize your custom config using json settings file super().__init__(from_json(settings_json_file_path)) # Add some custom value depending on some logic if do_stuff: # Here, we create new nested key that did not exist # before and assign a value to it. self.my.custom.key = "Yes, do stuff" # Merge one more mapping on top self.merge({"Horizon": "Up"}) # [subclass] # [test-subclass] # Now you can use your custom defined Config. Given the `setting.json` file that # contains { "database": { "connection": { "host": "localhost", "port": 5432 } } } # MyConfig will have the following values: config = MyConfig(do_stuff=True) assert dict(config) == { "database": { "connection": { "host": "localhost", "port": 5432, }, }, "Horizon": "Up", "my": {"custom": {"key": "Yes, do stuff"}}, } # [test-subclass]
en
0.740019
# from ilexconf.tests.debug import debug # os.putenv("AWS_DEFAULT_REGION", "us-east-1") # [create] # Empty config # Create config from json and merge it into our initial config # Let settings_json_file_path = "settings.json" where inside the file we have # { "database": { "connection": { "host": "localhost", "port": 5432 } } } # Merge dict into config # Merge environment variables into config # Merge keyword arguments # Clear values, just like with dict # Or, better yet, do this all in one step, since Config() constructor # accepts any number of mapping objects and keyword arguments as # initialization parameters. However, order of parameters matters. # Last mappings are merged on top of others. And keywords override even that. # [create] # from ilexconf import ( # from_json, # # from_yaml, # # from_toml, # from_ini, # # from_python, # # from_dotenv, # from_env, # ) # [read] # [read] # cfg2 = Config( # from_yaml("settings.yaml"), # from_toml("settings.toml") # ) # cfg3 = Config( # from_ini("settings.ini"), # from_python("settings.py"), # from_dotenv(".env"), # from_env() # ) # [access] # Classic way # Dotted key notation # Via attributes # Any combination of the above # [access] # [upsert] # Change value that already exists in the dictionary # just like you would do with simple dict # Create new value using 'dotted notation'. Notice that # 'user' field did not exist before. # Create new value using. 'password' field did not exist # before we assigned a value to it and was created automatically. # [upsert] # [merge] # Config correctly merges nested values. Notice how it overrides # the value of the 'password' key in the nested 'connection' config # from 'secret stuff' to 'different secret' # [merge] # [smart-merge] # Instead of overriding the value of the "a1" key completely, `merge` method # will recursively look inside and merge nested values. # [smart-merge] # [as-dict] # [as-dict] # [write] # Temporary path # Save config # Verify written file is correct # [write] # [subclass] # Initialize your custom config using json settings file # Add some custom value depending on some logic # Here, we create new nested key that did not exist # before and assign a value to it. # Merge one more mapping on top # [subclass] # [test-subclass] # Now you can use your custom defined Config. Given the `setting.json` file that # contains { "database": { "connection": { "host": "localhost", "port": 5432 } } } # MyConfig will have the following values: # [test-subclass]
2.274183
2
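The test above relies on Config's recursive merge semantics. A plain-dict sketch of that merge rule, assuming later values win and nested mappings are merged rather than replaced; this is illustrative, not ilexconf's internal code.

def deep_merge(base, override):
    """Return a new dict where shared nested dicts are merged and scalar values
    from `override` replace those in `base`."""
    merged = dict(base)
    for key, value in override.items():
        if key in merged and isinstance(merged[key], dict) and isinstance(value, dict):
            merged[key] = deep_merge(merged[key], value)   # recurse into shared branches
        else:
            merged[key] = value                            # override wins for leaves
    return merged

# Mirrors the [smart-merge] assertion in the test:
assert deep_merge(
    {"a1": {"c1": 1, "c2": 2, "c3": 3}}, {"a1": {"c3": "other"}}
) == {"a1": {"c1": 1, "c2": 2, "c3": "other"}}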
golem/test_runner/excel_runner/ExcelUtils.py
kangchenwei/keyautotest2
0
6631514
<gh_stars>0 # import xlrd import xlrd import time class ExcelUtils: def __init__(self, filePath, driver): self.data = xlrd.open_workbook(filePath) self.driver = driver def getTableData(self, tableName): return self.data.sheet_by_name(tableName) def getRowValues(self, tableData, rowId): return tableData.row_values(rowId) # By default, execute the first sheet def execute(self): print('hello') # Execute the Excel file at filePath, using the sheet named sheetName @staticmethod def execute(filePath, sheetName, driver): excelUtils = ExcelUtils(filePath, driver) tableData = excelUtils.getTableData(sheetName) titleRowValues = excelUtils.getRowValues(tableData, 0) ## This only performs execution; converting it into a .py script follows a similar idea with slight differences print(tableData.col_values(0)) for i in range(len(tableData.col_values(0))): if i == 0: continue rowValues = excelUtils.getRowValues(tableData, i) keyDescription = '' keyAction = '' keyFindWay = '' keyElement = '' keyValue = '' for j in range(len(rowValues)): key = titleRowValues[j] if key == 'Model': print('model') elif key == 'step description': keyDescription = rowValues[j] print(keyDescription) elif key == 'Action': keyAction = rowValues[j] print(keyAction) elif key == 'FindWay': keyFindWay = rowValues[j] print(keyFindWay) elif key == 'Element': keyElement = rowValues[j] print(keyElement) elif key == 'Value': keyValue = rowValues[j] print(keyValue) else: print('is not valid key:'+rowValues[j]) # Determine the keyword type and perform the corresponding operation # if keyAction == 'sleep': # time.sleep(keyValue) # if keyFindWay == '' or keyElement == '': # # To save time, only the usual find-the-element-then-operate flow is handled; coordinate-based and app-launch actions are not covered # continue # element = excelUtils.getElement(keyFindWay, keyElement) # print("element------------") # print(element) # if keyAction == '' or element == '': # continue excelUtils.doOperation(excelUtils, keyAction, keyFindWay, keyElement, keyValue) # Get the corresponding element based on the given information def getElement(self, findWay, value): if findWay == 'find_element_by_xpath': return self.driver.find_element_by_xpath(value) elif findWay == 'find_element_by_id': return self.driver.find_element_by_id(value) elif findWay == 'find_element_by_class': return self.driver.find_element_by_class(value) elif findWay == 'find_element_by_name': return self.driver.find_element_by_name(value) return '' # And so on, continue in the same way # Perform the corresponding operation based on the control type def doOperation(self, excelUtils, keyaction, keyfindway, keyelement, keyvalue): if keyaction == 'sleep': time.sleep(keyvalue) elif keyaction == 'click': element = excelUtils.getElement(keyfindway, keyelement) element.click() elif keyaction == 'send_keys': element = excelUtils.getElement(keyfindway, keyelement) element.send_keys(keyvalue) elif keyaction == 'set_text': element = excelUtils.getElement(keyfindway, keyelement) element.set_text(keyvalue) elif keyaction == 'swipe': coordinates = keyvalue.split(',') self.driver.swipe(coordinates[0], coordinates[1], coordinates[2], coordinates[3]) elif keyaction == 'close_app': self.driver.close_app() elif keyaction == 'remove_app': self.driver.remove_app(keyvalue) elif keyaction == 'implicitly_wait': self.driver.implicitly_wait(keyvalue) # And so on, continue in the same way
# import xlrd import xlrd import time class ExcelUtils: def __init__(self, filePath, driver): self.data = xlrd.open_workbook(filePath) self.driver = driver def getTableData(self, tableName): return self.data.sheet_by_name(tableName) def getRowValues(self, tableData, rowId): return tableData.row_values(rowId) # By default, execute the first sheet def execute(self): print('hello') # Execute the Excel file at filePath, using the sheet named sheetName @staticmethod def execute(filePath, sheetName, driver): excelUtils = ExcelUtils(filePath, driver) tableData = excelUtils.getTableData(sheetName) titleRowValues = excelUtils.getRowValues(tableData, 0) ## This only performs execution; converting it into a .py script follows a similar idea with slight differences print(tableData.col_values(0)) for i in range(len(tableData.col_values(0))): if i == 0: continue rowValues = excelUtils.getRowValues(tableData, i) keyDescription = '' keyAction = '' keyFindWay = '' keyElement = '' keyValue = '' for j in range(len(rowValues)): key = titleRowValues[j] if key == 'Model': print('model') elif key == 'step description': keyDescription = rowValues[j] print(keyDescription) elif key == 'Action': keyAction = rowValues[j] print(keyAction) elif key == 'FindWay': keyFindWay = rowValues[j] print(keyFindWay) elif key == 'Element': keyElement = rowValues[j] print(keyElement) elif key == 'Value': keyValue = rowValues[j] print(keyValue) else: print('is not valid key:'+rowValues[j]) # Determine the keyword type and perform the corresponding operation # if keyAction == 'sleep': # time.sleep(keyValue) # if keyFindWay == '' or keyElement == '': # # To save time, only the usual find-the-element-then-operate flow is handled; coordinate-based and app-launch actions are not covered # continue # element = excelUtils.getElement(keyFindWay, keyElement) # print("element------------") # print(element) # if keyAction == '' or element == '': # continue excelUtils.doOperation(excelUtils, keyAction, keyFindWay, keyElement, keyValue) # Get the corresponding element based on the given information def getElement(self, findWay, value): if findWay == 'find_element_by_xpath': return self.driver.find_element_by_xpath(value) elif findWay == 'find_element_by_id': return self.driver.find_element_by_id(value) elif findWay == 'find_element_by_class': return self.driver.find_element_by_class(value) elif findWay == 'find_element_by_name': return self.driver.find_element_by_name(value) return '' # And so on, continue in the same way # Perform the corresponding operation based on the control type def doOperation(self, excelUtils, keyaction, keyfindway, keyelement, keyvalue): if keyaction == 'sleep': time.sleep(keyvalue) elif keyaction == 'click': element = excelUtils.getElement(keyfindway, keyelement) element.click() elif keyaction == 'send_keys': element = excelUtils.getElement(keyfindway, keyelement) element.send_keys(keyvalue) elif keyaction == 'set_text': element = excelUtils.getElement(keyfindway, keyelement) element.set_text(keyvalue) elif keyaction == 'swipe': coordinates = keyvalue.split(',') self.driver.swipe(coordinates[0], coordinates[1], coordinates[2], coordinates[3]) elif keyaction == 'close_app': self.driver.close_app() elif keyaction == 'remove_app': self.driver.remove_app(keyvalue) elif keyaction == 'implicitly_wait': self.driver.implicitly_wait(keyvalue) # And so on, continue in the same way
zh
0.584371
# import xlrd # Run the first sheet by default # Run the Excel file at filePath whose sheet is named sheetName ## This only runs the steps; converting them into a .py script follows a similar idea with minor differences # Decide on the keyword type and perform the matching operation # if keyAction == 'sleep': # time.sleep(keyValue) # if keyFindWay == '' or keyElement == '': # # To save time, only the usual find-element-then-act flow is handled; coordinate-based and app-launch actions are not covered # continue # element = excelUtils.getElement(keyFindWay, keyElement) # print("element------------") # print(element) # if keyAction == '' or element == '': # continue # Get the matching element from the locator info # Extend with more locator strategies in the same way # Perform the matching operation for the action type # Extend with more actions in the same way
2.591539
3
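The ExcelUtils module above is a small keyword-driven runner: each spreadsheet row names an action, a locator strategy, an element, and a value, and doOperation dispatches on the action. A minimal invocation sketch, assuming a hypothetical cases.xlsx workbook with a 'login' sheet and the classic Appium Python client (the capabilities below are placeholders):

from appium import webdriver

desired_caps = {
    "platformName": "Android",
    "deviceName": "emulator-5554",       # placeholder device
    "appPackage": "com.example.app",     # placeholder app under test
    "appActivity": ".MainActivity",
}
driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", desired_caps)

# Run every row of the 'login' sheet as one keyword step.
ExcelUtils.execute("cases.xlsx", "login", driver)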
src/cust_utils/utils.py
muiton/123MoviesRIpper
11
6631515
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import path_util
import subprocess


def create_file(file_path, file_name, data_to_write):
    if not isinstance(data_to_write, str):
        data_to_write = str(data_to_write)
    if not data_to_write or not str(data_to_write).strip():
        print("Empty data provided for {0}".format(file_name))
        return False
    file_location = path_util.get_abs_path_name(file_path, file_name)
    with open(file_location, 'w') as f:
        f.write(data_to_write)
        f.flush()
    return True


def create_file_binary_mode(file_path, file_name, data_to_write):
    if not data_to_write or not str(data_to_write).strip():
        print("Empty data provided for {0}".format(file_name))
        return False
    file_location = path_util.get_abs_path_name(file_path, file_name)
    with open(file_location, 'wb') as f:
        f.write(data_to_write)
        f.flush()
    return True


def read_file_data(file_path, file_name):
    file_location = path_util.get_abs_path_name(file_path, file_name)
    content = None
    with open(file_location, 'r') as f:
        content = f.read().strip()
    return None if content == "" else content


def get_clean_path_name(path_name):
    for cha in '\/*?:"<>|,;\'':
        path_name = path_name.replace(cha, ' -')
    return path_name


def get_youtube_dl_command(file_location, video_url):
    command = 'youtube-dl -i "{0}" -o "{1}"'.format(video_url, file_location)
    return command


def call_youtube_dl(youtube_dl_command):
    process = subprocess.Popen(youtube_dl_command, shell=True, stdout=subprocess.PIPE)
    process.wait()
    return process.returncode
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import path_util
import subprocess


def create_file(file_path, file_name, data_to_write):
    if not isinstance(data_to_write, str):
        data_to_write = str(data_to_write)
    if not data_to_write or not str(data_to_write).strip():
        print("Empty data provided for {0}".format(file_name))
        return False
    file_location = path_util.get_abs_path_name(file_path, file_name)
    with open(file_location, 'w') as f:
        f.write(data_to_write)
        f.flush()
    return True


def create_file_binary_mode(file_path, file_name, data_to_write):
    if not data_to_write or not str(data_to_write).strip():
        print("Empty data provided for {0}".format(file_name))
        return False
    file_location = path_util.get_abs_path_name(file_path, file_name)
    with open(file_location, 'wb') as f:
        f.write(data_to_write)
        f.flush()
    return True


def read_file_data(file_path, file_name):
    file_location = path_util.get_abs_path_name(file_path, file_name)
    content = None
    with open(file_location, 'r') as f:
        content = f.read().strip()
    return None if content == "" else content


def get_clean_path_name(path_name):
    for cha in '\/*?:"<>|,;\'':
        path_name = path_name.replace(cha, ' -')
    return path_name


def get_youtube_dl_command(file_location, video_url):
    command = 'youtube-dl -i "{0}" -o "{1}"'.format(video_url, file_location)
    return command


def call_youtube_dl(youtube_dl_command):
    process = subprocess.Popen(youtube_dl_command, shell=True, stdout=subprocess.PIPE)
    process.wait()
    return process.returncode
en
0.352855
#!/usr/bin/env python # -*- coding: utf-8 -*-
3.076675
3
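A short usage sketch for the helpers above, assuming path_util.get_abs_path_name simply joins the directory and file name and that the 'downloads' directory already exists (both assumptions, not shown in this file):

# Sanitize a title into a safe file name, write a text file, then read it back.
title = get_clean_path_name('Movie: Part 1/2')   # ':' and '/' are replaced with ' -'
create_file('downloads', title + '.txt', 'some metadata')
print(read_file_data('downloads', title + '.txt'))

# Build and run a youtube-dl command for a placeholder stream URL.
cmd = get_youtube_dl_command('downloads/movie.mp4', 'https://example.com/stream.m3u8')
print(call_youtube_dl(cmd))                      # youtube-dl's exit code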
testproject/testapp/validators.py
n3rdftw/djoser
1
6631516
<reponame>n3rdftw/djoser
def is_666(value):
    from rest_framework import serializers

    if value == '666':
        raise serializers.ValidationError('Woops, 666 is not allowed.')
def is_666(value):
    from rest_framework import serializers

    if value == '666':
        raise serializers.ValidationError('Woops, 666 is not allowed.')
none
1
2.527579
3
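The validator above follows Django REST Framework's plain-function validator convention, so it is attached through a field's validators list; a minimal sketch with a hypothetical serializer:

from rest_framework import serializers
from testapp.validators import is_666

class PinSerializer(serializers.Serializer):
    # is_666 raises ValidationError whenever the submitted value is '666'.
    pin = serializers.CharField(validators=[is_666])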
faker/providers/person/ar_AA/__init__.py
jacksmith15/faker
1
6631517
<reponame>jacksmith15/faker from typing import Tuple from .. import Provider as PersonProvider class Provider(PersonProvider): formats_female: Tuple[str, ...] = ( "{{first_name_female}} {{last_name}}", "{{prefix_female}} {{first_name_female}} {{last_name}}", ) formats_male: Tuple[str, ...] = ( "{{first_name_male}} {{last_name}}", "{{prefix_male}} {{first_name_male}} {{last_name}}", ) formats = formats_male + formats_female first_names_female: Tuple[str, ...] = ( "اصيل", "آلاء", "آيات", "ايمان", "بهجة", "تمام", "بشري", "حياة", "خاشعة", "دانية", "دعاء", "زكية", "نغم", "لارا", "زهرة", "سبأ", "ضحى", "ضياء", "عالية", "مريم", "فداء", "فرات", "فردوس", "كاملة", "كوثر", "هاجر", "هدى", "يسرى", "سجى", "سلسبيل", "شهد", "جنى", "اسماءبناتمختلفةومعانيها:", "ريتاج", "يارا", "وصاف", "ناردين", "ميرا", "مايا", "مادلين", "لينا", "لورا", "وسجايا", "روفيدا", "ديمه", "جيلان", "جوانا", "ألين", "لتين", "تالا", "سديم", "جودي", "ليان", "دانة", "ميار", "لوجين", "ربى", "لورين", "ميرال", "ريتال", "جوليا", "جالا", "جوان", "راما", "هايدي", "ريفال", "إلينا", "أسيل", "لوليا", "ليساء", "ميسون", "جوين", "روبين", "جمان", "ميلاء", "رواء", "أناهيد", "بيسان", "ابتسام", "إباء", "ابتكار", "ابتهاج", "ابتهال", "بوران", "محمد", "بنان", "بيلسان", "بتلاء", "بدرالدّجى", "تاليا", "ترف", "تالا", "ترانيم", "جلنار", "جميلة", "جهراء", "جواهر", "جوريّة", "ريمان", "ريما", "ريناد", "ريفال", "راما", "روعة", "ريما", "ريم", "اعتكاف", "اعتماد", "أغاريد", "افتكار", "أفراح", "أفنان", "لاما", "ليم", "لوليا", "ريمان", "أجوان", "بشرى", "بلسم", "بلقيس", "بلماء", "بلند", "بنان", "بنفسج", "بهابهاء", "بهية", "نوره", "نوف", "نوال", "ناديه", "هدى", "هناء", "هيا", "هند", "هنادي", "وفاء", "ياسمين", "يسرى", "غيداء", "شادن", "جود", "سلاف", "جيلان", "نشوة", "ريان", "دارين", "أحلام", "إخلاص", "أروى", "أريج", "أزهار", "أسرار", "آيات", "ماذى", "تولين", "هيام", "ريناد", "جميلة", "حلا", "عتاب", "كرمة", "ناهد", "غوى", "ريف", "بارعة", "باسمة", "باهرة", "بتول", "بثينة", "أحمد", ) first_names_male: Tuple[str, ...] 
= ( "تاج", "تامر", "تحسين", "تقي", "تمّام", "تميم", "توفيق", "ترف", "تاج الدّين", "تقيّ الدّين", "ثائر", "ثابت", "ثامر", "ثروت", "ثقيف", "ثاقب", "جابر", "جاد", "جاسم", "جرير", "جسور", "جعفر", "جلاء", "جلال", "جليل", "جمال", "جميل", "جدير", "جرّاح", "جلال الدّين", "جمال الدّين", "جهاد", "حاتم", "حارث", "حازم", "حافظ", "حامد", "حبّاب", "حسام", "حسن", "حسيب", "حسين", "حسني", "حسنين", "حقّي", "حكيم", "حليم", "حمّاد", "حمدان", "حمدي", "حمزة", "حمود", "حميد", "حنبل", "حنفي", "حيّان", "حيدر", "حفيظ", "خاطر", "خافق", "خالد", "خالدي", "خلدون", "خلف", "خلوصي", "خليفة", "خليل", "خميس", "خيري", "خضر", "خطيب", "دؤوب", "داني", "داهي", "داوود", "دريد", "دليل", "دهمان", "ديسم", "ذيب", "ذكي", "ذريع", "رائد", "رائف", "رابح", "راتب", "راجح", "راجي", "رازي", "راشد", "راضي", "راغب", "رامز", "رامي", "رامح", "راني", "راوي", "رؤوف", "رباح", "ريّان", "ربيع", "رجاء", "رجائي", "رجب", "رخاء", "رستم", "رسمي", "رشاد", "رشدي", "رشيد", "رضوان", "رفيق", "رمحي", "رمزي", "رمضان", "رهيف", "روحي", "رافع", "رئيس", "رحيب", "رزين", "راسم", "رضي", "زاخر", "زاكي", "زاهر", "زاهي", "زايد", "زبير", "زغلول", "زكريا", "زكي", "زهدي", "زهران", "زهير", "زياد", "زيد", "زيدان", "زين", "سائد", "ساجد", "سخاء", "ساجي", "ساطع", "سالم", "سامح", "ساهر", "سامر", "سامي", "ساهد", "سراج", "سرحان", "سرور", "سعد", "سعدون", "سعدي", "سعود", "سعيد", "سفيان", "سفير", "سلام", "سلطان", "سلمان", "سليمان", "سموح", "سمير", "سنان", "سنام", "سهل", "سهوان", "سهيل", "سيّد", "سليم", "سراج الدّين", "سيفالدّين", "شادي", "شافع", "شاكر", "شامخ", "شامل", "شبلي", "شبيب", "شدّاد", "شريف", "شعبان", "شعيب", "شفيع", "شعلان", "شكري", "شكيب", "شهب", "شهاب", "شهم", "شهير", "شوقي", "شجاع", "شاطر", "شيّق", "صائب", "صابر", "صاحب", "صادح", "صادق", "صارم", "صافي", "صالح", "صامد", "صباح", "صبحي", "صبري", "صبور", "صبيح", "صخر", "صدقي", "صدّام", "صدّاح", "صعب", "صقر", "صلاح", "صنديد", "صهيب", "صدر الدّين", "صلاح الدّين", "ضاحك", "ضاحي", "ضحّاك", "ضرغام", "ضياء", "ضيائي", "ضياءالدّين", "طائع", "طائف", "طائل", "طارق", "طالب", "طامح", "طاهر", "طبّاع", "طريف", "طلال", "طلعت", "طموح", "طه", "طيّب", "طيّع", "ظافر", "ظبي", "ظريف", "ظهير", "ظاعن", "ظاهر", "عائد", "عابد", "عادل", "عارف", "عاصم", "عاطف", "عاقل", "عاكف", "عالم", "عامر", "عبّاس", "عبّود", "عتريس", "عتيد", "عربي", "عثمان", "عدلي", "عدنان", "عدوي", "عرفات", "عرفه", "عرفان", "عزاز", "عزّت", "عزمي", "عصام", "عصمت", "عطاء", "عفيف", "عقيل", "علاء", "علّام", "علوان", "علي", "عماد", "عمّار", "عمر", "عمران", "عمرو", "عمير", "عاتب", "عتيق", "عذب", "عزيز", "عبد الحقّ", "عبدالله", "عبدالرّحمن", "عزّالدّين", "علاءالدّين", "علم الدّين", "عبد الإله", "مُتعب", "عبد الباري", "عبد الباقي", "عبد التّواب", "عبد الجبّار", "عبد الجليل", "عبد الحفيظ", "عبد الحكيم", "عبد الحليم", "عبد الحيّ", "عبد المحيي", "عبد الخالق", "عبد الرّزاق", "هزار", "عبد الرّشيد", "عبد الرّحمن", "عبد الرّحيم", "عبد الرّؤوف", "عبد السّميع", "عبد الشّكور", "عبد الصّمد", "عبد العليم", "عبد الغفّار", "عبد الغفور", "عبد القادر", "مهنّد", "نزيه", "عبد القدّوس", "عبد القهّار", "عبد الكريم", "عبد اللطيف", "عبد المجيد", "عبد المولى", "عبد العزيز", "عبد السّلام", "عبد الملك", "عبد الواحد", "عبد الغني", "غازي", "غالب", "غالي", "غانم", "غزوان", "غسّان", "غطفان", "غزير", "غامد", "فائق", "فاتح", "فاخر", "فادي", "كامل", "مصعب", "ممتاز", "فرج", "فارس", "فارع", "فاروق", "فاضل", "فالح", "فايد", "فتوح", "فتحي", "فخر", "فخري", "فداء", "فدائي", "فراس", "فرج", "فرحان", "فرزدق", "فضل", "فطين", "فكري", "فلاح", "فهد", "فهمي", "فؤاد", "فوّاز", "فوزي", "فضل", "فيّاض", "فيصل", "فخر الدّين", "قاسم", "قاصد", "قانت", "قائد", "قحطان", "قدري", "قصي", "قنوع", "قيس", "قبس", "قصيد", "قطب", "قطز", "كارم", "كاسر", 
"كاشف", "كاظم", "كايد", "كافور", "كتوم", "كرم", "كريم", "كسّاب", "كمال", "كنار", "كنعان", "كنان", "كبير", "كليم", "لبيب", "لبيد", "لطفي", "لطوف", "لفيف", "لقمان", "لقاء", "لؤي", "لهفان", "ليث", "لمّاح", "مأمون", "ماجد", "مازن", "مالك", "ماهر", "محمّد", "مُتوكّل", "مُتولي", "مُتيّم", "مجد", "مجاهد", "مَجدي", "محجوب", "محسن", "محفوظ", "محمود", "مختار", "مخلص", "مُخيمر", "مدحت", "مراد", "مرادي", "مرتجي", "مرتقي", "مرزوق", "مرسال", "مرتضي", "مُرسي", "مرشد", "مُرضي", "مرعي", "مروان", "مزهر", "مسرور", "مُسعف", "مَسعد", "مسعود", "مسلم", "مشرف", "مشرق", "مشفق", "مصباح", "مصطفى", "مُصلح", "مطاوع", "مظهر", "مُعتز", "معتوق", "معزّ", "معمّر", "معن", "معين", "مفيد", "مقداد", "مقدام", "مكّي", "مكرّم", "ملهم", "يونس", "ممدوح", "مُناضل", "مناف", "مُنذر", "منيف", "منتصر", "مُنجد", "منسي", "منصور", "مُنير", "منيب", "منيع", "مهدي", "مهران", "مهيب", "موسى", "موفّق", "مؤمن", "مؤنس", "مؤيّد", "ميّاد", "مياس", "ميسور", "ميمون", "ميثاق", "معارف", "محييالدّين", "مشاري", "نائل", "ناجح", "ناجي", "نادر", "نادي", "ناصر", "ناضر", "ناصيف", "ناظم", "ناعم", "نافذ", "نافع", "نبهان", "نبيل", "نبيه", "نبراس", "نورالحقّ", "نجدت", "نجوان", "نجيب", "نديم", "نذير", "نزار", "نسيب", "نشأت", "نصر", "نضال", "نصري", "نصور", "نصوح", "نظام", "نظمي", "نعيم", "نعمان", "نمر", "نوّاف", "نوح", "نوّار", "نور", "نورس", "نشوان", "نوري", "نيازي", "ناصر الدّين", "نصر الدّين", "نور الدّين", "نجم الدّين", "هادي", "هاشم", "هاني", "هايل", "هلال", "هلالي", "همام", "هيكل", "هيمان", "هيثم", "وائل", "واثق", "وادع", "واصف", "واصل", "وثّاب", "وجدي", "وجيه", "وحيد", "ودود", "وديع", "وريد", "وسام", "وسيم", "وسيل", "وصفي", "وضّاح", "وفائي", "وفيق", "وليد", "وليف", "ياسر", "يافع", "ياقوت", "يانع", "يحيى", "يزيد", "يسار", "يسري", "يعرب", "يعقوب", "يقين", "يمام", "يوسف", ) first_names = first_names_male + first_names_female last_names: Tuple[str, ...] 
= ( "الخالدي", "البديري", "الشهابي", "العفيفي", "جزار", "الخطيب بني جماعة الكناني", "الدجاني", "الغوانمة", "جار الله", "السروري", "الامام", "النقيب", "المفتي", "ابو السعود", "الفتياني", "العلمي", "بو مدين", "نسيبة", "النشاشيبي", "العسلي", "الحسيني", "الجاعوني", "درويش", "الأنصاري", "جودة", "النمري", "قطينة", "الداودي", "العارف", "رصاص", "البخاري", "كمال", "الترجمان الصالح", "غنيم", "المؤقت", "شتية", "شرف", "نور الدين", "الشعباني", "الأيوبي", "الجبشة", "هندية", "البشيتي", "الوعري", "الموسوس", "المظفر", "الترهي", "البغدادي", "الهدمي", "البامية", "الكلغاصي", "اليوزباشي", "المتولي", "اسطمبولي", "الألجاوي", "معتوق", "حب رمان", "القرجولي", "نجم", "طه", "عبده", "سموم", "نجيب", "غوشة", "اهرام", "قرش", "الكالوتي", "حجازي", "زحيكة", "جعفر", "ازحيمان", "الحواش", "القضماني", "طوطح", "الشاويش", "بدرية", "ابو الحاج", "البيطار", "صيام", "قليبو", "ارناؤوط", "الشرفاء", "الحلاق", "المملوك", "السمان", "طقش", "وهبة", "عبد اللطيف", "طزيز", "السيفي", "عويضة", "القطب", "الطحان", "النجار", "القباني", "عكاوي", "الديسي", "الزماميري", "التوتنجي", "الحلواني", "القزاز", "الماني", "الدقاق", "الشامي", "سوميرة", "ابو عيد", "الخلفاوي", "الدسوقي", "المغربي", "أفغاني", "مراد", "زلاطيمو", "سرندح", "مشعشع", "بحمدوني", "بعلبكي", "صيداوي", "صيداني", "طرابلسي", "جزيني", "بيروتي", "عرموني", "متني", "شويفاتي", "مزرعاني", "بتروني", "جبيلي", "اميوني", "زحلاوي", "الساحلي", "القاعي", "القلموني", "البيسار القعقور", "إياد", "الأزد", "الأشراف", "السادة", "الأوس", "أشجع", "ألمع", "أنمار", "بنو الأحمر", "بنو الأحمر بن الحارث", "بنو الأسمر", "بنو أسد", "بنو أمية", "أكلب", "بنو النجار", "البقوم", "أولاد بوعزيز", "بارق", "باهلة", "بجيلة", "بكر بن عبد مناة", "بكر بن وائل", "بديرية", "بلغازي", "بلقرن", "بلي", "بيرقدار", "بني بيات", "بكيل", "ترابين", "تغلب بن وائل", "تميم", "تنوخ", "ثقيف", "الجعليين", "جرهم", "جديس", "جذام", "جهينة", "الحجر بن الهنوء بن الأزد", "الحداء", "الحكم بن سعد العشيرة", "بنو الحارث بن كعب", "حرب", "بنو حنيفة", "حاشد", "حميضة", "حمير", "حوالة", "الحويطات", "الخزرج", "بنو خالد", "خثعم", "خزاعة", "خندف", "خولان", "الدليم", "الدواسر", "بنو الدئل", "دوبلال", "بنو ذي أصبح", "راجح", "بني رشيد", "ربيعة", "الرباب", "الرباطاب", "السادة الراويون", "الزرقان", "زبيد", "أولاد زيان", "بنو زيد", "زهران", "السهول", "بنو سعد بن بكر", "بنو سعد بن ليث بن بكر", "سليم", "سبيع", "الشايقية", "الشحوح", "بنو شعبة", "شمران", "بنو شهر", "بنو شيبان", "بنو شيبة", "شمر", "شهران", "بنو صخر", "بنو ضمرة", "ضبيعة", "طسم", "طيء", "الظفير", "عجرمة (العجارمة)", "العجمان", "العقيدات", "العوازم", "العوالق", "بنو العريج", "عاملة", "بنو عبس", "بنو عجل", "بنو عدي", "بنو عمرو", "عامر بن صعصعة", "عبد القيس", "عتيبة", "عدوان", "عذرة", "عسير", "عليان", "عنز بن وائل", "عنزة", "عنس", "عضل", "بني عطية", "غامد", "غطفان", "بنو فراس", "فراهيد", "فهم", "القواسم", "قحطان", "قريش", "قضاعة", "قيس عيلان", "بنو كنز", "الكواهلة", "بنو كلب", "كنانة", "الكبابيش", "كندة", "كهلان", "الكثيري", "بنو لام", "لخم", "بنو ليث", "المرازيق", "المنتفق", "الموركة", "المهرة", "بنو مالك", "بنو معقل", "بنو مهدي", "مزينة", "مذحج", "مرازيق البقوم", "مضر", "مطير", "ميرفاب", "النمر", "نهد", "بني هاجر", "بنو هاشم", "بنو هلال", "قبيلة هذيل البقوم", "هذيل", "همدان", "هوازن", "بنو ياس", "بنو يعلى", "يافع", "يشكر", ) prefixes_female: Tuple[str, ...] = ( "السيدة", "الآنسة", "الدكتورة", "الأستاذة", "المهندسة", ) prefixes_male: Tuple[str, ...] = ("السيد", "المهندس", "الدكتور", "الأستاذ")
from typing import Tuple from .. import Provider as PersonProvider class Provider(PersonProvider): formats_female: Tuple[str, ...] = ( "{{first_name_female}} {{last_name}}", "{{prefix_female}} {{first_name_female}} {{last_name}}", ) formats_male: Tuple[str, ...] = ( "{{first_name_male}} {{last_name}}", "{{prefix_male}} {{first_name_male}} {{last_name}}", ) formats = formats_male + formats_female first_names_female: Tuple[str, ...] = ( "اصيل", "آلاء", "آيات", "ايمان", "بهجة", "تمام", "بشري", "حياة", "خاشعة", "دانية", "دعاء", "زكية", "نغم", "لارا", "زهرة", "سبأ", "ضحى", "ضياء", "عالية", "مريم", "فداء", "فرات", "فردوس", "كاملة", "كوثر", "هاجر", "هدى", "يسرى", "سجى", "سلسبيل", "شهد", "جنى", "اسماءبناتمختلفةومعانيها:", "ريتاج", "يارا", "وصاف", "ناردين", "ميرا", "مايا", "مادلين", "لينا", "لورا", "وسجايا", "روفيدا", "ديمه", "جيلان", "جوانا", "ألين", "لتين", "تالا", "سديم", "جودي", "ليان", "دانة", "ميار", "لوجين", "ربى", "لورين", "ميرال", "ريتال", "جوليا", "جالا", "جوان", "راما", "هايدي", "ريفال", "إلينا", "أسيل", "لوليا", "ليساء", "ميسون", "جوين", "روبين", "جمان", "ميلاء", "رواء", "أناهيد", "بيسان", "ابتسام", "إباء", "ابتكار", "ابتهاج", "ابتهال", "بوران", "محمد", "بنان", "بيلسان", "بتلاء", "بدرالدّجى", "تاليا", "ترف", "تالا", "ترانيم", "جلنار", "جميلة", "جهراء", "جواهر", "جوريّة", "ريمان", "ريما", "ريناد", "ريفال", "راما", "روعة", "ريما", "ريم", "اعتكاف", "اعتماد", "أغاريد", "افتكار", "أفراح", "أفنان", "لاما", "ليم", "لوليا", "ريمان", "أجوان", "بشرى", "بلسم", "بلقيس", "بلماء", "بلند", "بنان", "بنفسج", "بهابهاء", "بهية", "نوره", "نوف", "نوال", "ناديه", "هدى", "هناء", "هيا", "هند", "هنادي", "وفاء", "ياسمين", "يسرى", "غيداء", "شادن", "جود", "سلاف", "جيلان", "نشوة", "ريان", "دارين", "أحلام", "إخلاص", "أروى", "أريج", "أزهار", "أسرار", "آيات", "ماذى", "تولين", "هيام", "ريناد", "جميلة", "حلا", "عتاب", "كرمة", "ناهد", "غوى", "ريف", "بارعة", "باسمة", "باهرة", "بتول", "بثينة", "أحمد", ) first_names_male: Tuple[str, ...] 
= ( "تاج", "تامر", "تحسين", "تقي", "تمّام", "تميم", "توفيق", "ترف", "تاج الدّين", "تقيّ الدّين", "ثائر", "ثابت", "ثامر", "ثروت", "ثقيف", "ثاقب", "جابر", "جاد", "جاسم", "جرير", "جسور", "جعفر", "جلاء", "جلال", "جليل", "جمال", "جميل", "جدير", "جرّاح", "جلال الدّين", "جمال الدّين", "جهاد", "حاتم", "حارث", "حازم", "حافظ", "حامد", "حبّاب", "حسام", "حسن", "حسيب", "حسين", "حسني", "حسنين", "حقّي", "حكيم", "حليم", "حمّاد", "حمدان", "حمدي", "حمزة", "حمود", "حميد", "حنبل", "حنفي", "حيّان", "حيدر", "حفيظ", "خاطر", "خافق", "خالد", "خالدي", "خلدون", "خلف", "خلوصي", "خليفة", "خليل", "خميس", "خيري", "خضر", "خطيب", "دؤوب", "داني", "داهي", "داوود", "دريد", "دليل", "دهمان", "ديسم", "ذيب", "ذكي", "ذريع", "رائد", "رائف", "رابح", "راتب", "راجح", "راجي", "رازي", "راشد", "راضي", "راغب", "رامز", "رامي", "رامح", "راني", "راوي", "رؤوف", "رباح", "ريّان", "ربيع", "رجاء", "رجائي", "رجب", "رخاء", "رستم", "رسمي", "رشاد", "رشدي", "رشيد", "رضوان", "رفيق", "رمحي", "رمزي", "رمضان", "رهيف", "روحي", "رافع", "رئيس", "رحيب", "رزين", "راسم", "رضي", "زاخر", "زاكي", "زاهر", "زاهي", "زايد", "زبير", "زغلول", "زكريا", "زكي", "زهدي", "زهران", "زهير", "زياد", "زيد", "زيدان", "زين", "سائد", "ساجد", "سخاء", "ساجي", "ساطع", "سالم", "سامح", "ساهر", "سامر", "سامي", "ساهد", "سراج", "سرحان", "سرور", "سعد", "سعدون", "سعدي", "سعود", "سعيد", "سفيان", "سفير", "سلام", "سلطان", "سلمان", "سليمان", "سموح", "سمير", "سنان", "سنام", "سهل", "سهوان", "سهيل", "سيّد", "سليم", "سراج الدّين", "سيفالدّين", "شادي", "شافع", "شاكر", "شامخ", "شامل", "شبلي", "شبيب", "شدّاد", "شريف", "شعبان", "شعيب", "شفيع", "شعلان", "شكري", "شكيب", "شهب", "شهاب", "شهم", "شهير", "شوقي", "شجاع", "شاطر", "شيّق", "صائب", "صابر", "صاحب", "صادح", "صادق", "صارم", "صافي", "صالح", "صامد", "صباح", "صبحي", "صبري", "صبور", "صبيح", "صخر", "صدقي", "صدّام", "صدّاح", "صعب", "صقر", "صلاح", "صنديد", "صهيب", "صدر الدّين", "صلاح الدّين", "ضاحك", "ضاحي", "ضحّاك", "ضرغام", "ضياء", "ضيائي", "ضياءالدّين", "طائع", "طائف", "طائل", "طارق", "طالب", "طامح", "طاهر", "طبّاع", "طريف", "طلال", "طلعت", "طموح", "طه", "طيّب", "طيّع", "ظافر", "ظبي", "ظريف", "ظهير", "ظاعن", "ظاهر", "عائد", "عابد", "عادل", "عارف", "عاصم", "عاطف", "عاقل", "عاكف", "عالم", "عامر", "عبّاس", "عبّود", "عتريس", "عتيد", "عربي", "عثمان", "عدلي", "عدنان", "عدوي", "عرفات", "عرفه", "عرفان", "عزاز", "عزّت", "عزمي", "عصام", "عصمت", "عطاء", "عفيف", "عقيل", "علاء", "علّام", "علوان", "علي", "عماد", "عمّار", "عمر", "عمران", "عمرو", "عمير", "عاتب", "عتيق", "عذب", "عزيز", "عبد الحقّ", "عبدالله", "عبدالرّحمن", "عزّالدّين", "علاءالدّين", "علم الدّين", "عبد الإله", "مُتعب", "عبد الباري", "عبد الباقي", "عبد التّواب", "عبد الجبّار", "عبد الجليل", "عبد الحفيظ", "عبد الحكيم", "عبد الحليم", "عبد الحيّ", "عبد المحيي", "عبد الخالق", "عبد الرّزاق", "هزار", "عبد الرّشيد", "عبد الرّحمن", "عبد الرّحيم", "عبد الرّؤوف", "عبد السّميع", "عبد الشّكور", "عبد الصّمد", "عبد العليم", "عبد الغفّار", "عبد الغفور", "عبد القادر", "مهنّد", "نزيه", "عبد القدّوس", "عبد القهّار", "عبد الكريم", "عبد اللطيف", "عبد المجيد", "عبد المولى", "عبد العزيز", "عبد السّلام", "عبد الملك", "عبد الواحد", "عبد الغني", "غازي", "غالب", "غالي", "غانم", "غزوان", "غسّان", "غطفان", "غزير", "غامد", "فائق", "فاتح", "فاخر", "فادي", "كامل", "مصعب", "ممتاز", "فرج", "فارس", "فارع", "فاروق", "فاضل", "فالح", "فايد", "فتوح", "فتحي", "فخر", "فخري", "فداء", "فدائي", "فراس", "فرج", "فرحان", "فرزدق", "فضل", "فطين", "فكري", "فلاح", "فهد", "فهمي", "فؤاد", "فوّاز", "فوزي", "فضل", "فيّاض", "فيصل", "فخر الدّين", "قاسم", "قاصد", "قانت", "قائد", "قحطان", "قدري", "قصي", "قنوع", "قيس", "قبس", "قصيد", "قطب", "قطز", "كارم", "كاسر", 
"كاشف", "كاظم", "كايد", "كافور", "كتوم", "كرم", "كريم", "كسّاب", "كمال", "كنار", "كنعان", "كنان", "كبير", "كليم", "لبيب", "لبيد", "لطفي", "لطوف", "لفيف", "لقمان", "لقاء", "لؤي", "لهفان", "ليث", "لمّاح", "مأمون", "ماجد", "مازن", "مالك", "ماهر", "محمّد", "مُتوكّل", "مُتولي", "مُتيّم", "مجد", "مجاهد", "مَجدي", "محجوب", "محسن", "محفوظ", "محمود", "مختار", "مخلص", "مُخيمر", "مدحت", "مراد", "مرادي", "مرتجي", "مرتقي", "مرزوق", "مرسال", "مرتضي", "مُرسي", "مرشد", "مُرضي", "مرعي", "مروان", "مزهر", "مسرور", "مُسعف", "مَسعد", "مسعود", "مسلم", "مشرف", "مشرق", "مشفق", "مصباح", "مصطفى", "مُصلح", "مطاوع", "مظهر", "مُعتز", "معتوق", "معزّ", "معمّر", "معن", "معين", "مفيد", "مقداد", "مقدام", "مكّي", "مكرّم", "ملهم", "يونس", "ممدوح", "مُناضل", "مناف", "مُنذر", "منيف", "منتصر", "مُنجد", "منسي", "منصور", "مُنير", "منيب", "منيع", "مهدي", "مهران", "مهيب", "موسى", "موفّق", "مؤمن", "مؤنس", "مؤيّد", "ميّاد", "مياس", "ميسور", "ميمون", "ميثاق", "معارف", "محييالدّين", "مشاري", "نائل", "ناجح", "ناجي", "نادر", "نادي", "ناصر", "ناضر", "ناصيف", "ناظم", "ناعم", "نافذ", "نافع", "نبهان", "نبيل", "نبيه", "نبراس", "نورالحقّ", "نجدت", "نجوان", "نجيب", "نديم", "نذير", "نزار", "نسيب", "نشأت", "نصر", "نضال", "نصري", "نصور", "نصوح", "نظام", "نظمي", "نعيم", "نعمان", "نمر", "نوّاف", "نوح", "نوّار", "نور", "نورس", "نشوان", "نوري", "نيازي", "ناصر الدّين", "نصر الدّين", "نور الدّين", "نجم الدّين", "هادي", "هاشم", "هاني", "هايل", "هلال", "هلالي", "همام", "هيكل", "هيمان", "هيثم", "وائل", "واثق", "وادع", "واصف", "واصل", "وثّاب", "وجدي", "وجيه", "وحيد", "ودود", "وديع", "وريد", "وسام", "وسيم", "وسيل", "وصفي", "وضّاح", "وفائي", "وفيق", "وليد", "وليف", "ياسر", "يافع", "ياقوت", "يانع", "يحيى", "يزيد", "يسار", "يسري", "يعرب", "يعقوب", "يقين", "يمام", "يوسف", ) first_names = first_names_male + first_names_female last_names: Tuple[str, ...] 
= ( "الخالدي", "البديري", "الشهابي", "العفيفي", "جزار", "الخطيب بني جماعة الكناني", "الدجاني", "الغوانمة", "جار الله", "السروري", "الامام", "النقيب", "المفتي", "ابو السعود", "الفتياني", "العلمي", "بو مدين", "نسيبة", "النشاشيبي", "العسلي", "الحسيني", "الجاعوني", "درويش", "الأنصاري", "جودة", "النمري", "قطينة", "الداودي", "العارف", "رصاص", "البخاري", "كمال", "الترجمان الصالح", "غنيم", "المؤقت", "شتية", "شرف", "نور الدين", "الشعباني", "الأيوبي", "الجبشة", "هندية", "البشيتي", "الوعري", "الموسوس", "المظفر", "الترهي", "البغدادي", "الهدمي", "البامية", "الكلغاصي", "اليوزباشي", "المتولي", "اسطمبولي", "الألجاوي", "معتوق", "حب رمان", "القرجولي", "نجم", "طه", "عبده", "سموم", "نجيب", "غوشة", "اهرام", "قرش", "الكالوتي", "حجازي", "زحيكة", "جعفر", "ازحيمان", "الحواش", "القضماني", "طوطح", "الشاويش", "بدرية", "ابو الحاج", "البيطار", "صيام", "قليبو", "ارناؤوط", "الشرفاء", "الحلاق", "المملوك", "السمان", "طقش", "وهبة", "عبد اللطيف", "طزيز", "السيفي", "عويضة", "القطب", "الطحان", "النجار", "القباني", "عكاوي", "الديسي", "الزماميري", "التوتنجي", "الحلواني", "القزاز", "الماني", "الدقاق", "الشامي", "سوميرة", "ابو عيد", "الخلفاوي", "الدسوقي", "المغربي", "أفغاني", "مراد", "زلاطيمو", "سرندح", "مشعشع", "بحمدوني", "بعلبكي", "صيداوي", "صيداني", "طرابلسي", "جزيني", "بيروتي", "عرموني", "متني", "شويفاتي", "مزرعاني", "بتروني", "جبيلي", "اميوني", "زحلاوي", "الساحلي", "القاعي", "القلموني", "البيسار القعقور", "إياد", "الأزد", "الأشراف", "السادة", "الأوس", "أشجع", "ألمع", "أنمار", "بنو الأحمر", "بنو الأحمر بن الحارث", "بنو الأسمر", "بنو أسد", "بنو أمية", "أكلب", "بنو النجار", "البقوم", "أولاد بوعزيز", "بارق", "باهلة", "بجيلة", "بكر بن عبد مناة", "بكر بن وائل", "بديرية", "بلغازي", "بلقرن", "بلي", "بيرقدار", "بني بيات", "بكيل", "ترابين", "تغلب بن وائل", "تميم", "تنوخ", "ثقيف", "الجعليين", "جرهم", "جديس", "جذام", "جهينة", "الحجر بن الهنوء بن الأزد", "الحداء", "الحكم بن سعد العشيرة", "بنو الحارث بن كعب", "حرب", "بنو حنيفة", "حاشد", "حميضة", "حمير", "حوالة", "الحويطات", "الخزرج", "بنو خالد", "خثعم", "خزاعة", "خندف", "خولان", "الدليم", "الدواسر", "بنو الدئل", "دوبلال", "بنو ذي أصبح", "راجح", "بني رشيد", "ربيعة", "الرباب", "الرباطاب", "السادة الراويون", "الزرقان", "زبيد", "أولاد زيان", "بنو زيد", "زهران", "السهول", "بنو سعد بن بكر", "بنو سعد بن ليث بن بكر", "سليم", "سبيع", "الشايقية", "الشحوح", "بنو شعبة", "شمران", "بنو شهر", "بنو شيبان", "بنو شيبة", "شمر", "شهران", "بنو صخر", "بنو ضمرة", "ضبيعة", "طسم", "طيء", "الظفير", "عجرمة (العجارمة)", "العجمان", "العقيدات", "العوازم", "العوالق", "بنو العريج", "عاملة", "بنو عبس", "بنو عجل", "بنو عدي", "بنو عمرو", "عامر بن صعصعة", "عبد القيس", "عتيبة", "عدوان", "عذرة", "عسير", "عليان", "عنز بن وائل", "عنزة", "عنس", "عضل", "بني عطية", "غامد", "غطفان", "بنو فراس", "فراهيد", "فهم", "القواسم", "قحطان", "قريش", "قضاعة", "قيس عيلان", "بنو كنز", "الكواهلة", "بنو كلب", "كنانة", "الكبابيش", "كندة", "كهلان", "الكثيري", "بنو لام", "لخم", "بنو ليث", "المرازيق", "المنتفق", "الموركة", "المهرة", "بنو مالك", "بنو معقل", "بنو مهدي", "مزينة", "مذحج", "مرازيق البقوم", "مضر", "مطير", "ميرفاب", "النمر", "نهد", "بني هاجر", "بنو هاشم", "بنو هلال", "قبيلة هذيل البقوم", "هذيل", "همدان", "هوازن", "بنو ياس", "بنو يعلى", "يافع", "يشكر", ) prefixes_female: Tuple[str, ...] = ( "السيدة", "الآنسة", "الدكتورة", "الأستاذة", "المهندسة", ) prefixes_male: Tuple[str, ...] = ("السيد", "المهندس", "الدكتور", "الأستاذ")
none
1
2.905559
3
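Locale providers such as the ar_AA person module above are normally reached through the Faker facade rather than instantiated directly; a minimal sketch:

from faker import Faker

fake = Faker("ar_AA")            # loads the Arabic providers, including this Person provider
print(fake.name())               # a name assembled from the format strings and tuples above
print(fake.first_name_female())
print(fake.prefix_male())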
sdk/python/pulumi_gcp/spanner/_inputs.py
sisisin/pulumi-gcp
121
6631518
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = [ 'DatabaseEncryptionConfigArgs', 'DatabaseIAMBindingConditionArgs', 'DatabaseIAMMemberConditionArgs', 'InstanceIAMBindingConditionArgs', 'InstanceIAMMemberConditionArgs', ] @pulumi.input_type class DatabaseEncryptionConfigArgs: def __init__(__self__, *, kms_key_name: pulumi.Input[str]): """ :param pulumi.Input[str] kms_key_name: Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database. """ pulumi.set(__self__, "kms_key_name", kms_key_name) @property @pulumi.getter(name="kmsKeyName") def kms_key_name(self) -> pulumi.Input[str]: """ Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database. """ return pulumi.get(self, "kms_key_name") @kms_key_name.setter def kms_key_name(self, value: pulumi.Input[str]): pulumi.set(self, "kms_key_name", value) @pulumi.input_type class DatabaseIAMBindingConditionArgs: def __init__(__self__, *, expression: pulumi.Input[str], title: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "expression", expression) pulumi.set(__self__, "title", title) if description is not None: pulumi.set(__self__, "description", description) @property @pulumi.getter def expression(self) -> pulumi.Input[str]: return pulumi.get(self, "expression") @expression.setter def expression(self, value: pulumi.Input[str]): pulumi.set(self, "expression", value) @property @pulumi.getter def title(self) -> pulumi.Input[str]: return pulumi.get(self, "title") @title.setter def title(self, value: pulumi.Input[str]): pulumi.set(self, "title", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @pulumi.input_type class DatabaseIAMMemberConditionArgs: def __init__(__self__, *, expression: pulumi.Input[str], title: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "expression", expression) pulumi.set(__self__, "title", title) if description is not None: pulumi.set(__self__, "description", description) @property @pulumi.getter def expression(self) -> pulumi.Input[str]: return pulumi.get(self, "expression") @expression.setter def expression(self, value: pulumi.Input[str]): pulumi.set(self, "expression", value) @property @pulumi.getter def title(self) -> pulumi.Input[str]: return pulumi.get(self, "title") @title.setter def title(self, value: pulumi.Input[str]): pulumi.set(self, "title", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @pulumi.input_type class InstanceIAMBindingConditionArgs: def __init__(__self__, *, expression: pulumi.Input[str], title: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "expression", expression) pulumi.set(__self__, "title", title) if description is not None: pulumi.set(__self__, "description", 
description) @property @pulumi.getter def expression(self) -> pulumi.Input[str]: return pulumi.get(self, "expression") @expression.setter def expression(self, value: pulumi.Input[str]): pulumi.set(self, "expression", value) @property @pulumi.getter def title(self) -> pulumi.Input[str]: return pulumi.get(self, "title") @title.setter def title(self, value: pulumi.Input[str]): pulumi.set(self, "title", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @pulumi.input_type class InstanceIAMMemberConditionArgs: def __init__(__self__, *, expression: pulumi.Input[str], title: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "expression", expression) pulumi.set(__self__, "title", title) if description is not None: pulumi.set(__self__, "description", description) @property @pulumi.getter def expression(self) -> pulumi.Input[str]: return pulumi.get(self, "expression") @expression.setter def expression(self, value: pulumi.Input[str]): pulumi.set(self, "expression", value) @property @pulumi.getter def title(self) -> pulumi.Input[str]: return pulumi.get(self, "title") @title.setter def title(self, value: pulumi.Input[str]): pulumi.set(self, "title", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value)
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = [ 'DatabaseEncryptionConfigArgs', 'DatabaseIAMBindingConditionArgs', 'DatabaseIAMMemberConditionArgs', 'InstanceIAMBindingConditionArgs', 'InstanceIAMMemberConditionArgs', ] @pulumi.input_type class DatabaseEncryptionConfigArgs: def __init__(__self__, *, kms_key_name: pulumi.Input[str]): """ :param pulumi.Input[str] kms_key_name: Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database. """ pulumi.set(__self__, "kms_key_name", kms_key_name) @property @pulumi.getter(name="kmsKeyName") def kms_key_name(self) -> pulumi.Input[str]: """ Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database. """ return pulumi.get(self, "kms_key_name") @kms_key_name.setter def kms_key_name(self, value: pulumi.Input[str]): pulumi.set(self, "kms_key_name", value) @pulumi.input_type class DatabaseIAMBindingConditionArgs: def __init__(__self__, *, expression: pulumi.Input[str], title: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "expression", expression) pulumi.set(__self__, "title", title) if description is not None: pulumi.set(__self__, "description", description) @property @pulumi.getter def expression(self) -> pulumi.Input[str]: return pulumi.get(self, "expression") @expression.setter def expression(self, value: pulumi.Input[str]): pulumi.set(self, "expression", value) @property @pulumi.getter def title(self) -> pulumi.Input[str]: return pulumi.get(self, "title") @title.setter def title(self, value: pulumi.Input[str]): pulumi.set(self, "title", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @pulumi.input_type class DatabaseIAMMemberConditionArgs: def __init__(__self__, *, expression: pulumi.Input[str], title: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "expression", expression) pulumi.set(__self__, "title", title) if description is not None: pulumi.set(__self__, "description", description) @property @pulumi.getter def expression(self) -> pulumi.Input[str]: return pulumi.get(self, "expression") @expression.setter def expression(self, value: pulumi.Input[str]): pulumi.set(self, "expression", value) @property @pulumi.getter def title(self) -> pulumi.Input[str]: return pulumi.get(self, "title") @title.setter def title(self, value: pulumi.Input[str]): pulumi.set(self, "title", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @pulumi.input_type class InstanceIAMBindingConditionArgs: def __init__(__self__, *, expression: pulumi.Input[str], title: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "expression", expression) pulumi.set(__self__, "title", title) if description is not None: pulumi.set(__self__, "description", 
description) @property @pulumi.getter def expression(self) -> pulumi.Input[str]: return pulumi.get(self, "expression") @expression.setter def expression(self, value: pulumi.Input[str]): pulumi.set(self, "expression", value) @property @pulumi.getter def title(self) -> pulumi.Input[str]: return pulumi.get(self, "title") @title.setter def title(self, value: pulumi.Input[str]): pulumi.set(self, "title", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @pulumi.input_type class InstanceIAMMemberConditionArgs: def __init__(__self__, *, expression: pulumi.Input[str], title: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "expression", expression) pulumi.set(__self__, "title", title) if description is not None: pulumi.set(__self__, "description", description) @property @pulumi.getter def expression(self) -> pulumi.Input[str]: return pulumi.get(self, "expression") @expression.setter def expression(self, value: pulumi.Input[str]): pulumi.set(self, "expression", value) @property @pulumi.getter def title(self) -> pulumi.Input[str]: return pulumi.get(self, "title") @title.setter def title(self, value: pulumi.Input[str]): pulumi.set(self, "title", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value)
en
0.919155
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** :param pulumi.Input[str] kms_key_name: Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database. Fully qualified name of the KMS key to use to encrypt this database. This key must exist in the same location as the Spanner Database.
1.973987
2
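The generated input classes above are what user programs pass into the spanner resources; a hedged sketch of supplying DatabaseEncryptionConfigArgs when declaring a database (the instance name and KMS key path are placeholders):

import pulumi_gcp as gcp

database = gcp.spanner.Database(
    "example-db",
    instance="example-instance",    # placeholder Spanner instance
    encryption_config=gcp.spanner.DatabaseEncryptionConfigArgs(
        kms_key_name="projects/p/locations/l/keyRings/kr/cryptoKeys/k",  # placeholder key
    ),
)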
units/src/convert_units/measurements.py
tinnuadan/redcogs
1
6631519
import json from typing import Dict, List class Measure(): """ A single measure (e.g. kg). Contains the conversion to the anchor, the unit, name, aliases and the parent Measurement collection""" def __init__(self, parent): self.unit: str = "" self.aliases: List = [] self.name: str = "" self.to_anchor: float = 1 self.anchor_shift: float = 0 self.parent: Measurement = parent def __str__(self): aliases = list(self.aliases) aliases.remove(self.name) res = "%s (%s). Also known as %s. To anchor: %.2f. Anchor shift: %.2f" %(self.unit, self.name, ", ".join(aliases), self.to_anchor, self.anchor_shift) return res class Anchor(): """ Holds which Measure is used as the anchor of a Measurement """ def __init__(self): self.measure: Measure = None self.ratio: float = 1 self.transform = None class Measurement(): """ Holds a collection of measures (metric or imperial), which have a common anchor they can be converted to """ def __init__(self, name: str): self.name: str = name self.anchor: Anchor = Anchor() self.measures: List = [] self.is_metric: bool = False def hasUnit(self, unit: str): for m in self.measures: if m.unit == unit: return True return False def autoConvertFraction(frac: any): if isinstance(frac, str): tmp = frac.split("/") #ratio return float(tmp[0]) / float(tmp[1]) else: return frac def loadMeasurement(name: str, definition: Dict, is_metric: bool): result: Measurement = Measurement(name) result.is_metric = is_metric anchor: Anchor = result.anchor anchor_unit: str = None for kt, vt in definition.items(): k: str = kt v: Dict = vt if k == "_anchor": anchor_unit = v["unit"] if "transform" in v.keys(): anchor.transform = eval("lambda x: %s" % v["transform"]) else: anchor.ratio = autoConvertFraction(v["ratio"]) elif k[0] != "_": m: Measure = Measure(result) m.unit = k if "aliases" in v.keys(): m.aliases = v["aliases"] m.aliases.append(v["name"]["singular"]) m.aliases.append(v["name"]["plural"]) m.name = v["name"]["singular"] m.to_anchor = autoConvertFraction(v["to_anchor"]) result.measures.append(m) for m in result.measures: if m.unit == anchor_unit: anchor.measure = m break if not result.anchor.measure: raise Exception("Basic measure for anchor not found") return result
import json from typing import Dict, List class Measure(): """ A single measure (e.g. kg). Contains the conversion to the anchor, the unit, name, aliases and the parent Measurement collection""" def __init__(self, parent): self.unit: str = "" self.aliases: List = [] self.name: str = "" self.to_anchor: float = 1 self.anchor_shift: float = 0 self.parent: Measurement = parent def __str__(self): aliases = list(self.aliases) aliases.remove(self.name) res = "%s (%s). Also known as %s. To anchor: %.2f. Anchor shift: %.2f" %(self.unit, self.name, ", ".join(aliases), self.to_anchor, self.anchor_shift) return res class Anchor(): """ Holds which Measure is used as the anchor of a Measurement """ def __init__(self): self.measure: Measure = None self.ratio: float = 1 self.transform = None class Measurement(): """ Holds a collection of measures (metric or imperial), which have a common anchor they can be converted to """ def __init__(self, name: str): self.name: str = name self.anchor: Anchor = Anchor() self.measures: List = [] self.is_metric: bool = False def hasUnit(self, unit: str): for m in self.measures: if m.unit == unit: return True return False def autoConvertFraction(frac: any): if isinstance(frac, str): tmp = frac.split("/") #ratio return float(tmp[0]) / float(tmp[1]) else: return frac def loadMeasurement(name: str, definition: Dict, is_metric: bool): result: Measurement = Measurement(name) result.is_metric = is_metric anchor: Anchor = result.anchor anchor_unit: str = None for kt, vt in definition.items(): k: str = kt v: Dict = vt if k == "_anchor": anchor_unit = v["unit"] if "transform" in v.keys(): anchor.transform = eval("lambda x: %s" % v["transform"]) else: anchor.ratio = autoConvertFraction(v["ratio"]) elif k[0] != "_": m: Measure = Measure(result) m.unit = k if "aliases" in v.keys(): m.aliases = v["aliases"] m.aliases.append(v["name"]["singular"]) m.aliases.append(v["name"]["plural"]) m.name = v["name"]["singular"] m.to_anchor = autoConvertFraction(v["to_anchor"]) result.measures.append(m) for m in result.measures: if m.unit == anchor_unit: anchor.measure = m break if not result.anchor.measure: raise Exception("Basic measure for anchor not found") return result
en
0.898162
A single measure (e.g. kg). Contains the conversion to the anchor, the unit, name, aliases and the parent Measurement collection Holds which Measure is used as the anchor of a Measurement Holds a collection of measures (metric or imperial), which have a common anchor they can be converted to #ratio
3.447145
3
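loadMeasurement above implies the JSON layout it expects (an _anchor entry plus one entry per unit with name, to_anchor and optional aliases); a small hand-written definition in that layout, with illustrative values rather than the cog's real data files:

mass_metric = {
    "_anchor": {"unit": "g", "ratio": "1000/2.20462"},   # anchor unit and metric/imperial ratio
    "g": {"name": {"singular": "gram", "plural": "grams"}, "to_anchor": 1},
    "kg": {"name": {"singular": "kilogram", "plural": "kilograms"},
           "to_anchor": 1000, "aliases": ["kilo"]},
}

metric_mass = loadMeasurement("mass", mass_metric, is_metric=True)
print(metric_mass.hasUnit("kg"))          # True
print(metric_mass.anchor.measure.name)    # gram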
google/cloud/forseti/scanner/audit/ke_rules_engine.py
Sandesh36/forseti-security
0
6631520
<gh_stars>0 # Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rules engine for checking arbitrary properties ofKE clusters.""" from collections import namedtuple import threading import jmespath from google.cloud.forseti.common.gcp_type import errors as resource_errors from google.cloud.forseti.common.gcp_type import resource as resource_mod from google.cloud.forseti.common.gcp_type import resource_util from google.cloud.forseti.common.util import logger from google.cloud.forseti.scanner.audit import base_rules_engine as bre from google.cloud.forseti.scanner.audit import errors as audit_errors LOGGER = logger.get_logger(__name__) class KeRulesEngine(bre.BaseRulesEngine): """Rules engine for KE scanner.""" def __init__(self, rules_file_path, snapshot_timestamp=None): """Initialize. Args: rules_file_path (str): file location of rules snapshot_timestamp (str): snapshot timestamp. Defaults to None. If set, this will be the snapshot timestamp used in the engine. """ super(KeRulesEngine, self).__init__(rules_file_path=rules_file_path) self.rule_book = None self.snapshot_timestamp = snapshot_timestamp self._lock = threading.Lock() def build_rule_book(self, global_configs=None): """Build KeRuleBook from the rules definition file. Args: global_configs (dict): Global configurations. """ with self._lock: self.rule_book = KeRuleBook( self._load_rule_definitions()) # TODO: The naming is confusing and needs to be fixed in all scanners. def find_policy_violations(self, ke_cluster, force_rebuild=False): """Check if KE cluster satisfies provided rules. Args: ke_cluster (KeCluster): A KE Cluster object to check. force_rebuild (bool): If True, rebuilds the rule book. This will reload the rules definition file and add the rules to the book. Returns: generator: A generator of rule violations. """ if self.rule_book is None or force_rebuild: self.build_rule_book() return self.rule_book.find_violations(ke_cluster) class KeRuleBook(bre.BaseRuleBook): """The RuleBook for KE rules.""" def __init__(self, rule_defs=None): """Initialization. Args: rule_defs (list): KE rule definition dicts """ super(KeRuleBook, self).__init__() self._lock = threading.Lock() self.resource_rules_map = {} if not rule_defs: self.rule_defs = {} else: self.rule_defs = rule_defs self.add_rules(rule_defs) def add_rules(self, rule_defs): """Add rules to the rule book. Args: rule_defs (dict): rule definitions dictionary """ for (i, rule) in enumerate(rule_defs.get('rules', [])): self.add_rule(rule, i) def add_rule(self, rule_def, rule_index): """Add a rule to the rule book. Args: rule_def (dict): A dictionary containing rule definition properties. rule_index (int): The index of the rule from the rule definitions. Assigned automatically when the rule book is built. 
""" with self._lock: for resource in rule_def.get('resource'): resource_ids = resource.get('resource_ids') resource_type = None try: resource_type = resource_mod.ResourceType.verify( resource.get('type')) except resource_errors.InvalidResourceTypeError: raise audit_errors.InvalidRulesSchemaError( 'Missing resource type in rule {}'.format(rule_index)) if not resource_ids or len(resource_ids) < 1: raise audit_errors.InvalidRulesSchemaError( 'Missing resource ids in rule {}'.format(rule_index)) rule_mode = rule_def.get('mode') if rule_mode not in ('blacklist', 'whitelist'): raise audit_errors.InvalidRulesSchemaError( 'Unknown mode in rule {}'.format(rule_index)) rule_key = rule_def.get('key') if rule_key is None: raise audit_errors.InvalidRulesSchemaError( 'Missing key in rule {}'.format(rule_index)) rule_values = rule_def.get('values', []) # For each resource id associated with the rule, create a # mapping of resource => rules. for resource_id in resource_ids: gcp_resource = resource_util.create_resource( resource_id=resource_id, resource_type=resource_type) rule = Rule( rule_def.get('name'), rule_index, rule_mode, rule_key, rule_values, ) resource_rules = self.resource_rules_map.setdefault( gcp_resource, ResourceRules(resource=gcp_resource)) if rule not in resource_rules.rules: resource_rules.rules.add(rule) # pylint: enable=invalid-name def get_resource_rules(self, resource): """Get all the resource rules for resource. Args: resource (Resource): The gcp_type Resource find in the map. Returns: ResourceRules: A ResourceRules object. """ return self.resource_rules_map.get(resource) def find_violations(self, ke_cluster): """Find violations in the rule book. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. Returns: list: RuleViolation """ LOGGER.debug('Looking for KE violations: %r', ke_cluster) violations = [] resource_ancestors = resource_util.get_ancestors_from_full_name( ke_cluster.resource_full_name) LOGGER.debug('Ancestors of resource: %r', resource_ancestors) checked_wildcards = set() for curr_resource in resource_ancestors: if not curr_resource: # resource_ancestors will contain all the resources including # the child resource, which has type kubernetes cluster and # cannot be created (return None) as part of the ancestor path, # we will skip the child as it's not part of the ancestor. continue resource_rules = self.get_resource_rules(curr_resource) if resource_rules: violations.extend( resource_rules.find_policy_violations(ke_cluster)) wildcard_resource = resource_util.create_resource( resource_id='*', resource_type=curr_resource.type) if wildcard_resource in checked_wildcards: continue checked_wildcards.add(wildcard_resource) resource_rules = self.get_resource_rules(wildcard_resource) if resource_rules: violations.extend( resource_rules.find_policy_violations(ke_cluster)) LOGGER.debug('Returning violations: %r', violations) return violations class ResourceRules(object): """An association of a resource to rules.""" def __init__(self, resource=None, rules=None): """Initialize. Args: resource (Resource): The resource to associate with the rule. rules (set): rules to associate with the resource. """ if not isinstance(rules, set): rules = set([]) self.resource = resource self.rules = rules def find_policy_violations(self, ke_cluster): """Determine if the policy binding matches this rule's criteria. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. 
Returns: list: RuleViolation """ violations = [] for rule in self.rules: rule_violations = rule.find_policy_violations(ke_cluster) if rule_violations: violations.extend(rule_violations) return violations def __eq__(self, other): """Compare == with another object. Args: other (ResourceRules): object to compare with Returns: int: comparison result """ if not isinstance(other, type(self)): return NotImplemented return (self.resource == other.resource and self.rules == other.rules) def __ne__(self, other): """Compare != with another object. Args: other (object): object to compare with Returns: int: comparison result """ return not self == other def __repr__(self): """String representation of this node. Returns: str: debug string """ return 'IapResourceRules<resource={}, rules={}>'.format( self.resource, self.rules) class Rule(object): """Rule properties from the rule definition file, also finds violations.""" def __init__(self, rule_name, rule_index, rule_mode, rule_key, rule_values): """Initialize. Args: rule_name (str): Name of the loaded rule rule_index (int): The index of the rule from the rule definitions rule_mode (str): blacklist or whitelist rule_key (str): jmespath pointing to the desired key rule_values (list): list of values, interpreted per mode """ self.rule_name = rule_name self.rule_index = rule_index self.rule_mode = rule_mode self.rule_key = rule_key self.rule_values = rule_values # compile right away to return exceptions asap self.rule_jmespath = jmespath.compile(self.rule_key) # TODO: The naming is confusing and needs to be fixed in all scanners. def find_policy_violations(self, ke_cluster): """Find KE violations in based on the rule. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. Returns: list: Returns a list of RuleViolation named tuples """ violations = [] actual = self.rule_jmespath.search(ke_cluster.as_dict) LOGGER.debug('actual jmespath result: %s', actual) if self.rule_mode == 'whitelist': if actual not in self.rule_values: violations.append(self._make_violation( ke_cluster, '%s has value %s, which is not in the whitelist (%s)' % ( self.rule_jmespath.expression, actual, self.rule_values, ), actual, )) if self.rule_mode == 'blacklist': if actual in self.rule_values: violations.append(self._make_violation( ke_cluster, '%s has value %s, which is in the blacklist (%s)' % ( self.rule_jmespath.expression, actual, self.rule_values, ), actual, )) return violations def _make_violation(self, ke_cluster, violation_reason, actual): """Build a RuleViolation for the cluster. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. violation_reason (str): The violation details. actual (object): The actual value of the jmespath expression. Returns: RuleViolation: A new RuleViolation namedtuple. """ return RuleViolation( resource_type=resource_mod.ResourceType.KE_CLUSTER, resource_id=ke_cluster.name, full_name=ke_cluster.resource_full_name, rule_name=self.rule_name, rule_index=self.rule_index, rule_mode=self.rule_mode, rule_values=self.rule_values, actual_value=actual, violation_type='KE_VIOLATION', violation_reason=violation_reason, project_id=ke_cluster.project_id, cluster_name=ke_cluster.name, resource_data=str(ke_cluster), resource_name=ke_cluster.name, ) def __eq__(self, other): """Test whether Rule equals other Rule. 
Args: other (Rule): object to compare to Returns: int: comparison result """ if not isinstance(other, type(self)): return NotImplemented return all( self.rule_name == other.rule_name, self.rule_index == other.rule_index, self.rule_mode == other.rule_mode, self.rule_values == other.rule_values, ) def __ne__(self, other): """Test whether Rule is not equal to another Rule. Args: other (object): object to compare to Returns: int: comparison result """ return not self == other def __hash__(self): """Make a hash of the rule index. For now, this will suffice since the rule index is assigned automatically when the rules map is built, and the scanner only handles one rule file at a time. Later on, we'll need to revisit this hash method when we process multiple rule files. Returns: int: The hash of the rule index. """ return hash(self.rule_index) # pylint: enable=inconsistent-return-statements # Rule violation. # resource_type: string # resource_id: string # rule_name: string # rule_index: int # violation_type: KE_VIOLATION # violation_reason: string # project_id: string # cluster_name: string RuleViolation = namedtuple('RuleViolation', [ 'resource_type', 'resource_id', 'full_name', 'rule_name', 'rule_index', 'rule_mode', 'rule_values', 'actual_value', 'violation_type', 'violation_reason', 'project_id', 'cluster_name', 'resource_data', 'resource_name', ])
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rules engine for checking arbitrary properties ofKE clusters.""" from collections import namedtuple import threading import jmespath from google.cloud.forseti.common.gcp_type import errors as resource_errors from google.cloud.forseti.common.gcp_type import resource as resource_mod from google.cloud.forseti.common.gcp_type import resource_util from google.cloud.forseti.common.util import logger from google.cloud.forseti.scanner.audit import base_rules_engine as bre from google.cloud.forseti.scanner.audit import errors as audit_errors LOGGER = logger.get_logger(__name__) class KeRulesEngine(bre.BaseRulesEngine): """Rules engine for KE scanner.""" def __init__(self, rules_file_path, snapshot_timestamp=None): """Initialize. Args: rules_file_path (str): file location of rules snapshot_timestamp (str): snapshot timestamp. Defaults to None. If set, this will be the snapshot timestamp used in the engine. """ super(KeRulesEngine, self).__init__(rules_file_path=rules_file_path) self.rule_book = None self.snapshot_timestamp = snapshot_timestamp self._lock = threading.Lock() def build_rule_book(self, global_configs=None): """Build KeRuleBook from the rules definition file. Args: global_configs (dict): Global configurations. """ with self._lock: self.rule_book = KeRuleBook( self._load_rule_definitions()) # TODO: The naming is confusing and needs to be fixed in all scanners. def find_policy_violations(self, ke_cluster, force_rebuild=False): """Check if KE cluster satisfies provided rules. Args: ke_cluster (KeCluster): A KE Cluster object to check. force_rebuild (bool): If True, rebuilds the rule book. This will reload the rules definition file and add the rules to the book. Returns: generator: A generator of rule violations. """ if self.rule_book is None or force_rebuild: self.build_rule_book() return self.rule_book.find_violations(ke_cluster) class KeRuleBook(bre.BaseRuleBook): """The RuleBook for KE rules.""" def __init__(self, rule_defs=None): """Initialization. Args: rule_defs (list): KE rule definition dicts """ super(KeRuleBook, self).__init__() self._lock = threading.Lock() self.resource_rules_map = {} if not rule_defs: self.rule_defs = {} else: self.rule_defs = rule_defs self.add_rules(rule_defs) def add_rules(self, rule_defs): """Add rules to the rule book. Args: rule_defs (dict): rule definitions dictionary """ for (i, rule) in enumerate(rule_defs.get('rules', [])): self.add_rule(rule, i) def add_rule(self, rule_def, rule_index): """Add a rule to the rule book. Args: rule_def (dict): A dictionary containing rule definition properties. rule_index (int): The index of the rule from the rule definitions. Assigned automatically when the rule book is built. 
""" with self._lock: for resource in rule_def.get('resource'): resource_ids = resource.get('resource_ids') resource_type = None try: resource_type = resource_mod.ResourceType.verify( resource.get('type')) except resource_errors.InvalidResourceTypeError: raise audit_errors.InvalidRulesSchemaError( 'Missing resource type in rule {}'.format(rule_index)) if not resource_ids or len(resource_ids) < 1: raise audit_errors.InvalidRulesSchemaError( 'Missing resource ids in rule {}'.format(rule_index)) rule_mode = rule_def.get('mode') if rule_mode not in ('blacklist', 'whitelist'): raise audit_errors.InvalidRulesSchemaError( 'Unknown mode in rule {}'.format(rule_index)) rule_key = rule_def.get('key') if rule_key is None: raise audit_errors.InvalidRulesSchemaError( 'Missing key in rule {}'.format(rule_index)) rule_values = rule_def.get('values', []) # For each resource id associated with the rule, create a # mapping of resource => rules. for resource_id in resource_ids: gcp_resource = resource_util.create_resource( resource_id=resource_id, resource_type=resource_type) rule = Rule( rule_def.get('name'), rule_index, rule_mode, rule_key, rule_values, ) resource_rules = self.resource_rules_map.setdefault( gcp_resource, ResourceRules(resource=gcp_resource)) if rule not in resource_rules.rules: resource_rules.rules.add(rule) # pylint: enable=invalid-name def get_resource_rules(self, resource): """Get all the resource rules for resource. Args: resource (Resource): The gcp_type Resource find in the map. Returns: ResourceRules: A ResourceRules object. """ return self.resource_rules_map.get(resource) def find_violations(self, ke_cluster): """Find violations in the rule book. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. Returns: list: RuleViolation """ LOGGER.debug('Looking for KE violations: %r', ke_cluster) violations = [] resource_ancestors = resource_util.get_ancestors_from_full_name( ke_cluster.resource_full_name) LOGGER.debug('Ancestors of resource: %r', resource_ancestors) checked_wildcards = set() for curr_resource in resource_ancestors: if not curr_resource: # resource_ancestors will contain all the resources including # the child resource, which has type kubernetes cluster and # cannot be created (return None) as part of the ancestor path, # we will skip the child as it's not part of the ancestor. continue resource_rules = self.get_resource_rules(curr_resource) if resource_rules: violations.extend( resource_rules.find_policy_violations(ke_cluster)) wildcard_resource = resource_util.create_resource( resource_id='*', resource_type=curr_resource.type) if wildcard_resource in checked_wildcards: continue checked_wildcards.add(wildcard_resource) resource_rules = self.get_resource_rules(wildcard_resource) if resource_rules: violations.extend( resource_rules.find_policy_violations(ke_cluster)) LOGGER.debug('Returning violations: %r', violations) return violations class ResourceRules(object): """An association of a resource to rules.""" def __init__(self, resource=None, rules=None): """Initialize. Args: resource (Resource): The resource to associate with the rule. rules (set): rules to associate with the resource. """ if not isinstance(rules, set): rules = set([]) self.resource = resource self.rules = rules def find_policy_violations(self, ke_cluster): """Determine if the policy binding matches this rule's criteria. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. 
Returns: list: RuleViolation """ violations = [] for rule in self.rules: rule_violations = rule.find_policy_violations(ke_cluster) if rule_violations: violations.extend(rule_violations) return violations def __eq__(self, other): """Compare == with another object. Args: other (ResourceRules): object to compare with Returns: int: comparison result """ if not isinstance(other, type(self)): return NotImplemented return (self.resource == other.resource and self.rules == other.rules) def __ne__(self, other): """Compare != with another object. Args: other (object): object to compare with Returns: int: comparison result """ return not self == other def __repr__(self): """String representation of this node. Returns: str: debug string """ return 'IapResourceRules<resource={}, rules={}>'.format( self.resource, self.rules) class Rule(object): """Rule properties from the rule definition file, also finds violations.""" def __init__(self, rule_name, rule_index, rule_mode, rule_key, rule_values): """Initialize. Args: rule_name (str): Name of the loaded rule rule_index (int): The index of the rule from the rule definitions rule_mode (str): blacklist or whitelist rule_key (str): jmespath pointing to the desired key rule_values (list): list of values, interpreted per mode """ self.rule_name = rule_name self.rule_index = rule_index self.rule_mode = rule_mode self.rule_key = rule_key self.rule_values = rule_values # compile right away to return exceptions asap self.rule_jmespath = jmespath.compile(self.rule_key) # TODO: The naming is confusing and needs to be fixed in all scanners. def find_policy_violations(self, ke_cluster): """Find KE violations in based on the rule. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. Returns: list: Returns a list of RuleViolation named tuples """ violations = [] actual = self.rule_jmespath.search(ke_cluster.as_dict) LOGGER.debug('actual jmespath result: %s', actual) if self.rule_mode == 'whitelist': if actual not in self.rule_values: violations.append(self._make_violation( ke_cluster, '%s has value %s, which is not in the whitelist (%s)' % ( self.rule_jmespath.expression, actual, self.rule_values, ), actual, )) if self.rule_mode == 'blacklist': if actual in self.rule_values: violations.append(self._make_violation( ke_cluster, '%s has value %s, which is in the blacklist (%s)' % ( self.rule_jmespath.expression, actual, self.rule_values, ), actual, )) return violations def _make_violation(self, ke_cluster, violation_reason, actual): """Build a RuleViolation for the cluster. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. violation_reason (str): The violation details. actual (object): The actual value of the jmespath expression. Returns: RuleViolation: A new RuleViolation namedtuple. """ return RuleViolation( resource_type=resource_mod.ResourceType.KE_CLUSTER, resource_id=ke_cluster.name, full_name=ke_cluster.resource_full_name, rule_name=self.rule_name, rule_index=self.rule_index, rule_mode=self.rule_mode, rule_values=self.rule_values, actual_value=actual, violation_type='KE_VIOLATION', violation_reason=violation_reason, project_id=ke_cluster.project_id, cluster_name=ke_cluster.name, resource_data=str(ke_cluster), resource_name=ke_cluster.name, ) def __eq__(self, other): """Test whether Rule equals other Rule. 
        Args:
            other (Rule): object to compare to

        Returns:
            int: comparison result
        """
        if not isinstance(other, type(self)):
            return NotImplemented
        return all([
            self.rule_name == other.rule_name,
            self.rule_index == other.rule_index,
            self.rule_mode == other.rule_mode,
            self.rule_values == other.rule_values,
        ])

    def __ne__(self, other):
        """Test whether Rule is not equal to another Rule.

        Args:
            other (object): object to compare to

        Returns:
            int: comparison result
        """
        return not self == other

    def __hash__(self):
        """Make a hash of the rule index.

        For now, this will suffice since the rule index is assigned
        automatically when the rules map is built, and the scanner only
        handles one rule file at a time. Later on, we'll need to revisit
        this hash method when we process multiple rule files.

        Returns:
            int: The hash of the rule index.
        """
        return hash(self.rule_index)
    # pylint: enable=inconsistent-return-statements


# Rule violation.
# resource_type: string
# resource_id: string
# rule_name: string
# rule_index: int
# violation_type: KE_VIOLATION
# violation_reason: string
# project_id: string
# cluster_name: string
RuleViolation = namedtuple('RuleViolation', [
    'resource_type',
    'resource_id',
    'full_name',
    'rule_name',
    'rule_index',
    'rule_mode',
    'rule_values',
    'actual_value',
    'violation_type',
    'violation_reason',
    'project_id',
    'cluster_name',
    'resource_data',
    'resource_name',
])
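The engine above is typically driven by building the rule book once and then streaming clusters through it. A minimal usage sketch, not part of the original file: the rules path is a placeholder, and `ke_cluster` stands in for a KeCluster object exposing the attributes the engine actually reads (as_dict, name, project_id, resource_full_name).

    # Hypothetical driver for the KE rules engine defined above (illustration only).
    engine = KeRulesEngine(rules_file_path='/path/to/ke_rules.yaml')  # placeholder path
    engine.build_rule_book()
    # `ke_cluster` is assumed to be a KeCluster produced by the scanner.
    for violation in engine.find_policy_violations(ke_cluster):
        print(violation.rule_name, violation.violation_reason)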
en
0.787531
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Rules engine for checking arbitrary properties ofKE clusters. Rules engine for KE scanner. Initialize. Args: rules_file_path (str): file location of rules snapshot_timestamp (str): snapshot timestamp. Defaults to None. If set, this will be the snapshot timestamp used in the engine. Build KeRuleBook from the rules definition file. Args: global_configs (dict): Global configurations. # TODO: The naming is confusing and needs to be fixed in all scanners. Check if KE cluster satisfies provided rules. Args: ke_cluster (KeCluster): A KE Cluster object to check. force_rebuild (bool): If True, rebuilds the rule book. This will reload the rules definition file and add the rules to the book. Returns: generator: A generator of rule violations. The RuleBook for KE rules. Initialization. Args: rule_defs (list): KE rule definition dicts Add rules to the rule book. Args: rule_defs (dict): rule definitions dictionary Add a rule to the rule book. Args: rule_def (dict): A dictionary containing rule definition properties. rule_index (int): The index of the rule from the rule definitions. Assigned automatically when the rule book is built. # For each resource id associated with the rule, create a # mapping of resource => rules. # pylint: enable=invalid-name Get all the resource rules for resource. Args: resource (Resource): The gcp_type Resource find in the map. Returns: ResourceRules: A ResourceRules object. Find violations in the rule book. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. Returns: list: RuleViolation # resource_ancestors will contain all the resources including # the child resource, which has type kubernetes cluster and # cannot be created (return None) as part of the ancestor path, # we will skip the child as it's not part of the ancestor. An association of a resource to rules. Initialize. Args: resource (Resource): The resource to associate with the rule. rules (set): rules to associate with the resource. Determine if the policy binding matches this rule's criteria. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. Returns: list: RuleViolation Compare == with another object. Args: other (ResourceRules): object to compare with Returns: int: comparison result Compare != with another object. Args: other (object): object to compare with Returns: int: comparison result String representation of this node. Returns: str: debug string Rule properties from the rule definition file, also finds violations. Initialize. Args: rule_name (str): Name of the loaded rule rule_index (int): The index of the rule from the rule definitions rule_mode (str): blacklist or whitelist rule_key (str): jmespath pointing to the desired key rule_values (list): list of values, interpreted per mode # compile right away to return exceptions asap # TODO: The naming is confusing and needs to be fixed in all scanners. Find KE violations in based on the rule. 
Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. Returns: list: Returns a list of RuleViolation named tuples Build a RuleViolation for the cluster. Args: ke_cluster (KeCluster): KE Cluster and ServerConfig data. violation_reason (str): The violation details. actual (object): The actual value of the jmespath expression. Returns: RuleViolation: A new RuleViolation namedtuple. Test whether Rule equals other Rule. Args: other (Rule): object to compare to Returns: int: comparison result Test whether Rule is not equal to another Rule. Args: other (object): object to compare to Returns: int: comparison result Make a hash of the rule index. For now, this will suffice since the rule index is assigned automatically when the rules map is built, and the scanner only handles one rule file at a time. Later on, we'll need to revisit this hash method when we process multiple rule files. Returns: int: The hash of the rule index. # pylint: enable=inconsistent-return-statements # Rule violation. # resource_type: string # resource_id: string # rule_name: string # rule_index: int # violation_type: KE_VIOLATION # violation_reason: string # project_id: string # cluster_name: string
1.798786
2
structmanager/optimization/sol200/cards_opt.py
saullocastro/structmanager
1
6631521
<filename>structmanager/optimization/sol200/cards_opt.py """ Optimization cards (:mod:`structmanager.sol200.cards_opt`) ========================================================== .. currentmodule:: structmanager.sol200.cards_opt` Many input cards related to the optimization problem are wrapped in this module. The input cards more related to the solver are contained in module :mod:`structmanager.sol200.cards_solver`. .. rubric:: Classes .. autosummary:: structmanager.sol200.cards_opt.DCONSTR structmanager.sol200.cards_opt.DEQATN structmanager.sol200.cards_opt.DESVAR structmanager.sol200.cards_opt.DLINK structmanager.sol200.cards_opt.DRESP1 structmanager.sol200.cards_opt.DRESP2 structmanager.sol200.cards_opt.DRESP3 structmanager.sol200.cards_opt.DTABLE structmanager.sol200.cards_opt.DVPREL1 """ from .sizing_data import SDATA from .utils import format_float as ff class DVPREL(object): """Base class to guarantee an unique id among all DVPRELs""" uniqueid = 9000000 class DRESP(object): """Base class to guarantee an unique id among all DRESPs""" uniqueid = 9000000 class DRESP1(DRESP): """Design response DRESP1 Parameters ---------- label : str User-defined label. rtype : str Response type. ptype : str Element flag ('ELEM') or property entry name ('PBAR', PSHELL' etc). region : int or None, optional Region identifier for constraint screening. atta, attb: int or float or None Response attributes. atti : int or float or None or list, optional Response attributes. """ def __init__(self, label, rtype, ptype, region=None, atta=None, attb=None, atti=None): if region is None: region = '' if atta is None: atta = '' if attb is None: attb = '' if atti is None: atti = '' self.id = DRESP.uniqueid DRESP.uniqueid += 1 self.label = label self.rtype = rtype self.ptype = ptype self.region = region self.atta = atta self.attb = attb self.atti = atti def print_card(self, file): """Print the corresponding input card """ dresp1str = ('%s% 8d% 8s% 8s% 8s% 8s% 8s% 8s' % ('DRESP1'.ljust(8), self.id, self.label, self.rtype, self.ptype, self.region, self.atta, self.attb)) if isinstance(self.atti, (int, str)): dresp1str = dresp1str + str(self.atti).rjust(8) elif isinstance(self.atti, list): atticount = 8 for i in range(0, len(self.atti)): atticount += 1 if atticount == 10: file.write(dresp1str + '\n') dresp1str = '+'.ljust(8) atticount = 2 dresp1str += str(self.atti[i]).rjust(8) file.write(dresp1str + '\n') class PRINTAUX(object): """Base class for :class:`DRESP23` and :class:`BASEDV2` """ def __init__(self): pass def print_aux(self, label, listaux, file): auxstr = '+'.ljust(8) + label.ljust(8) count = 2 for aux_id in listaux: count += 1 if count == 10: file.write(auxstr + '\n') count = 3 auxstr = '+'.ljust(16) auxstr += str(aux_id).rjust(8) file.write(auxstr + '\n') class DRESP23(PRINTAUX): """Base class for DVPREL2, DVCREL2, DVMREL2, DRESP2, DRESP3 etc """ def __init__(self): super(DRESP23, self).__init__() self.dvars = [] self.dtable = [] self.dresp1 = [] self.dresp2 = [] def add_dvar(self, dvar_id): self.dvars.append(dvar_id) def add_dtable(self, cons_label): self.dtable.append(cons_label) def add_dresp1(self, dresp1_id): self.dresp1.append(dresp1_id) def add_dresp2(self, dresp2_id): self.dresp2.append(dresp2_id) class DRESP2(DRESP23): """Design response DRESP2 Define equation responses that are used in the design, either as an objective function or as constraints. Parameters ---------- label : str User-defined label. 
eqid : int or str :class:`.DEQATN` entry identification number, or string informing a pre-programmed function. region : int or None, optional Region used for constraint screening. Notes ----- To add variables, table entries or other :class:`.DRESP1` responses use the methods: :meth:`.add_dvar`, :meth:`.add_dtable` and :meth:`.add_dresp1`. """ def __init__(self, label, eqid, region=None): super(DRESP2, self).__init__() if region is None: region = '' self.id = DRESP.uniqueid DRESP.uniqueid += 1 self.label = label self.eqid = eqid self.region = region def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ drespstr = ('%s% 8d% 8s% 8s% 8s\n' % ('DRESP2'.ljust(8), self.id, self.label, str(self.eqid), self.region)) file.write(drespstr) if len(self.dvars) > 0: self.print_aux('DESVAR', self.dvars, file) if len(self.dtable) > 0: self.print_aux('DTABLE', self.dtable, file) if len(self.dresp1) > 0: self.print_aux('DRESP1', self.dresp1, file) if len(self.dresp2) > 0: self.print_aux('DRESP2', self.dresp2, file) class DRESP3(DRESP23): """Design response DRESP3 Define use-subroutine or built-in responses that can be used in the design either as constraint or as an objective. Parameters ---------- label : str User-defined label. group : str Group name the external response type belongs to. type : str External response type. region : int, optional :class:`.DEQATN` entry identification number. Notes ----- To add variables, table entries or other :class:`.DRESP1` responses use the methods: :meth:`.add_dvar`, :meth:`.add_dtable` and :meth:`.add_dresp1`. """ def __init__(self, label, group, type, region=''): super(DRESP3, self).__init__() self.id = DRESP.uniqueid DRESP.uniqueid += 1 self.label = label self.group = group self.type = type self.region = region def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ drespstr = ('%s% 8d% 8s% 8s% 8s% 8s\n' % ('DRESP3'.ljust(8), self.id, self.label, self.group, self.type, self.region)) file.write(drespstr) if len(self.dvars) > 0: self.print_aux('DESVAR', self.dvars, file) if len(self.dtable) > 0: self.print_aux('DTABLE', self.dtable, file) if len(self.dresp1) > 0: self.print_aux('DRESP1', self.dresp1, file) if len(self.dresp2) > 0: self.print_aux('DRESP2', self.dresp2, file) class DESVAR(object): """Design Variable Parameters ---------- label : str User-supplied name for printing purposes. xinit : float Initial value. xlb : float Lower bound. xub : float Upper bound. dvprel1 : :class:`.DVPREL1` or None, optional The corresponding design variable-to-property relation. code : str or None, optional An additional code to identify the design varible. """ uniqueid = 9000000 def __init__(self, label, xinit, xlb, xub, dvprel1=None, code=None): self.id = DESVAR.uniqueid DESVAR.uniqueid += 1 self.defaults() self.label = label self.xinit = xinit self.xlb = xlb self.xub = xub self.dvprel1 = dvprel1 self.code = code def defaults(self): self.delx = '' self.ddval = '' def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. 
""" #file.write('$dvar.id dvar.code %8d %-8s\n' % (self.id, self.code)) #file.write('$HMNAME DESVAR %8d %-8s\n' % (self.id, self.label)) file.write('%s% 8d% 8s%s%s%s% 8s% 8s\n' % ('DESVAR'.ljust(8), self.id, self.label, ff(self.xinit), ff(self.xlb), ff(self.xub), str(self.delx), str(self.ddval))) def show_card(self): print('%s% 8d% 8s%s%s%s% 8s% 8s\n' % ('DESVAR'.ljust(8), self.id, self.label, ff(self.xinit), ff(self.xlb), ff(self.xub), str(self.delx), str(self.ddval))) class BASEDV2(PRINTAUX): """Base class for DVPREL2, DVCREL2 and DVMREL2 """ def __init__(self): super(BASEDV2, self).__init__() self.dvars = [] self.dtable = [] def add_dvar(self, dvar_id): self.dvars.append(dvar_id) def add_dtable(self, cons_label): self.dtable.append(cons_label) class DVPREL1(object): """Design Variable-to-Property relation This is a wrapper for the DVPREL1 card in NASTRAN. Parameters ---------- type : str Name of a property entry, such as "PBAR", "PBEAM", etc. pid : int Property entry identification number. pname : str Parameter name such as "T", "A", "DIM1", or a field position of the property entry. dvids : list Ids of the design variables that will be related to this property parameter. coeffs : list Multipliers to each corresponding design variable. c0 : float, optional Constant term of relation. """ def __init__(self, type, pid, pname, dvids, coeffs, c0=0.): self.id = DVPREL.uniqueid DVPREL.uniqueid += 1 self.type = type self.pid = pid self.pname = pname self.dvids = dvids self.coeffs = coeffs self.c0 = c0 assert len(dvids) == len(coeffs), 'Lengths must be the same' assert len(dvids) > 0, 'At least one variable must be related' def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ file.write('%s% 8d% 8s% 8d% 8s% 8s% 8s%s\n' % ('DVPREL1'.ljust(8), self.id, self.type, self.pid, self.pname, '', '', ff(self.c0))) fieldnum = 0 dvprel1str = '+'.ljust(8) for i in range(len(self.dvids)): fieldnum += 2 if fieldnum == 10: file.write(dvprel1str + '\n') dvprel1str = '+'.ljust(8) fieldnum = 2 dvprel1str += ('% 8d%s' % (self.dvids[i], ff(self.coeffs[i]))) file.write(dvprel1str + '\n') class DVPREL2(BASEDV2): """Design Variable-to-Property relation This is a wrapper for the DVPREL1 card in NASTRAN. Parameters ---------- type : str Name of a property entry, such as "PBAR", "PBEAM", etc. pid : int Property entry identification number. pname : str Parameter name such as "T", "A", "DIM1", or a field position of the property entry. eqid : int or str :class:`.DEQATN` entry identification number, or string informing a pre-programmed function. """ def __init__(self, type, pid, pname, eqid): super(DVPREL2, self).__init__() self.id = DVPREL.uniqueid DVPREL.uniqueid += 1 self.type = type self.pid = pid self.pname = pname self.eqid = eqid def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ dvprel2str = ('%s% 8d% 8s% 8d% 8s% 8s% 8s% 8s\n' % ('DVPREL2'.ljust(8), self.id, self.type, self.pid, self.pname, '', '', str(self.eqid))) file.write(dvprel2str) if len(self.dvars) > 0: self.print_aux('DESVAR', self.dvars, file) if len(self.dtable) > 0: self.print_aux('DTABLE', self.dtable, file) class DCONSTR(object): """Design constraint Parameters ---------- dcid : int Design constraint set identification number. rid : int The corresponding design response id. lallow : float Lower bound on the response quantity. 
uallow : float Upper bound on the response quantity. """ uniqueid = 9000000 def __init__(self, dcid, rid, lallow, uallow): self.id = DCONSTR.uniqueid DCONSTR.uniqueid += 1 self.dcid = dcid self.rid = rid self.lallow = lallow if lallow is not None else '' self.uallow = uallow if uallow is not None else '' def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ dconstr = ('%s% 8d% 8d% 8s% 8s' % ('DCONSTR'.ljust(8), self.dcid, self.rid, str(self.lallow), str(self.uallow))) file.write(dconstr + '\n') class DLINK(object): r"""Link between design variables DLINK creates links among variables using the form: .. math:: dvar_{dependent} = c_0 + c_{mult} \times (c_1 \times {dvar}_1 + c_2 \times {dvar}_2) where: - `dvar_{dependent}` is the dependent variable - `{dvar}_i` the `i^{th}` design variable - `c_0` is a constant - `c_{mult}` is a common multiplier - `c_i` is an individual multiplier for `{dvar}_i` Parameters ---------- ddvid : int Dependent design variable identification number. c0 : float Constant term. cmult : float Constant multiplier. idvs : list The independent variables. cs : list The multipliers for each variable. Notes ----- Be sure that no dependent variables are being used as independent variables. By default `c0 = 0` and `cmult = 1`. The values of `dvi` and `ci` are inputed as follows:: indep_dv_c = [dv1, c1, dv2, c2, ...] indep_dv_c = [1000000, 1., 1000001, 1., ...] """ uniqueid = 9000000 def __init__(self, ddvid, idvs, cs, c0=0., cmult=1.): self.id = DLINK.uniqueid DLINK.uniqueid += 1 self.ddvid = ddvid self.c0 = c0 self.cmult = cmult self.idvs = idvs self.cs = cs assert len(idvs) == len(cs), 'Lengths must be the same' assert len(idvs) > 0, 'At least one independent variable required' def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ dlinkstr = ('%s% 8d% 8d%s%s' % ('DLINK'.ljust(8), self.id, self.ddvid, ff(self.c0), ff(self.cmult))) dvicicount = 4 for i in range(len(self.idvs)): dvicicount += 2 if dvicicount == 10: file.write(dlinkstr + '\n') dlinkstr = '+'.ljust(8) dvicicount = 2 dlinkstr += ('% 8d%s' % (self.idvs[i], ff(self.cs[i]))) file.write(dlinkstr + '\n') class DEQATN(object): """Equation to calculate customized responses Parameters ---------- eq : str A string containing the equation. For example, the following equation: .. math:: T(x_1, x_2, x_3) = \sqrt{x_1^2 + x_2^2 + x_3^2} can be written as:: eq = 'T(x1,x2,x3)=SQRT(x1**2+x2**2+x3**2)' Notes ----- .. note:: The :meth:`.DEQATN.print_card` method will split the equation, when necessary, in order to keep the limit length up to 56 characters in the first line, and 64 characters for the subsequent lines. """ uniqueid = 9000000 def __init__(self, eq): self.id = DEQATN.uniqueid DEQATN.uniqueid += 1 self.eq = ' ' + eq def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. 
""" eq = self.eq deqatn_str = '%s% 8d' % ('DEQATN'.ljust(8), self.id) check = eq[:56].find(';') if check == -1: check = 55 deqatn_str += eq[:check+1] deqatn_str += '\n' file.write(deqatn_str) eq = eq[check+1:] while len(eq) > 0: check = eq[:64].find(';') if check == -1: check = 63 deqatn_str = '+ ' + eq[:check+1].ljust(64) + '\n' file.write(deqatn_str) eq = eq[check+1:] class DTABLE(object): """Design Table used to define many constants Hints: - There can be any number of DTABLE entries in the bulk data. - The user must avoid repeated labels for constants - The constant label must be <= 8 characters. All values for the constants must be of the 'Real' type. Ex:: input_dict={'c1':1. , 'c2':2., 'max8char':999.} """ def __init__(self, input_dict={}): self.input_dict = input_dict def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ if len(self.input_dict) > 0: keys = self.input_dict.keys() dtable_str = 'DTABLE'.ljust(8) count = 0 for k in sorted(keys): v = self.input_dict[k] count += 2 if count == 10: file.write(dtable_str + '\n') dtable_str = '+'.ljust(8) count = 2 dtable_str += '% 8s%s' % (str(k), ff(v)) file.write(dtable_str + '\n') if __name__ == '__main__': with open('tmp.bdf', 'w') as f: test = DRESP2('test',1,100) for i in range(1,16): test.add_dvar(i) test.add_dresp1(i) test.print_card(f)
<filename>structmanager/optimization/sol200/cards_opt.py """ Optimization cards (:mod:`structmanager.sol200.cards_opt`) ========================================================== .. currentmodule:: structmanager.sol200.cards_opt` Many input cards related to the optimization problem are wrapped in this module. The input cards more related to the solver are contained in module :mod:`structmanager.sol200.cards_solver`. .. rubric:: Classes .. autosummary:: structmanager.sol200.cards_opt.DCONSTR structmanager.sol200.cards_opt.DEQATN structmanager.sol200.cards_opt.DESVAR structmanager.sol200.cards_opt.DLINK structmanager.sol200.cards_opt.DRESP1 structmanager.sol200.cards_opt.DRESP2 structmanager.sol200.cards_opt.DRESP3 structmanager.sol200.cards_opt.DTABLE structmanager.sol200.cards_opt.DVPREL1 """ from .sizing_data import SDATA from .utils import format_float as ff class DVPREL(object): """Base class to guarantee an unique id among all DVPRELs""" uniqueid = 9000000 class DRESP(object): """Base class to guarantee an unique id among all DRESPs""" uniqueid = 9000000 class DRESP1(DRESP): """Design response DRESP1 Parameters ---------- label : str User-defined label. rtype : str Response type. ptype : str Element flag ('ELEM') or property entry name ('PBAR', PSHELL' etc). region : int or None, optional Region identifier for constraint screening. atta, attb: int or float or None Response attributes. atti : int or float or None or list, optional Response attributes. """ def __init__(self, label, rtype, ptype, region=None, atta=None, attb=None, atti=None): if region is None: region = '' if atta is None: atta = '' if attb is None: attb = '' if atti is None: atti = '' self.id = DRESP.uniqueid DRESP.uniqueid += 1 self.label = label self.rtype = rtype self.ptype = ptype self.region = region self.atta = atta self.attb = attb self.atti = atti def print_card(self, file): """Print the corresponding input card """ dresp1str = ('%s% 8d% 8s% 8s% 8s% 8s% 8s% 8s' % ('DRESP1'.ljust(8), self.id, self.label, self.rtype, self.ptype, self.region, self.atta, self.attb)) if isinstance(self.atti, (int, str)): dresp1str = dresp1str + str(self.atti).rjust(8) elif isinstance(self.atti, list): atticount = 8 for i in range(0, len(self.atti)): atticount += 1 if atticount == 10: file.write(dresp1str + '\n') dresp1str = '+'.ljust(8) atticount = 2 dresp1str += str(self.atti[i]).rjust(8) file.write(dresp1str + '\n') class PRINTAUX(object): """Base class for :class:`DRESP23` and :class:`BASEDV2` """ def __init__(self): pass def print_aux(self, label, listaux, file): auxstr = '+'.ljust(8) + label.ljust(8) count = 2 for aux_id in listaux: count += 1 if count == 10: file.write(auxstr + '\n') count = 3 auxstr = '+'.ljust(16) auxstr += str(aux_id).rjust(8) file.write(auxstr + '\n') class DRESP23(PRINTAUX): """Base class for DVPREL2, DVCREL2, DVMREL2, DRESP2, DRESP3 etc """ def __init__(self): super(DRESP23, self).__init__() self.dvars = [] self.dtable = [] self.dresp1 = [] self.dresp2 = [] def add_dvar(self, dvar_id): self.dvars.append(dvar_id) def add_dtable(self, cons_label): self.dtable.append(cons_label) def add_dresp1(self, dresp1_id): self.dresp1.append(dresp1_id) def add_dresp2(self, dresp2_id): self.dresp2.append(dresp2_id) class DRESP2(DRESP23): """Design response DRESP2 Define equation responses that are used in the design, either as an objective function or as constraints. Parameters ---------- label : str User-defined label. 
eqid : int or str :class:`.DEQATN` entry identification number, or string informing a pre-programmed function. region : int or None, optional Region used for constraint screening. Notes ----- To add variables, table entries or other :class:`.DRESP1` responses use the methods: :meth:`.add_dvar`, :meth:`.add_dtable` and :meth:`.add_dresp1`. """ def __init__(self, label, eqid, region=None): super(DRESP2, self).__init__() if region is None: region = '' self.id = DRESP.uniqueid DRESP.uniqueid += 1 self.label = label self.eqid = eqid self.region = region def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ drespstr = ('%s% 8d% 8s% 8s% 8s\n' % ('DRESP2'.ljust(8), self.id, self.label, str(self.eqid), self.region)) file.write(drespstr) if len(self.dvars) > 0: self.print_aux('DESVAR', self.dvars, file) if len(self.dtable) > 0: self.print_aux('DTABLE', self.dtable, file) if len(self.dresp1) > 0: self.print_aux('DRESP1', self.dresp1, file) if len(self.dresp2) > 0: self.print_aux('DRESP2', self.dresp2, file) class DRESP3(DRESP23): """Design response DRESP3 Define use-subroutine or built-in responses that can be used in the design either as constraint or as an objective. Parameters ---------- label : str User-defined label. group : str Group name the external response type belongs to. type : str External response type. region : int, optional :class:`.DEQATN` entry identification number. Notes ----- To add variables, table entries or other :class:`.DRESP1` responses use the methods: :meth:`.add_dvar`, :meth:`.add_dtable` and :meth:`.add_dresp1`. """ def __init__(self, label, group, type, region=''): super(DRESP3, self).__init__() self.id = DRESP.uniqueid DRESP.uniqueid += 1 self.label = label self.group = group self.type = type self.region = region def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ drespstr = ('%s% 8d% 8s% 8s% 8s% 8s\n' % ('DRESP3'.ljust(8), self.id, self.label, self.group, self.type, self.region)) file.write(drespstr) if len(self.dvars) > 0: self.print_aux('DESVAR', self.dvars, file) if len(self.dtable) > 0: self.print_aux('DTABLE', self.dtable, file) if len(self.dresp1) > 0: self.print_aux('DRESP1', self.dresp1, file) if len(self.dresp2) > 0: self.print_aux('DRESP2', self.dresp2, file) class DESVAR(object): """Design Variable Parameters ---------- label : str User-supplied name for printing purposes. xinit : float Initial value. xlb : float Lower bound. xub : float Upper bound. dvprel1 : :class:`.DVPREL1` or None, optional The corresponding design variable-to-property relation. code : str or None, optional An additional code to identify the design varible. """ uniqueid = 9000000 def __init__(self, label, xinit, xlb, xub, dvprel1=None, code=None): self.id = DESVAR.uniqueid DESVAR.uniqueid += 1 self.defaults() self.label = label self.xinit = xinit self.xlb = xlb self.xub = xub self.dvprel1 = dvprel1 self.code = code def defaults(self): self.delx = '' self.ddval = '' def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. 
""" #file.write('$dvar.id dvar.code %8d %-8s\n' % (self.id, self.code)) #file.write('$HMNAME DESVAR %8d %-8s\n' % (self.id, self.label)) file.write('%s% 8d% 8s%s%s%s% 8s% 8s\n' % ('DESVAR'.ljust(8), self.id, self.label, ff(self.xinit), ff(self.xlb), ff(self.xub), str(self.delx), str(self.ddval))) def show_card(self): print('%s% 8d% 8s%s%s%s% 8s% 8s\n' % ('DESVAR'.ljust(8), self.id, self.label, ff(self.xinit), ff(self.xlb), ff(self.xub), str(self.delx), str(self.ddval))) class BASEDV2(PRINTAUX): """Base class for DVPREL2, DVCREL2 and DVMREL2 """ def __init__(self): super(BASEDV2, self).__init__() self.dvars = [] self.dtable = [] def add_dvar(self, dvar_id): self.dvars.append(dvar_id) def add_dtable(self, cons_label): self.dtable.append(cons_label) class DVPREL1(object): """Design Variable-to-Property relation This is a wrapper for the DVPREL1 card in NASTRAN. Parameters ---------- type : str Name of a property entry, such as "PBAR", "PBEAM", etc. pid : int Property entry identification number. pname : str Parameter name such as "T", "A", "DIM1", or a field position of the property entry. dvids : list Ids of the design variables that will be related to this property parameter. coeffs : list Multipliers to each corresponding design variable. c0 : float, optional Constant term of relation. """ def __init__(self, type, pid, pname, dvids, coeffs, c0=0.): self.id = DVPREL.uniqueid DVPREL.uniqueid += 1 self.type = type self.pid = pid self.pname = pname self.dvids = dvids self.coeffs = coeffs self.c0 = c0 assert len(dvids) == len(coeffs), 'Lengths must be the same' assert len(dvids) > 0, 'At least one variable must be related' def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ file.write('%s% 8d% 8s% 8d% 8s% 8s% 8s%s\n' % ('DVPREL1'.ljust(8), self.id, self.type, self.pid, self.pname, '', '', ff(self.c0))) fieldnum = 0 dvprel1str = '+'.ljust(8) for i in range(len(self.dvids)): fieldnum += 2 if fieldnum == 10: file.write(dvprel1str + '\n') dvprel1str = '+'.ljust(8) fieldnum = 2 dvprel1str += ('% 8d%s' % (self.dvids[i], ff(self.coeffs[i]))) file.write(dvprel1str + '\n') class DVPREL2(BASEDV2): """Design Variable-to-Property relation This is a wrapper for the DVPREL1 card in NASTRAN. Parameters ---------- type : str Name of a property entry, such as "PBAR", "PBEAM", etc. pid : int Property entry identification number. pname : str Parameter name such as "T", "A", "DIM1", or a field position of the property entry. eqid : int or str :class:`.DEQATN` entry identification number, or string informing a pre-programmed function. """ def __init__(self, type, pid, pname, eqid): super(DVPREL2, self).__init__() self.id = DVPREL.uniqueid DVPREL.uniqueid += 1 self.type = type self.pid = pid self.pname = pname self.eqid = eqid def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ dvprel2str = ('%s% 8d% 8s% 8d% 8s% 8s% 8s% 8s\n' % ('DVPREL2'.ljust(8), self.id, self.type, self.pid, self.pname, '', '', str(self.eqid))) file.write(dvprel2str) if len(self.dvars) > 0: self.print_aux('DESVAR', self.dvars, file) if len(self.dtable) > 0: self.print_aux('DTABLE', self.dtable, file) class DCONSTR(object): """Design constraint Parameters ---------- dcid : int Design constraint set identification number. rid : int The corresponding design response id. lallow : float Lower bound on the response quantity. 
uallow : float Upper bound on the response quantity. """ uniqueid = 9000000 def __init__(self, dcid, rid, lallow, uallow): self.id = DCONSTR.uniqueid DCONSTR.uniqueid += 1 self.dcid = dcid self.rid = rid self.lallow = lallow if lallow is not None else '' self.uallow = uallow if uallow is not None else '' def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ dconstr = ('%s% 8d% 8d% 8s% 8s' % ('DCONSTR'.ljust(8), self.dcid, self.rid, str(self.lallow), str(self.uallow))) file.write(dconstr + '\n') class DLINK(object): r"""Link between design variables DLINK creates links among variables using the form: .. math:: dvar_{dependent} = c_0 + c_{mult} \times (c_1 \times {dvar}_1 + c_2 \times {dvar}_2) where: - `dvar_{dependent}` is the dependent variable - `{dvar}_i` the `i^{th}` design variable - `c_0` is a constant - `c_{mult}` is a common multiplier - `c_i` is an individual multiplier for `{dvar}_i` Parameters ---------- ddvid : int Dependent design variable identification number. c0 : float Constant term. cmult : float Constant multiplier. idvs : list The independent variables. cs : list The multipliers for each variable. Notes ----- Be sure that no dependent variables are being used as independent variables. By default `c0 = 0` and `cmult = 1`. The values of `dvi` and `ci` are inputed as follows:: indep_dv_c = [dv1, c1, dv2, c2, ...] indep_dv_c = [1000000, 1., 1000001, 1., ...] """ uniqueid = 9000000 def __init__(self, ddvid, idvs, cs, c0=0., cmult=1.): self.id = DLINK.uniqueid DLINK.uniqueid += 1 self.ddvid = ddvid self.c0 = c0 self.cmult = cmult self.idvs = idvs self.cs = cs assert len(idvs) == len(cs), 'Lengths must be the same' assert len(idvs) > 0, 'At least one independent variable required' def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ dlinkstr = ('%s% 8d% 8d%s%s' % ('DLINK'.ljust(8), self.id, self.ddvid, ff(self.c0), ff(self.cmult))) dvicicount = 4 for i in range(len(self.idvs)): dvicicount += 2 if dvicicount == 10: file.write(dlinkstr + '\n') dlinkstr = '+'.ljust(8) dvicicount = 2 dlinkstr += ('% 8d%s' % (self.idvs[i], ff(self.cs[i]))) file.write(dlinkstr + '\n') class DEQATN(object): """Equation to calculate customized responses Parameters ---------- eq : str A string containing the equation. For example, the following equation: .. math:: T(x_1, x_2, x_3) = \sqrt{x_1^2 + x_2^2 + x_3^2} can be written as:: eq = 'T(x1,x2,x3)=SQRT(x1**2+x2**2+x3**2)' Notes ----- .. note:: The :meth:`.DEQATN.print_card` method will split the equation, when necessary, in order to keep the limit length up to 56 characters in the first line, and 64 characters for the subsequent lines. """ uniqueid = 9000000 def __init__(self, eq): self.id = DEQATN.uniqueid DEQATN.uniqueid += 1 self.eq = ' ' + eq def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. 
""" eq = self.eq deqatn_str = '%s% 8d' % ('DEQATN'.ljust(8), self.id) check = eq[:56].find(';') if check == -1: check = 55 deqatn_str += eq[:check+1] deqatn_str += '\n' file.write(deqatn_str) eq = eq[check+1:] while len(eq) > 0: check = eq[:64].find(';') if check == -1: check = 63 deqatn_str = '+ ' + eq[:check+1].ljust(64) + '\n' file.write(deqatn_str) eq = eq[check+1:] class DTABLE(object): """Design Table used to define many constants Hints: - There can be any number of DTABLE entries in the bulk data. - The user must avoid repeated labels for constants - The constant label must be <= 8 characters. All values for the constants must be of the 'Real' type. Ex:: input_dict={'c1':1. , 'c2':2., 'max8char':999.} """ def __init__(self, input_dict={}): self.input_dict = input_dict def print_card(self, file): """Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. """ if len(self.input_dict) > 0: keys = self.input_dict.keys() dtable_str = 'DTABLE'.ljust(8) count = 0 for k in sorted(keys): v = self.input_dict[k] count += 2 if count == 10: file.write(dtable_str + '\n') dtable_str = '+'.ljust(8) count = 2 dtable_str += '% 8s%s' % (str(k), ff(v)) file.write(dtable_str + '\n') if __name__ == '__main__': with open('tmp.bdf', 'w') as f: test = DRESP2('test',1,100) for i in range(1,16): test.add_dvar(i) test.add_dresp1(i) test.print_card(f)
en
0.567132
Optimization cards (:mod:`structmanager.sol200.cards_opt`) ========================================================== .. currentmodule:: structmanager.sol200.cards_opt` Many input cards related to the optimization problem are wrapped in this module. The input cards more related to the solver are contained in module :mod:`structmanager.sol200.cards_solver`. .. rubric:: Classes .. autosummary:: structmanager.sol200.cards_opt.DCONSTR structmanager.sol200.cards_opt.DEQATN structmanager.sol200.cards_opt.DESVAR structmanager.sol200.cards_opt.DLINK structmanager.sol200.cards_opt.DRESP1 structmanager.sol200.cards_opt.DRESP2 structmanager.sol200.cards_opt.DRESP3 structmanager.sol200.cards_opt.DTABLE structmanager.sol200.cards_opt.DVPREL1 Base class to guarantee an unique id among all DVPRELs Base class to guarantee an unique id among all DRESPs Design response DRESP1 Parameters ---------- label : str User-defined label. rtype : str Response type. ptype : str Element flag ('ELEM') or property entry name ('PBAR', PSHELL' etc). region : int or None, optional Region identifier for constraint screening. atta, attb: int or float or None Response attributes. atti : int or float or None or list, optional Response attributes. Print the corresponding input card Base class for :class:`DRESP23` and :class:`BASEDV2` Base class for DVPREL2, DVCREL2, DVMREL2, DRESP2, DRESP3 etc Design response DRESP2 Define equation responses that are used in the design, either as an objective function or as constraints. Parameters ---------- label : str User-defined label. eqid : int or str :class:`.DEQATN` entry identification number, or string informing a pre-programmed function. region : int or None, optional Region used for constraint screening. Notes ----- To add variables, table entries or other :class:`.DRESP1` responses use the methods: :meth:`.add_dvar`, :meth:`.add_dtable` and :meth:`.add_dresp1`. Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. Design response DRESP3 Define use-subroutine or built-in responses that can be used in the design either as constraint or as an objective. Parameters ---------- label : str User-defined label. group : str Group name the external response type belongs to. type : str External response type. region : int, optional :class:`.DEQATN` entry identification number. Notes ----- To add variables, table entries or other :class:`.DRESP1` responses use the methods: :meth:`.add_dvar`, :meth:`.add_dtable` and :meth:`.add_dresp1`. Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. Design Variable Parameters ---------- label : str User-supplied name for printing purposes. xinit : float Initial value. xlb : float Lower bound. xub : float Upper bound. dvprel1 : :class:`.DVPREL1` or None, optional The corresponding design variable-to-property relation. code : str or None, optional An additional code to identify the design varible. Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. #file.write('$dvar.id dvar.code %8d %-8s\n' % (self.id, self.code)) #file.write('$HMNAME DESVAR %8d %-8s\n' % (self.id, self.label)) Base class for DVPREL2, DVCREL2 and DVMREL2 Design Variable-to-Property relation This is a wrapper for the DVPREL1 card in NASTRAN. Parameters ---------- type : str Name of a property entry, such as "PBAR", "PBEAM", etc. pid : int Property entry identification number. 
pname : str Parameter name such as "T", "A", "DIM1", or a field position of the property entry. dvids : list Ids of the design variables that will be related to this property parameter. coeffs : list Multipliers to each corresponding design variable. c0 : float, optional Constant term of relation. Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. Design Variable-to-Property relation This is a wrapper for the DVPREL1 card in NASTRAN. Parameters ---------- type : str Name of a property entry, such as "PBAR", "PBEAM", etc. pid : int Property entry identification number. pname : str Parameter name such as "T", "A", "DIM1", or a field position of the property entry. eqid : int or str :class:`.DEQATN` entry identification number, or string informing a pre-programmed function. Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. Design constraint Parameters ---------- dcid : int Design constraint set identification number. rid : int The corresponding design response id. lallow : float Lower bound on the response quantity. uallow : float Upper bound on the response quantity. Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. Link between design variables DLINK creates links among variables using the form: .. math:: dvar_{dependent} = c_0 + c_{mult} \times (c_1 \times {dvar}_1 + c_2 \times {dvar}_2) where: - `dvar_{dependent}` is the dependent variable - `{dvar}_i` the `i^{th}` design variable - `c_0` is a constant - `c_{mult}` is a common multiplier - `c_i` is an individual multiplier for `{dvar}_i` Parameters ---------- ddvid : int Dependent design variable identification number. c0 : float Constant term. cmult : float Constant multiplier. idvs : list The independent variables. cs : list The multipliers for each variable. Notes ----- Be sure that no dependent variables are being used as independent variables. By default `c0 = 0` and `cmult = 1`. The values of `dvi` and `ci` are inputed as follows:: indep_dv_c = [dv1, c1, dv2, c2, ...] indep_dv_c = [1000000, 1., 1000001, 1., ...] Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. Equation to calculate customized responses Parameters ---------- eq : str A string containing the equation. For example, the following equation: .. math:: T(x_1, x_2, x_3) = \sqrt{x_1^2 + x_2^2 + x_3^2} can be written as:: eq = 'T(x1,x2,x3)=SQRT(x1**2+x2**2+x3**2)' Notes ----- .. note:: The :meth:`.DEQATN.print_card` method will split the equation, when necessary, in order to keep the limit length up to 56 characters in the first line, and 64 characters for the subsequent lines. Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method. Design Table used to define many constants Hints: - There can be any number of DTABLE entries in the bulk data. - The user must avoid repeated labels for constants - The constant label must be <= 8 characters. All values for the constants must be of the 'Real' type. Ex:: input_dict={'c1':1. , 'c2':2., 'max8char':999.} Print the corresponding input card Parameters ---------- file : file File object with a :meth:`write` method.
2.26457
2
leetcode/q0022-generate-parentheses/solution.py
HiAwesome/python-leetcode
0
6631522
<filename>leetcode/q0022-generate-parentheses/solution.py
"""
https://leetcode-cn.com/problems/generate-parentheses/
https://leetcode-cn.com/problems/generate-parentheses/solution/gua-hao-sheng-cheng-by-leetcode-solution/
"""
from functools import lru_cache
from typing import List


class Solution:
    @lru_cache(None)
    def generateParenthesis(self, m: int) -> List[str]:
        if m == 0:
            return ['']
        ans = []
        for c in range(m):
            for left in self.generateParenthesis(c):
                for right in self.generateParenthesis(m - 1 - c):
                    ans.append('({}){}'.format(left, right))
        return ans
<filename>leetcode/q0022-generate-parentheses/solution.py
"""
https://leetcode-cn.com/problems/generate-parentheses/
https://leetcode-cn.com/problems/generate-parentheses/solution/gua-hao-sheng-cheng-by-leetcode-solution/
"""
from functools import lru_cache
from typing import List


class Solution:
    @lru_cache(None)
    def generateParenthesis(self, m: int) -> List[str]:
        if m == 0:
            return ['']
        ans = []
        for c in range(m):
            for left in self.generateParenthesis(c):
                for right in self.generateParenthesis(m - 1 - c):
                    ans.append('({}){}'.format(left, right))
        return ans
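A quick sanity check of the closure-number recursion above (not part of the original file): m = 3 should produce the five valid combinations.

    # Expected output: ['()()()', '()(())', '(())()', '(()())', '((()))']
    print(Solution().generateParenthesis(3))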
en
0.562167
https://leetcode-cn.com/problems/generate-parentheses/ https://leetcode-cn.com/problems/generate-parentheses/solution/gua-hao-sheng-cheng-by-leetcode-solution/
3.352703
3
tests/geometric_types_test.py
yisibl/picosvg
72
6631523
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from picosvg.geometric_types import Point, Rect, Vector import pytest def test_point_subtraction_and_addition(): p0 = Point(1, 3) p1 = Point(-2, 4) v = p1 - p0 assert isinstance(v, Vector) assert v.x == -3 assert v.y == 1 p2 = p1 - v assert isinstance(p2, Point) assert p2 == p0 p3 = p0 + v assert isinstance(p3, Point) assert p3 == p1 class TestVector: def test_add_vec(self): assert Vector(1, 2) + Vector(2, 3) == Vector(3, 5) assert Vector(1, 2) + Point(2, 3) == Point(3, 5) assert Point(2, 3) + Vector(1, 2) == Point(3, 5) def test_sub_vec(self): assert Vector(3, 5) - Vector(2, 3) == Vector(1, 2) def test_multiply(self): v = Vector(3, 4) assert v * 2 == Vector(6, 8) assert v * 0.5 == Vector(1.5, 2) assert 3 * v == Vector(9, 12) assert 1.5 * v == Vector(4.5, 6) with pytest.raises(TypeError): _ = v * "a" def test_perpendicular(self): v = Vector(-3, 5) assert v.perpendicular() == Vector(-5, -3) assert v.perpendicular(clockwise=False) == Vector(-5, -3) assert v.perpendicular(clockwise=True) == Vector(5, 3) def test_norm(self): assert Vector(0, 4).norm() == 4 assert Vector(-2, 0).norm() == 2 assert Vector(3, -2).norm() == pytest.approx(3.605551) def test_unit(self): assert Vector(0, 10).unit() == Vector(0, 1) assert Vector(3, 0).unit() == Vector(1, 0) assert Vector(453, -453).unit() == pytest.approx(Vector(0.707107, -0.707107)) def test_dot(self): assert Vector(2, -3).dot(Vector(-4, 5)) == 2 * -4 + -3 * 5 @staticmethod def assert_vectors_are_parallel(v1, v2): # vectors are parallel when their cross product is the zero vector assert v1.x * v2.y == pytest.approx(v1.y * v2.x) def test_projection(self): v1 = Vector(5, 2) # vector projection onto its perpendicular is (0,0) by definition assert v1.projection(v1.perpendicular(clockwise=False)) == Vector(0, 0) assert v1.projection(v1.perpendicular(clockwise=True)) == Vector(0, 0) assert v1.projection(Vector(0, 1)) == Vector(0, 2) assert v1.projection(Vector(0, -1)) == Vector(0, 2) assert v1.projection(Vector(1, 0)) == Vector(5, 0) assert v1.projection(Vector(-1, 0)) == Vector(5, 0) v2 = Vector(2, 3) p = v1.projection(v2) assert p == pytest.approx(Vector(2.461538, 3.692308)) self.assert_vectors_are_parallel(v2, p) v2 = Vector(3, -4) p = v1.projection(v2) assert p == pytest.approx(Vector(0.84, -1.12)) self.assert_vectors_are_parallel(v2, p) def test_empty_rect(): assert Rect(x=1, y=2, w=3, h=0).empty() assert Rect(x=1, y=2, w=0, h=3).empty()
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from picosvg.geometric_types import Point, Rect, Vector import pytest def test_point_subtraction_and_addition(): p0 = Point(1, 3) p1 = Point(-2, 4) v = p1 - p0 assert isinstance(v, Vector) assert v.x == -3 assert v.y == 1 p2 = p1 - v assert isinstance(p2, Point) assert p2 == p0 p3 = p0 + v assert isinstance(p3, Point) assert p3 == p1 class TestVector: def test_add_vec(self): assert Vector(1, 2) + Vector(2, 3) == Vector(3, 5) assert Vector(1, 2) + Point(2, 3) == Point(3, 5) assert Point(2, 3) + Vector(1, 2) == Point(3, 5) def test_sub_vec(self): assert Vector(3, 5) - Vector(2, 3) == Vector(1, 2) def test_multiply(self): v = Vector(3, 4) assert v * 2 == Vector(6, 8) assert v * 0.5 == Vector(1.5, 2) assert 3 * v == Vector(9, 12) assert 1.5 * v == Vector(4.5, 6) with pytest.raises(TypeError): _ = v * "a" def test_perpendicular(self): v = Vector(-3, 5) assert v.perpendicular() == Vector(-5, -3) assert v.perpendicular(clockwise=False) == Vector(-5, -3) assert v.perpendicular(clockwise=True) == Vector(5, 3) def test_norm(self): assert Vector(0, 4).norm() == 4 assert Vector(-2, 0).norm() == 2 assert Vector(3, -2).norm() == pytest.approx(3.605551) def test_unit(self): assert Vector(0, 10).unit() == Vector(0, 1) assert Vector(3, 0).unit() == Vector(1, 0) assert Vector(453, -453).unit() == pytest.approx(Vector(0.707107, -0.707107)) def test_dot(self): assert Vector(2, -3).dot(Vector(-4, 5)) == 2 * -4 + -3 * 5 @staticmethod def assert_vectors_are_parallel(v1, v2): # vectors are parallel when their cross product is the zero vector assert v1.x * v2.y == pytest.approx(v1.y * v2.x) def test_projection(self): v1 = Vector(5, 2) # vector projection onto its perpendicular is (0,0) by definition assert v1.projection(v1.perpendicular(clockwise=False)) == Vector(0, 0) assert v1.projection(v1.perpendicular(clockwise=True)) == Vector(0, 0) assert v1.projection(Vector(0, 1)) == Vector(0, 2) assert v1.projection(Vector(0, -1)) == Vector(0, 2) assert v1.projection(Vector(1, 0)) == Vector(5, 0) assert v1.projection(Vector(-1, 0)) == Vector(5, 0) v2 = Vector(2, 3) p = v1.projection(v2) assert p == pytest.approx(Vector(2.461538, 3.692308)) self.assert_vectors_are_parallel(v2, p) v2 = Vector(3, -4) p = v1.projection(v2) assert p == pytest.approx(Vector(0.84, -1.12)) self.assert_vectors_are_parallel(v2, p) def test_empty_rect(): assert Rect(x=1, y=2, w=3, h=0).empty() assert Rect(x=1, y=2, w=0, h=3).empty()
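The projection tests above follow the usual identity proj_b(a) = (a.b / b.b) * b. A standalone check along those lines, a minimal sketch assuming picosvg is installed:

    from picosvg.geometric_types import Vector

    a, b = Vector(5, 2), Vector(0, 1)
    scale = a.dot(b) / b.dot(b)          # = 2.0
    assert b * scale == a.projection(b) == Vector(0, 2)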
en
0.887484
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # vectors are parallel when their cross product is the zero vector # vector projection onto its perpendicular is (0,0) by definition
3.030063
3
genomic_data_service/rnaseq/commands/index_rna_seq_data.py
ENCODE-DCC/genomic-data-service
3
6631524
<reponame>ENCODE-DCC/genomic-data-service
import click
from elasticsearch import Elasticsearch as Client

from genomic_data_service.rnaseq.expressions import Expressions
from genomic_data_service.rnaseq.repository.elasticsearch import Elasticsearch
from genomic_data_service.rnaseq.remote.portal import Portal


HOST = '127.0.0.1:9202'


@click.command()
@click.option(
    '--host',
    default=HOST,
    help='Location of Elasticsearch instance.'
)
def index_rna_seq_data(host):
    client = Client(host)
    portal = Portal()
    repository = Elasticsearch(client)
    expressions = Expressions(portal, repository)
    expressions.index()


if __name__ == '__main__':
    index_rna_seq_data()
import click
from elasticsearch import Elasticsearch as Client

from genomic_data_service.rnaseq.expressions import Expressions
from genomic_data_service.rnaseq.repository.elasticsearch import Elasticsearch
from genomic_data_service.rnaseq.remote.portal import Portal


HOST = '127.0.0.1:9202'


@click.command()
@click.option(
    '--host',
    default=HOST,
    help='Location of Elasticsearch instance.'
)
def index_rna_seq_data(host):
    client = Client(host)
    portal = Portal()
    repository = Elasticsearch(client)
    expressions = Expressions(portal, repository)
    expressions.index()


if __name__ == '__main__':
    index_rna_seq_data()
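Because index_rna_seq_data is a click command, it can also be exercised without a console entry point via click's test runner. A hedged invocation sketch: the host value is illustrative, and actually running it assumes a reachable Elasticsearch instance and portal.

    from click.testing import CliRunner

    runner = CliRunner()
    result = runner.invoke(index_rna_seq_data, ['--host', 'localhost:9202'])
    print(result.exit_code)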
none
1
1.847125
2
tests/unit/modules/readers/test_JecVariations.py
shane-breeze/zinv-analysis
1
6631525
import pytest import mock import os import numpy as np import awkward as awk from zinv.modules.readers import JecVariations class DummyColl(object): pass class DummyEvent(object): def __init__(self): self.iblock = 0 self.nsig = 0 self.source = '' self.cache = {} self.attribute_variation_sources = [ "jesTotal", "jerSF", "unclust", "jesAbsoluteStat", ] self.config = mock.MagicMock() self.Jet = DummyColl() self.GenJet = DummyColl() self.MET = DummyColl() self.PuppiMET = DummyColl() def register_function(self, event, name, function): self.__dict__[name] = function def hasbranch(self, branch): return hasattr(self, branch) @pytest.fixture() def event(): return DummyEvent() @pytest.fixture() def module(): return JecVariations( jes_unc_file = "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/jecs/Summer16_23Sep2016V4_MC_UncertaintySources_AK4PFchs.csv", jer_sf_file = "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/jecs/Spring16_25nsV10a_MC_SF_AK4PFchs.csv", jer_file = "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/jecs/Spring16_25nsV10_MC_PtResolution_AK4PFchs.csv", apply_jer_corrections = True, jes_regex = "jes(?P<source>.*)", unclust_threshold = 15., maxdr_jets_with_genjets = 0.2, ndpt_jets_with_genjets = 3., ) def test_jec_variations_begin(module, event): module.begin(event) print(module.jes_sources) assert all(t in module.jes_sources for t in ["Total", "AbsoluteStat"]) assert all(t in event.JesSources for t in ["jesTotal", "jesAbsoluteStat"]) @pytest.fixture() def event_module_run(module, event): def norm(*args, **kwargs): return 0.5 np.random.normal = mock.Mock(side_effect=norm) inputs = { "jpt": [[], [16.], [60., 70.]], "jeta": [[], [-3.1], [0.5, 3.1]], "jphi": [[], [0.1], [0.3, 0.5]], "gjpt": [[], [17.], [65., 75.]], "gjeta": [[], [-3.11], [1.0, 3.09]], "gjphi": [[], [0.1], [0.3, 0.5]], "rho": [0., 20., 50.], "met": [200., 220., 240.], "mephi": [0.9, 0.1, -0.7], "met_sumet": [400., 600., 800.], } outputs = { "jpt": [[], [18.624], [76.29538585, 81.48]], "jptres": [[], [0.456227396877664], [0.13144913, 0.179063214239771]], "jjersf": [[], [1.164], [1.271589764, 1.164]], "jjersfdown": [[], [-0.05369415808], [-0.06170861319, -0.05369415808]], "jjersfup": [[], [0.05369415808], [0.05007691581, 0.05369415808]], "jjestotdown": [[], [-0.07725], [-0.0144, -0.05163]], "jjestotup": [[], [0.07725], [0.0144, 0.05163]], "met": [200., 217.376, 228.3443658], "mephi": [0.9, 0.1, -0.8071129825], } event.config.dataset.idx = 2 event.size = 3 jet_pt = awk.JaggedArray.fromiter(inputs["jpt"]).astype(np.float32) jet_eta = awk.JaggedArray.fromiter(inputs["jeta"]).astype(np.float32) jet_phi = awk.JaggedArray.fromiter(inputs["jphi"]).astype(np.float32) genjet_pt = awk.JaggedArray.fromiter(inputs["gjpt"]).astype(np.float32) genjet_eta = awk.JaggedArray.fromiter(inputs["gjeta"]).astype(np.float32) genjet_phi = awk.JaggedArray.fromiter(inputs["gjphi"]).astype(np.float32) rho = np.array(inputs["rho"], dtype=np.float32) met_pt = np.array(inputs["met"], dtype=np.float32) met_phi = np.array(inputs["mephi"], dtype=np.float32) met_sumet = np.array(inputs["met_sumet"], dtype=np.float32) event.Jet_pt = jet_pt event.Jet.pt = jet_pt event.Jet_ptJESOnly = jet_pt event.Jet.ptJESOnly = jet_pt event.Jet_eta = jet_eta event.Jet.eta = jet_eta event.Jet_phi = jet_phi event.Jet.phi = jet_phi event.GenJet_pt = genjet_pt event.GenJet.pt = genjet_pt event.GenJet_eta = genjet_eta event.GenJet.eta = genjet_eta event.GenJet_phi = genjet_phi event.GenJet.phi = genjet_phi event.fixedGridRhoFastjetAll = rho 
event.MET_pt = met_pt event.MET.pt = met_pt event.MET_ptJESOnly = met_pt event.MET.ptJESOnly = met_pt event.MET_phi = met_phi event.MET.phi = met_phi event.MET_phiJESOnly = met_phi event.MET.phiJESOnly = met_phi event.MET_sumEt = met_sumet event.MET.sumEt = met_sumet event.MET_sumEtJESOnly = met_sumet event.MET.sumEtJESOnly = met_sumet event.PuppiMET_pt = met_pt event.PuppiMET.pt = met_pt event.PuppiMET_ptJESOnly = met_pt event.PuppiMET.ptJESOnly = met_pt event.PuppiMET_phi = met_phi event.PuppiMET.phi = met_phi event.PuppiMET_phiJESOnly = met_phi event.PuppiMET.phiJESOnly = met_phi event.PuppiMET_sumEt = met_sumet event.PuppiMET.sumEt = met_sumet event.PuppiMET_sumEtJESOnly = met_sumet event.PuppiMET.sumEtJESOnly = met_sumet module.begin(event) #module.event(event) event.outputs = outputs return event, module def test_jec_variations_ptres(event_module_run): event = event_module_run[0] outputs = event.outputs assert np.allclose( event.Jet_ptResolution(event).content, awk.JaggedArray.fromiter(outputs["jptres"]).astype(np.float32).content, rtol=1e-6, equal_nan=True, ) def test_jec_variations_jersfdown(event_module_run): event = event_module_run[0] outputs = event.outputs assert np.allclose( event.Jet_jerSF(event, "jerSF", -1.).content, awk.JaggedArray.fromiter(outputs["jjersfdown"]).astype(np.float32).content, rtol=1e-5, equal_nan=True, ) def test_jec_variations_jersfup(event_module_run): event = event_module_run[0] outputs = event.outputs assert np.allclose( event.Jet_jerSF(event, "jerSF", 1.).content, awk.JaggedArray.fromiter(outputs["jjersfup"]).astype(np.float32).content, rtol=1e-5, equal_nan=True, ) def test_jec_variations_newjpt(event_module_run): event, module = event_module_run module.event(event) outputs = event.outputs assert np.allclose( event.Jet_pt.content, awk.JaggedArray.fromiter(outputs["jpt"]).astype(np.float32).content, rtol=1e-6, equal_nan=True, ) def test_jec_variations_newmet(event_module_run): event, module = event_module_run module.event(event) outputs = event.outputs assert np.allclose( event.MET_pt, np.array(outputs["met"], dtype=np.float32), rtol=1e-6, equal_nan=True, ) def test_jec_variations_newmephi(event_module_run): event, module = event_module_run module.event(event) outputs = event.outputs assert np.allclose( event.MET_phi, np.array(outputs["mephi"], dtype=np.float32), rtol=1e-6, equal_nan=True, ) def test_jec_variations_jestotup(event_module_run): event, module = event_module_run module.event(event) outputs = event.outputs assert np.allclose( event.Jet_jesSF(event, "jesTotal", 1.).content, awk.JaggedArray.fromiter(outputs["jjestotup"]).astype(np.float32).content, rtol=1e-6, equal_nan=True, ) def test_jec_variations_jestotdown(event_module_run): event, module = event_module_run module.event(event) outputs = event.outputs assert np.allclose( event.Jet_jesSF(event, "jesTotal", -1.).content, awk.JaggedArray.fromiter(outputs["jjestotdown"]).astype(np.float32).content, rtol=1e-6, equal_nan=True, )
import pytest import mock import os import numpy as np import awkward as awk from zinv.modules.readers import JecVariations class DummyColl(object): pass class DummyEvent(object): def __init__(self): self.iblock = 0 self.nsig = 0 self.source = '' self.cache = {} self.attribute_variation_sources = [ "jesTotal", "jerSF", "unclust", "jesAbsoluteStat", ] self.config = mock.MagicMock() self.Jet = DummyColl() self.GenJet = DummyColl() self.MET = DummyColl() self.PuppiMET = DummyColl() def register_function(self, event, name, function): self.__dict__[name] = function def hasbranch(self, branch): return hasattr(self, branch) @pytest.fixture() def event(): return DummyEvent() @pytest.fixture() def module(): return JecVariations( jes_unc_file = "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/jecs/Summer16_23Sep2016V4_MC_UncertaintySources_AK4PFchs.csv", jer_sf_file = "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/jecs/Spring16_25nsV10a_MC_SF_AK4PFchs.csv", jer_file = "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/jecs/Spring16_25nsV10_MC_PtResolution_AK4PFchs.csv", apply_jer_corrections = True, jes_regex = "jes(?P<source>.*)", unclust_threshold = 15., maxdr_jets_with_genjets = 0.2, ndpt_jets_with_genjets = 3., ) def test_jec_variations_begin(module, event): module.begin(event) print(module.jes_sources) assert all(t in module.jes_sources for t in ["Total", "AbsoluteStat"]) assert all(t in event.JesSources for t in ["jesTotal", "jesAbsoluteStat"]) @pytest.fixture() def event_module_run(module, event): def norm(*args, **kwargs): return 0.5 np.random.normal = mock.Mock(side_effect=norm) inputs = { "jpt": [[], [16.], [60., 70.]], "jeta": [[], [-3.1], [0.5, 3.1]], "jphi": [[], [0.1], [0.3, 0.5]], "gjpt": [[], [17.], [65., 75.]], "gjeta": [[], [-3.11], [1.0, 3.09]], "gjphi": [[], [0.1], [0.3, 0.5]], "rho": [0., 20., 50.], "met": [200., 220., 240.], "mephi": [0.9, 0.1, -0.7], "met_sumet": [400., 600., 800.], } outputs = { "jpt": [[], [18.624], [76.29538585, 81.48]], "jptres": [[], [0.456227396877664], [0.13144913, 0.179063214239771]], "jjersf": [[], [1.164], [1.271589764, 1.164]], "jjersfdown": [[], [-0.05369415808], [-0.06170861319, -0.05369415808]], "jjersfup": [[], [0.05369415808], [0.05007691581, 0.05369415808]], "jjestotdown": [[], [-0.07725], [-0.0144, -0.05163]], "jjestotup": [[], [0.07725], [0.0144, 0.05163]], "met": [200., 217.376, 228.3443658], "mephi": [0.9, 0.1, -0.8071129825], } event.config.dataset.idx = 2 event.size = 3 jet_pt = awk.JaggedArray.fromiter(inputs["jpt"]).astype(np.float32) jet_eta = awk.JaggedArray.fromiter(inputs["jeta"]).astype(np.float32) jet_phi = awk.JaggedArray.fromiter(inputs["jphi"]).astype(np.float32) genjet_pt = awk.JaggedArray.fromiter(inputs["gjpt"]).astype(np.float32) genjet_eta = awk.JaggedArray.fromiter(inputs["gjeta"]).astype(np.float32) genjet_phi = awk.JaggedArray.fromiter(inputs["gjphi"]).astype(np.float32) rho = np.array(inputs["rho"], dtype=np.float32) met_pt = np.array(inputs["met"], dtype=np.float32) met_phi = np.array(inputs["mephi"], dtype=np.float32) met_sumet = np.array(inputs["met_sumet"], dtype=np.float32) event.Jet_pt = jet_pt event.Jet.pt = jet_pt event.Jet_ptJESOnly = jet_pt event.Jet.ptJESOnly = jet_pt event.Jet_eta = jet_eta event.Jet.eta = jet_eta event.Jet_phi = jet_phi event.Jet.phi = jet_phi event.GenJet_pt = genjet_pt event.GenJet.pt = genjet_pt event.GenJet_eta = genjet_eta event.GenJet.eta = genjet_eta event.GenJet_phi = genjet_phi event.GenJet.phi = genjet_phi event.fixedGridRhoFastjetAll = rho 
event.MET_pt = met_pt event.MET.pt = met_pt event.MET_ptJESOnly = met_pt event.MET.ptJESOnly = met_pt event.MET_phi = met_phi event.MET.phi = met_phi event.MET_phiJESOnly = met_phi event.MET.phiJESOnly = met_phi event.MET_sumEt = met_sumet event.MET.sumEt = met_sumet event.MET_sumEtJESOnly = met_sumet event.MET.sumEtJESOnly = met_sumet event.PuppiMET_pt = met_pt event.PuppiMET.pt = met_pt event.PuppiMET_ptJESOnly = met_pt event.PuppiMET.ptJESOnly = met_pt event.PuppiMET_phi = met_phi event.PuppiMET.phi = met_phi event.PuppiMET_phiJESOnly = met_phi event.PuppiMET.phiJESOnly = met_phi event.PuppiMET_sumEt = met_sumet event.PuppiMET.sumEt = met_sumet event.PuppiMET_sumEtJESOnly = met_sumet event.PuppiMET.sumEtJESOnly = met_sumet module.begin(event) #module.event(event) event.outputs = outputs return event, module def test_jec_variations_ptres(event_module_run): event = event_module_run[0] outputs = event.outputs assert np.allclose( event.Jet_ptResolution(event).content, awk.JaggedArray.fromiter(outputs["jptres"]).astype(np.float32).content, rtol=1e-6, equal_nan=True, ) def test_jec_variations_jersfdown(event_module_run): event = event_module_run[0] outputs = event.outputs assert np.allclose( event.Jet_jerSF(event, "jerSF", -1.).content, awk.JaggedArray.fromiter(outputs["jjersfdown"]).astype(np.float32).content, rtol=1e-5, equal_nan=True, ) def test_jec_variations_jersfup(event_module_run): event = event_module_run[0] outputs = event.outputs assert np.allclose( event.Jet_jerSF(event, "jerSF", 1.).content, awk.JaggedArray.fromiter(outputs["jjersfup"]).astype(np.float32).content, rtol=1e-5, equal_nan=True, ) def test_jec_variations_newjpt(event_module_run): event, module = event_module_run module.event(event) outputs = event.outputs assert np.allclose( event.Jet_pt.content, awk.JaggedArray.fromiter(outputs["jpt"]).astype(np.float32).content, rtol=1e-6, equal_nan=True, ) def test_jec_variations_newmet(event_module_run): event, module = event_module_run module.event(event) outputs = event.outputs assert np.allclose( event.MET_pt, np.array(outputs["met"], dtype=np.float32), rtol=1e-6, equal_nan=True, ) def test_jec_variations_newmephi(event_module_run): event, module = event_module_run module.event(event) outputs = event.outputs assert np.allclose( event.MET_phi, np.array(outputs["mephi"], dtype=np.float32), rtol=1e-6, equal_nan=True, ) def test_jec_variations_jestotup(event_module_run): event, module = event_module_run module.event(event) outputs = event.outputs assert np.allclose( event.Jet_jesSF(event, "jesTotal", 1.).content, awk.JaggedArray.fromiter(outputs["jjestotup"]).astype(np.float32).content, rtol=1e-6, equal_nan=True, ) def test_jec_variations_jestotdown(event_module_run): event, module = event_module_run module.event(event) outputs = event.outputs assert np.allclose( event.Jet_jesSF(event, "jesTotal", -1.).content, awk.JaggedArray.fromiter(outputs["jjestotdown"]).astype(np.float32).content, rtol=1e-6, equal_nan=True, )
hi
0.069309
#module.event(event)
1.961872
2
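The fixture above pins np.random.normal to a constant (0.5) so the JER smearing becomes deterministic and the expected jet pT / MET values can be hard-coded. A minimal standalone sketch of that pattern, assuming nothing about zinv itself (smear() is a toy stand-in):

import numpy as np
import pytest
from unittest import mock


def smear(pt, resolution):
    # toy JER-style smearing: pt * (1 + resolution * gaussian draw)
    return pt * (1.0 + resolution * np.random.normal(size=pt.shape))


@pytest.fixture()
def deterministic_normal():
    # pin every "random" draw to 0.5, mirroring the np.random.normal mock above
    with mock.patch.object(np.random, "normal", side_effect=lambda *a, **k: 0.5):
        yield


def test_smear_is_deterministic(deterministic_normal):
    pt = np.array([60.0, 70.0], dtype=np.float32)
    out = smear(pt, resolution=0.1)
    assert np.allclose(out, pt * 1.05, rtol=1e-6)

Using mock.patch.object keeps the patch scoped to the fixture, whereas assigning np.random.normal = mock.Mock(...) as in the snippet leaves the function replaced for the rest of the session.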
Lab_5/Q7.py
Hubert-HD/Lab_ADA
0
6631526
# Q7: What is the time complexity of import random n = int(random.random() * 100) i = 1 while i < n: print(i) i = i * 2 """ Iteration no. value of i 1 1 2 1 * 2 3 (1 * 2) * 2 4 ((1 * 2) * 2) * 2 . . . . . . k 1 * 2^(k-1) i > n 2^(k-1) > n 2^k ~ n k ~ log2(n) O(log(n)) """
# Q7: What is the time complexity of import random n = int(random.random() * 100) i = 1 while i < n: print(i) i = i * 2 """ Iteration no. value of i 1 1 2 1 * 2 3 (1 * 2) * 2 4 ((1 * 2) * 2) * 2 . . . . . . k 1 * 2^(k-1) i > n 2^(k-1) > n 2^k ~ n k ~ log2(n) O(log(n)) """
en
0.324162
# Q7: What is the time complexity of Iteration no. value of i 1 1 2 1 * 2 3 (1 * 2) * 2 4 ((1 * 2) * 2) * 2 . . . . . . k 1 * 2^(k-1) i > n 2^(k-1) > n 2^k ~ n k ~ log2(n) O(log(n))
3.815935
4
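A quick empirical check of the O(log n) answer worked out in the docstring: count the loop's iterations and compare them with ceil(log2(n)) (the helper below is mine, the loop body is the one from Q7):

import math


def count_iterations(n):
    # same loop as Q7: i doubles until it reaches n
    i, steps = 1, 0
    while i < n:
        steps += 1
        i *= 2
    return steps


for n in (10, 100, 1000, 10**6):
    print(n, count_iterations(n), math.ceil(math.log2(n)))
# prints 10 4 4, 100 7 7, 1000 10 10, 1000000 20 20: the count tracks log2(n)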
examples/quickstart.py
tushkanin/jsondataclass
0
6631527
from dataclasses import dataclass from jsondataclass import to_json, from_json @dataclass class Movie: name: str year: int county: str movie = Movie("Terminator: Dark Fate", 2019, "USA") print(to_json(movie)) # > {"name": "Terminator: Dark Fate", "year": 2019, "county": "USA"} json_str = '{"name": "Terminator: Dark Fate", "year": 2019, "county": "USA"}' print(from_json(json_str, Movie)) # > Movie(name='Terminator: Dark Fate', year=2019, county='USA')
from dataclasses import dataclass from jsondataclass import to_json, from_json @dataclass class Movie: name: str year: int county: str movie = Movie("Terminator: Dark Fate", 2019, "USA") print(to_json(movie)) # > {"name": "Terminator: Dark Fate", "year": 2019, "county": "USA"} json_str = '{"name": "Terminator: Dark Fate", "year": 2019, "county": "USA"}' print(from_json(json_str, Movie)) # > Movie(name='Terminator: Dark Fate', year=2019, county='USA')
en
0.541332
# > {"name": "Terminator: Dark Fate", "year": 2019, "county": "USA"} # > Movie(name='Terminator: Dark Fate', year=2019, county='USA')
3.703198
4
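For a flat dataclass like Movie, the same round trip can also be done with only the standard library; this is a stdlib-only sketch for comparison, not part of jsondataclass's API (the field is spelled county in the source, so it is kept that way here):

import json
from dataclasses import asdict, dataclass


@dataclass
class Movie:
    name: str
    year: int
    county: str


movie = Movie("Terminator: Dark Fate", 2019, "USA")

# serialize: asdict() produces a plain dict that json.dumps understands
json_str = json.dumps(asdict(movie))

# deserialize: unpack the parsed dict back into the dataclass
restored = Movie(**json.loads(json_str))
assert restored == movie

A dedicated (de)serialization library pays off once nested or more complex fields are involved; for the flat case above the two approaches are interchangeable.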
from_python_community/move_zeroes.py
ZaytsevNS/python_practice
0
6631528
# Task: # Your task is to write a function that moves all zeros to the end of a list. # The function takes a list of numbers, and your task is to modify it so that the zeros end up at the end of the list. # It returns nothing and only mutates the given list. The order of the non-zero numbers must be preserved. import unittest def move_zeroes(lst: list) -> list: lst[:] = [i for i in lst if i != 0] + [0] * lst.count(0) return lst class TestMoveZeroes(unittest.TestCase): def test_one(self): """ Should return list with zeros at the end of the list """ self.assertEqual([1, 3, 12, 0, 0], move_zeroes([0, 1, 0, 3, 12])) self.assertEqual([1, 3, 5, 0, 0, 0, 0], move_zeroes([1, 0, 3, 0, 0, 0, 5])) def test_two(self): """ Should return list with zero """ self.assertEqual([0], move_zeroes([0])) if __name__ == '__main__': unittest.main()
# Task: # Your task is to write a function that moves all zeros to the end of a list. # The function takes a list of numbers, and your task is to modify it so that the zeros end up at the end of the list. # It returns nothing and only mutates the given list. The order of the non-zero numbers must be preserved. import unittest def move_zeroes(lst: list) -> list: lst[:] = [i for i in lst if i != 0] + [0] * lst.count(0) return lst class TestMoveZeroes(unittest.TestCase): def test_one(self): """ Should return list with zeros at the end of the list """ self.assertEqual([1, 3, 12, 0, 0], move_zeroes([0, 1, 0, 3, 12])) self.assertEqual([1, 3, 5, 0, 0, 0, 0], move_zeroes([1, 0, 3, 0, 0, 0, 5])) def test_two(self): """ Should return list with zero """ self.assertEqual([0], move_zeroes([0])) if __name__ == '__main__': unittest.main()
ru
0.996104
# Task: # Your task is to write a function that moves all zeros to the end of a list. # The function takes a list of numbers, and your task is to modify it so that the zeros end up at the end of the list. # It returns nothing and only mutates the given list. The order of the non-zero numbers must be preserved. Should return list with zeros at the end of the list Should return list with zero
4.381165
4
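An alternative in-place variant that avoids building a new list: Python's sort is stable, so ordering by "is zero" pushes the zeros to the back while keeping the relative order of the non-zero values (the function name below is mine):

def move_zeroes_stable_sort(lst: list) -> None:
    # False sorts before True, and stability preserves the non-zero order
    lst.sort(key=lambda x: x == 0)


nums = [0, 1, 0, 3, 12]
move_zeroes_stable_sort(nums)
assert nums == [1, 3, 12, 0, 0]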
src/register.py
squidt/git-custom-commands
0
6631529
import sys from git import Repo # what we're doing in git terms: # # git config alias.<name> '!"<python.exe filepath>" "<script>"' # # english: # # Create a .bat 'in place' within the alias and make it call a python script. # The python script can then run any amount of git commands or do whatever # it wants. def make_bat_script(filepath_py_script): return '!"' + sys.executable + '" "' + filepath_py_script + '"' def make_str_alias(name): return 'alias.' + name def command(name, filepath_script): CONST_BATCH_SCRIPT = make_bat_script(filepath_script) CONST_ALIAS_FULL = make_str_alias(name) Repo().git.execute(['git', 'config', '--global', CONST_ALIAS_FULL, CONST_BATCH_SCRIPT]) def command_local(name, filepath_script, filepath_repository): CONST_BAT_SCRIPT = make_bat_script(filepath_script) CONST_ALIAS_FULL = make_str_alias(name) Repo(filepath_repository).git.execute(['git', 'config', CONST_ALIAS_FULL, CONST_BAT_SCRIPT])
import sys from git import Repo # what we're doing in git terms: # # git config alias.<name> '!"<python.exe filepath>" "<script>"' # # english: # # Create a .bat 'in place' within the alias and make it call a python script. # The python script can then run any amount of git commands or do whatever # it wants. def make_bat_script(filepath_py_script): return '!"' + sys.executable + '" "' + filepath_py_script + '"' def make_str_alias(name): return 'alias.' + name def command(name, filepath_script): CONST_BATCH_SCRIPT = make_bat_script(filepath_script) CONST_ALIAS_FULL = make_str_alias(name) Repo().git.execute(['git', 'config', '--global', CONST_ALIAS_FULL, CONST_BATCH_SCRIPT]) def command_local(name, filepath_script, filepath_repository): CONST_BAT_SCRIPT = make_bat_script(filepath_script) CONST_ALIAS_FULL = make_str_alias(name) Repo(filepath_repository).git.execute(['git', 'config', CONST_ALIAS_FULL, CONST_BAT_SCRIPT])
en
0.635293
# what we're doing in git terms: # # git config alias.<name> '!"<python.exe filepath>" "<script>"' # # english: # # Create a .bat 'in place' within the alias and make it call a python script. # The python script can then run any amount of git commands or do whatever # it wants.
2.466906
2
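A rough, dependency-free equivalent of what command() ends up executing, using subprocess instead of GitPython; the alias name and script path in the commented-out call are made up for illustration:

import subprocess
import sys


def register_alias(name, script_path):
    # equivalent to: git config --global alias.<name> '!"<python.exe>" "<script>"'
    alias_value = '!"{}" "{}"'.format(sys.executable, script_path)
    subprocess.run(["git", "config", "--global", "alias." + name, alias_value], check=True)


# register_alias("hello", r"C:\scripts\hello.py")  # afterwards `git hello` runs the script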
ultrasonic/ultrasonic.py
pankajdahilkar/FishPondMonetor
0
6631530
import RPi.GPIO as GPIO import time import requests as req GPIO.setmode(GPIO.BOARD) TRIG = 16 ECHO = 18 i=0 GPIO.setup(TRIG,GPIO.OUT) GPIO.setup(ECHO,GPIO.IN) GPIO.output(TRIG, False) print("Calibrating.....") time.sleep(2) print ("Place the object......") max_water_level = 18 min_water_level = 5 try: while True: GPIO.output(TRIG, True) time.sleep(0.00001) GPIO.output(TRIG, False) while GPIO.input(ECHO)==0: pulse_start = time.time() while GPIO.input(ECHO)==1: pulse_end = time.time() pulse_duration = pulse_end - pulse_start distance = pulse_duration * 17150 distance = round(distance+1.15, 2) distance_actual = distance - 5 level_water = 13 - distance_actual percentage_water_level = level_water * 10 if percentage_water_level < 0: percentage_water_level = 0 print("water level= ", percentage_water_level, " %") url = "https://world4.tech/fsecure/upload.php?val=" + str(int(percentage_water_level)) + " %" resp = req.get(url) print(resp.text) time.sleep(2) except KeyboardInterrupt: GPIO.cleanup()
import RPi.GPIO as GPIO import time import requests as req GPIO.setmode(GPIO.BOARD) TRIG = 16 ECHO = 18 i=0 GPIO.setup(TRIG,GPIO.OUT) GPIO.setup(ECHO,GPIO.IN) GPIO.output(TRIG, False) print("Calibrating.....") time.sleep(2) print ("Place the object......") max_water_level = 18 min_water_level = 5 try: while True: GPIO.output(TRIG, True) time.sleep(0.00001) GPIO.output(TRIG, False) while GPIO.input(ECHO)==0: pulse_start = time.time() while GPIO.input(ECHO)==1: pulse_end = time.time() pulse_duration = pulse_end - pulse_start distance = pulse_duration * 17150 distance = round(distance+1.15, 2) distance_actual = distance - 5 level_water = 13 - distance_actual percentage_water_level = level_water * 10 if percentage_water_level < 0: percentage_water_level = 0 print("water level= ", percentage_water_level, " %") url = "https://world4.tech/fsecure/upload.php?val=" + str(int(percentage_water_level)) + " %" resp = req.get(url) print(resp.text) time.sleep(2) except KeyboardInterrupt: GPIO.cleanup()
none
1
3.268334
3
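The measurement arithmetic from the loop, pulled out so it can be sanity-checked without the GPIO hardware; the 17150 factor is half the speed of sound in cm/s (the echo covers a round trip), and the level helper below normalizes by the measurable range rather than the hard-coded x10 scale used above (the 5 cm offset and 13 cm range follow the snippet, the normalization is my variation):

def echo_to_cm(pulse_duration_s):
    # distance_cm = duration * 34300 / 2 (speed of sound, round trip)
    return pulse_duration_s * 17150


def level_percentage(distance_cm, sensor_offset_cm=5.0, full_range_cm=13.0):
    # water closer to the sensor means a higher level; clamp to 0..100 %
    level = full_range_cm - (distance_cm - sensor_offset_cm)
    return max(0.0, min(100.0, level / full_range_cm * 100.0))


print(echo_to_cm(0.001))       # a 1 ms echo is ~17.15 cm
print(level_percentage(10.0))  # surface 10 cm from the sensor -> ~61.5 %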
python/place_components.py
willdickson/ring_light_72mm_id_4_row_168_led
1
6631531
<reponame>willdickson/ring_light_72mm_id_4_row_168_led<filename>python/place_components.py import os import sys import math import pickle import pcbnew from design import RingLightDesign def nm_to_mm(value): return value*1.0e-6 def mm_to_nm(value): return value*1.0e6 def rad_to_deg(value): return 180.0*value/math.pi def deg_to_rad(value): return 180.0*value/math.pi # ----------------------------------------------------------------------------- if __name__ == '__main__': param_filename = sys.argv[1] pcb_filename = sys.argv[2] center_x = 100 center_y = 100 resistor_offset = 5.0 mount_hole_radius = 76.0 mount_hole_offset = 0.5 with open(param_filename, 'rb') as f: param = pickle.load(f) design = RingLightDesign(param) pos_list = design.get_light_position_list() outer_diam = design.get_outer_diameter() pcb = pcbnew.LoadBoard(pcb_filename) led_pos_dict = {} led_angle_dict = {} led_uvec_dict = {} module_list = [module for module in pcb.GetModules()] for module in module_list: ref_str = str(module.GetReference()) if 'D' in ref_str: led_num = int(ref_str[1:]) print(f'LED# {led_num}: {ref_str}') pos = pos_list[led_num-1] led_pos_x = pos['x'] + center_x led_pos_y = pos['y'] + center_y x_nm = mm_to_nm(led_pos_x) y_nm = mm_to_nm(led_pos_y) angle_deg = rad_to_deg(pos['angle']) led_pos_dict[led_num] = led_pos_x, led_pos_y led_angle_dict[led_num] = angle_deg uvec_denom = math.sqrt(pos['x']**2 + pos['y']**2) led_uvec_dict[led_num] = pos['x']/uvec_denom, pos['y']/uvec_denom # Move modules to new position pos = module.GetPosition() pos.x = int(x_nm) pos.y = int(y_nm) module.SetPosition(pos) module.SetOrientation(-10.0*angle_deg) # Make value invisible value_obj = module.Value() value_obj.SetVisible(False) for module in module_list: ref_str = str(module.GetReference()) if 'R' in ref_str: r_num = int(ref_str[1:]) print(f'R# {r_num}: {ref_str}') con_led_num = r_num*param['divisor'] led_pos = led_pos_dict[con_led_num] uvec = led_uvec_dict[con_led_num] #new_pos_x = led_pos[0] #new_pos_y = led_pos[1] new_pos_x = led_pos[0] + resistor_offset*uvec[0] new_pos_y = led_pos[1] + resistor_offset*uvec[1] new_pos_x_nm = mm_to_nm(new_pos_x) new_pos_y_nm = mm_to_nm(new_pos_y) pos = module.GetPosition() pos.x = int(new_pos_x_nm) pos.y = int(new_pos_y_nm) module.SetPosition(pos) module.SetOrientation(-10.0*(led_angle_dict[con_led_num]-90.0)) value_obj = module.Value() value_obj.SetVisible(False) ref_obj = module.Reference() ref_obj.SetVisible(False) mount_hole_dict = {} for module in module_list: ref_str = str(module.GetReference()) if 'M' in ref_str: hole_num = int(ref_str[1:]) mount_hole_dict[hole_num] = module num_mount_hole = len(mount_hole_dict) angle_step = (2.0*math.pi)/float(num_mount_hole) for hole_num, module in mount_hole_dict.items(): angle_rad = (hole_num-1)*angle_step + mount_hole_offset*angle_step print(hole_num, rad_to_deg(angle_rad)) pos = module.GetPosition() pos.x = int(mm_to_nm(mount_hole_radius*math.cos(angle_rad) + center_x)) pos.y = int(mm_to_nm(mount_hole_radius*math.sin(angle_rad) + center_y)) module.SetPosition(pos) pathname, basename = os.path.split(pcb_filename) new_basename = 'mod_{}'.format(basename) new_filename = os.path.join(pathname,new_basename) pcb.Save(new_filename)
import os import sys import math import pickle import pcbnew from design import RingLightDesign def nm_to_mm(value): return value*1.0e-6 def mm_to_nm(value): return value*1.0e6 def rad_to_deg(value): return 180.0*value/math.pi def deg_to_rad(value): return 180.0*value/math.pi # ----------------------------------------------------------------------------- if __name__ == '__main__': param_filename = sys.argv[1] pcb_filename = sys.argv[2] center_x = 100 center_y = 100 resistor_offset = 5.0 mount_hole_radius = 76.0 mount_hole_offset = 0.5 with open(param_filename, 'rb') as f: param = pickle.load(f) design = RingLightDesign(param) pos_list = design.get_light_position_list() outer_diam = design.get_outer_diameter() pcb = pcbnew.LoadBoard(pcb_filename) led_pos_dict = {} led_angle_dict = {} led_uvec_dict = {} module_list = [module for module in pcb.GetModules()] for module in module_list: ref_str = str(module.GetReference()) if 'D' in ref_str: led_num = int(ref_str[1:]) print(f'LED# {led_num}: {ref_str}') pos = pos_list[led_num-1] led_pos_x = pos['x'] + center_x led_pos_y = pos['y'] + center_y x_nm = mm_to_nm(led_pos_x) y_nm = mm_to_nm(led_pos_y) angle_deg = rad_to_deg(pos['angle']) led_pos_dict[led_num] = led_pos_x, led_pos_y led_angle_dict[led_num] = angle_deg uvec_denom = math.sqrt(pos['x']**2 + pos['y']**2) led_uvec_dict[led_num] = pos['x']/uvec_denom, pos['y']/uvec_denom # Move modules to new position pos = module.GetPosition() pos.x = int(x_nm) pos.y = int(y_nm) module.SetPosition(pos) module.SetOrientation(-10.0*angle_deg) # Make value invisible value_obj = module.Value() value_obj.SetVisible(False) for module in module_list: ref_str = str(module.GetReference()) if 'R' in ref_str: r_num = int(ref_str[1:]) print(f'R# {r_num}: {ref_str}') con_led_num = r_num*param['divisor'] led_pos = led_pos_dict[con_led_num] uvec = led_uvec_dict[con_led_num] #new_pos_x = led_pos[0] #new_pos_y = led_pos[1] new_pos_x = led_pos[0] + resistor_offset*uvec[0] new_pos_y = led_pos[1] + resistor_offset*uvec[1] new_pos_x_nm = mm_to_nm(new_pos_x) new_pos_y_nm = mm_to_nm(new_pos_y) pos = module.GetPosition() pos.x = int(new_pos_x_nm) pos.y = int(new_pos_y_nm) module.SetPosition(pos) module.SetOrientation(-10.0*(led_angle_dict[con_led_num]-90.0)) value_obj = module.Value() value_obj.SetVisible(False) ref_obj = module.Reference() ref_obj.SetVisible(False) mount_hole_dict = {} for module in module_list: ref_str = str(module.GetReference()) if 'M' in ref_str: hole_num = int(ref_str[1:]) mount_hole_dict[hole_num] = module num_mount_hole = len(mount_hole_dict) angle_step = (2.0*math.pi)/float(num_mount_hole) for hole_num, module in mount_hole_dict.items(): angle_rad = (hole_num-1)*angle_step + mount_hole_offset*angle_step print(hole_num, rad_to_deg(angle_rad)) pos = module.GetPosition() pos.x = int(mm_to_nm(mount_hole_radius*math.cos(angle_rad) + center_x)) pos.y = int(mm_to_nm(mount_hole_radius*math.sin(angle_rad) + center_y)) module.SetPosition(pos) pathname, basename = os.path.split(pcb_filename) new_basename = 'mod_{}'.format(basename) new_filename = os.path.join(pathname,new_basename) pcb.Save(new_filename)
en
0.169628
# ----------------------------------------------------------------------------- # {led_num}: {ref_str}') # Move modules to new position # Make value invisible # {r_num}: {ref_str}') #new_pos_x = led_pos[0] #new_pos_y = led_pos[1]
2.470296
2
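The placement geometry, separated from pcbnew: evenly spaced points on a circle with an optional fractional offset, matching the mount-hole loop above (76 mm radius, half-step offset, board centre at 100, 100). pcbnew itself expects positions in nanometres and orientations in tenths of a degree, which is what mm_to_nm() and the -10.0*angle factors take care of.

import math


def ring_positions(n, radius_mm, center=(100.0, 100.0), offset_frac=0.0):
    # yield (x_mm, y_mm, angle_deg) for n points evenly spaced on a circle
    step = 2.0 * math.pi / n
    for k in range(n):
        angle = (k + offset_frac) * step
        x = center[0] + radius_mm * math.cos(angle)
        y = center[1] + radius_mm * math.sin(angle)
        yield x, y, math.degrees(angle)


for x, y, deg in ring_positions(4, 76.0, offset_frac=0.5):
    print(round(x, 2), round(y, 2), round(deg, 1))
# 4 holes at 45, 135, 225 and 315 degrees around the board centre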
test/test_linting.py
druvus/bioconda-utils
0
6631532
from helpers import Recipes from bioconda_utils import lint_functions from bioconda_utils import linting, utils def run_lint( func, should_pass, should_fail ): """ Helper function to run a lint function on a set of recipes that should pass and that should fail. Recall each lint function takes recipe path, parsed meta.yaml, and dataframe of channel info. Parameters ---------- func : function Function to test in `bioconda_utils.lint_functions` should_pass, should_fail : str or list Recipe definitions that will be provided to `helpers.Recipes`. Each can be a single string or a list of strings to test. """ if isinstance(should_pass, str): should_pass = [should_pass] if isinstance(should_fail, str): should_fail = [should_fail] def _run(contents, expect_pass=True): """ Build the recipe and run the lint function on the rendered recipe """ r = Recipes(contents, from_string=True) r.write_recipes() assert len(r.recipe_dirs) == 1 name = list(r.recipe_dirs.keys())[0] recipe = r.recipe_dirs[name] metas = [] for platform in ["linux", "osx"]: config = utils.load_conda_build_config(platform=platform, trim_skip=False) metas.extend(utils.load_all_meta(r.recipe_dirs[name], config=config, finalize=False)) if expect_pass: assert func(recipe, metas) is None, "lint did not pass" else: assert func(recipe, metas) is not None, "lint did not fail" for contents in should_pass: _run(contents, expect_pass=True) for contents in should_fail: _run(contents, expect_pass=False) def test_empty_build_section(): r = Recipes( ''' empty_build_section: meta.yaml: | package: name: empty_build_section version: "0.1" build: ''', from_string=True) r.write_recipes() # access to contents of possibly empty build section can happen in # `should_be_noarch` and `should_not_be_noarch` registry = [lint_functions.should_be_noarch, lint_functions.should_not_be_noarch] res = linting.lint( r.recipe_dirs.values(), linting.LintArgs(registry=registry)) assert res is None def test_lint_skip_in_recipe(): # should fail (note we're only linting `missing_home`) r = Recipes( ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" ''', from_string=True) r.write_recipes() res = linting.lint( r.recipe_dirs.values(), linting.LintArgs(registry=[lint_functions.missing_home])) assert res is not None # should now pass with the extra:skip-lints (only linting for `missing_home`) r = Recipes( ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" extra: skip-lints: - missing_home ''', from_string=True) r.write_recipes() res = linting.lint( r.recipe_dirs.values(), linting.LintArgs(registry=[lint_functions.missing_home])) assert res is None # should pass; minimal recipe needs to skip these lints r = Recipes( ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" extra: skip-lints: - missing_home - missing_license - no_tests ''', from_string=True) r.write_recipes() res = linting.lint(r.recipe_dirs.values(), linting.LintArgs()) assert res is not None def test_missing_home(): run_lint( func=lint_functions.missing_home, should_pass=[ ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" about: home: "http://bioconda.github.io" ''', ], should_fail=[ ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" ''', ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" about: home: "" ''', ], ) def test_missing_summary(): run_lint( func=lint_functions.missing_summary, should_pass=[ ''' missing_summary: meta.yaml: | package: name: missing_summary version: "0.1" about: 
summary: "tool description" ''', ], should_fail=[ ''' missing_summary: meta.yaml: | package: name: missing_summary version: "0.1" ''', ''' missing_summary: meta.yaml: | package: name: missing_summary version: "0.1" about: summary: "" ''', ], ) def test_missing_license(): run_lint( func=lint_functions.missing_license, should_pass=[ ''' missing_license: meta.yaml: | package: name: missing_license version: "0.1" about: license: "MIT" ''', ], should_fail=[ ''' missing_license: meta.yaml: | package: name: missing_license version: "0.1" ''', ''' missing_license: meta.yaml: | package: name: missing_license version: "0.1" about: license: "" ''', ], ) def test_missing_tests(): run_lint( func=lint_functions.missing_tests, should_pass=[ ''' missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" test: commands: "ls" ''', ''' missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" run_test.sh: "" ''', ''' missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" run_test.py: "" ''', ], should_fail=[ ''' missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" run_tst.sh: "" ''', ''' missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" test: # empty test section ''', ], ) def test_missing_hash(): run_lint( func=lint_functions.missing_hash, should_pass=[ ''' missing_hash: meta.yaml: | package: name: md5hash version: "0.1" source: md5: 11111111111111111111111111111111 ''', ''' missing_hash: meta.yaml: | package: name: md5hash_list version: "0.1" source: - md5: 11111111111111111111111111111111 ''', # Should pass when source section is missing ''' missing_hash: meta.yaml: | package: name: metapackage version: "0.1" ''', ], should_fail=[ ''' missing_hash: meta.yaml: | package: name: missing_hash version: "0.1" source: fn: "a.txt" ''', ''' missing_hash: meta.yaml: | package: name: empty_hash version: "0.1" source: fn: "a.txt" sha256: "" ''', ''' missing_hash: meta.yaml: | package: name: missing_hash_list version: "0.1" source: - fn: "a.txt" - md5: 11111111111111111111111111111111 ''', ], ) def test_uses_git_url(): run_lint( func=lint_functions.uses_git_url, should_pass=[ ''' uses_git_url: meta.yaml: | package: name: uses_git_url version: "0.1" source: fn: "a.txt" ''', ''' uses_git_url: meta.yaml: | package: name: uses_git_url version: "0.1" ''', ''' uses_git_url: meta.yaml: | package: name: uses_git_url_list version: "0.1" source: - fn: "a.txt" ''', ], should_fail=[ ''' uses_git_url: meta.yaml: | package: name: uses_git_url version: "0.1" source: git_url: https://github.com/bioconda/bioconda.git ''', ''' uses_git_url: meta.yaml: | package: name: uses_git_url_list version: "0.1" source: - git_url: https://github.com/bioconda/bioconda.git ''', ], ) def test_uses_perl_threaded(): run_lint( func=lint_functions.uses_perl_threaded, should_pass=[ ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: build: - perl run: - perl ''', ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: run: - perl ''', ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: build: - perl ''', ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" ''', ], should_fail=[ ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: build: - perl-threaded ''', ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: 
run: - perl-threaded ''', ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: run: - perl-threaded build: - perl-threaded ''', ], ) def test_uses_javajdk(): run_lint( func=lint_functions.uses_javajdk, should_pass=[ ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: build: - openjdk run: - openjdk ''', ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: run: - openjdk ''', ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: build: - openjdk ''', ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" ''', ], should_fail=[ ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: build: - java-jdk ''', ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: run: - java-jdk ''', ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: run: - java-jdk build: - java-jdk ''', ], ) def test_uses_setuptools(): run_lint( func=lint_functions.uses_setuptools, should_pass=[ ''' uses_setuptools: meta.yaml: | package: name: uses_setuptools version: "0.1" ''', ''' uses_setuptools: meta.yaml: | package: name: uses_setuptools version: "0.1" requirements: build: - setuptools ''', ], should_fail=[ ''' uses_setuptools: meta.yaml: | package: name: uses_setuptools version: "0.1" requirements: run: - setuptools ''', ], ) def test_has_windows_bat_file(): run_lint( func=lint_functions.has_windows_bat_file, should_pass=[ ''' has_windows_bat_file: meta.yaml: | package: name: has_windows_bat_file version: "0.1" ''', ], should_fail=[ ''' has_windows_bat_file: meta.yaml: | package: name: has_windows_bat_file version: "0.1" build.bat: "" ''', ''' has_windows_bat_file: meta.yaml: | package: name: has_windows_bat_file version: "0.1" any.bat: "" ''', ] ) def test_should_not_be_noarch(): run_lint( func=lint_functions.should_not_be_noarch, should_pass=[ ''' should_be_noarch1: meta.yaml: | package: name: should_be_noarch1 version: "0.1" build: noarch: python ''', ''' should_be_noarch2: meta.yaml: | package: name: should_be_noarch2 version: "0.1" build: noarch: python skip: false ''', ], should_fail=[ ''' should_not_be_noarch1: meta.yaml: | package: name: should_not_be_noarch1 version: "0.1" build: noarch: python requirements: build: - gcc ''', ''' should_not_be_noarch2: meta.yaml: | package: name: should_not_be_noarch2 version: "0.1" build: noarch: python skip: True # [osx] ''', ''' should_not_be_noarch3: meta.yaml: | package: name: should_not_be_noarch3 version: "0.1" build: noarch: python skip: False requirements: build: - gcc ''', ] ) def test_setup_py_install_args(): run_lint( func=lint_functions.setup_py_install_args, should_pass=[ ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" build.sh: | $PYTHON setup.py install ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" build: script: $PYTHON setup.py install ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build.sh: | $PYTHON setup.py install --single-version-externally-managed --report=a.txt ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build.sh: | $PYTHON 
setup.py install \\ --single-version-externally-managed --report=a.txt ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" build: script: $PYTHON setup.py install --single-version-externally-managed --report=a.txt ''', # noqa: E501: line too long ], should_fail=[ ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build.sh: | $PYTHON setup.py install ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build: script: $PYTHON setup.py install ''', ] ) def test_invalid_identifiers(): run_lint( func=lint_functions.invalid_identifiers, should_pass=[ ''' a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: - doi:10.1093/bioinformatics/btr010 ''', ], should_fail=[ ''' a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: - doi: 10.1093/bioinformatics/btr010 ''', ''' a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: doi: 10.1093/bioinformatics/btr010 ''', ''' a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: doi:10.1093/bioinformatics/btr010 ''', ] ) def test_deprecated_numpy_spec(): run_lint( func=lint_functions.deprecated_numpy_spec, should_pass=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: build: - numpy - python run: - numpy ''', ], should_fail=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: build: - numpy x.x run: - numpy x.x ''', ] ) def test_should_use_compilers(): run_lint( func=lint_functions.should_use_compilers, should_pass=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: host: - python run: - python ''', ''' a: meta.yaml: | package: name: a version: 0.1 requirements: build: - {{ compiler ('c') }} ''', ], should_fail=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: build: - gcc # [linux] ''', ''' a: meta.yaml: | package: name: a version: 0.1 requirements: run: - libgcc # [linux] ''', ] ) def test_compilers_must_be_in_build(): run_lint( func=lint_functions.compilers_must_be_in_build, should_pass=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: host: - python run: - python package: name: a version: 0.1 requirements: build: - {{ compiler ('c') }} ''', ], should_fail=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: run: - {{ compiler("c") }} ''', ''' a: meta.yaml: | package: name: a version: 0.1 requirements: host: - {{ compiler ('c') }} ''', ] ) def test_should_not_use_fn(): run_lint( func=lint_functions.should_not_use_fn, should_pass=[ ''' a: meta.yaml: | package: name: a version: 0.1 ''', ''' a: meta.yaml: | package: name: a version: 0.1 source: url: https://bioconda.github.io/index.html ''', ], should_fail=[ ''' a: meta.yaml: | package: name: a version: 0.1 source: fn: index.html url: https://bioconda.github.io/index.html ''', ] ) #def test_bioconductor_37(): # run_lint( # func=lint_functions.bioconductor_37, # should_pass=[ # ''' # a: # meta.yaml: | # {% set bioc = "3.6" %} # package: # name: a # version: 0.1 # ''', # ], # should_fail=[ # ''' # a: # meta.yaml: | # {% set bioc = "3.7" %} # package: # name: a # version: 0.1 # ''', # ''' # a: # meta.yaml: | # {% set bioc = "release" %} # package: # name: a # version: 0.1 # ''', # ] # )
from helpers import Recipes from bioconda_utils import lint_functions from bioconda_utils import linting, utils def run_lint( func, should_pass, should_fail ): """ Helper function to run a lint function on a set of recipes that should pass and that should fail. Recall each lint function takes recipe path, parsed meta.yaml, and dataframe of channel info. Parameters ---------- func : function Function to test in `bioconda_utils.lint_functions` should_pass, should_fail : str or list Recipe definitions that will be provided to `helpers.Recipes`. Each can be a single string or a list of strings to test. """ if isinstance(should_pass, str): should_pass = [should_pass] if isinstance(should_fail, str): should_fail = [should_fail] def _run(contents, expect_pass=True): """ Build the recipe and run the lint function on the rendered recipe """ r = Recipes(contents, from_string=True) r.write_recipes() assert len(r.recipe_dirs) == 1 name = list(r.recipe_dirs.keys())[0] recipe = r.recipe_dirs[name] metas = [] for platform in ["linux", "osx"]: config = utils.load_conda_build_config(platform=platform, trim_skip=False) metas.extend(utils.load_all_meta(r.recipe_dirs[name], config=config, finalize=False)) if expect_pass: assert func(recipe, metas) is None, "lint did not pass" else: assert func(recipe, metas) is not None, "lint did not fail" for contents in should_pass: _run(contents, expect_pass=True) for contents in should_fail: _run(contents, expect_pass=False) def test_empty_build_section(): r = Recipes( ''' empty_build_section: meta.yaml: | package: name: empty_build_section version: "0.1" build: ''', from_string=True) r.write_recipes() # access to contents of possibly empty build section can happen in # `should_be_noarch` and `should_not_be_noarch` registry = [lint_functions.should_be_noarch, lint_functions.should_not_be_noarch] res = linting.lint( r.recipe_dirs.values(), linting.LintArgs(registry=registry)) assert res is None def test_lint_skip_in_recipe(): # should fail (note we're only linting `missing_home`) r = Recipes( ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" ''', from_string=True) r.write_recipes() res = linting.lint( r.recipe_dirs.values(), linting.LintArgs(registry=[lint_functions.missing_home])) assert res is not None # should now pass with the extra:skip-lints (only linting for `missing_home`) r = Recipes( ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" extra: skip-lints: - missing_home ''', from_string=True) r.write_recipes() res = linting.lint( r.recipe_dirs.values(), linting.LintArgs(registry=[lint_functions.missing_home])) assert res is None # should pass; minimal recipe needs to skip these lints r = Recipes( ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" extra: skip-lints: - missing_home - missing_license - no_tests ''', from_string=True) r.write_recipes() res = linting.lint(r.recipe_dirs.values(), linting.LintArgs()) assert res is not None def test_missing_home(): run_lint( func=lint_functions.missing_home, should_pass=[ ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" about: home: "http://bioconda.github.io" ''', ], should_fail=[ ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" ''', ''' missing_home: meta.yaml: | package: name: missing_home version: "0.1" about: home: "" ''', ], ) def test_missing_summary(): run_lint( func=lint_functions.missing_summary, should_pass=[ ''' missing_summary: meta.yaml: | package: name: missing_summary version: "0.1" about: 
summary: "tool description" ''', ], should_fail=[ ''' missing_summary: meta.yaml: | package: name: missing_summary version: "0.1" ''', ''' missing_summary: meta.yaml: | package: name: missing_summary version: "0.1" about: summary: "" ''', ], ) def test_missing_license(): run_lint( func=lint_functions.missing_license, should_pass=[ ''' missing_license: meta.yaml: | package: name: missing_license version: "0.1" about: license: "MIT" ''', ], should_fail=[ ''' missing_license: meta.yaml: | package: name: missing_license version: "0.1" ''', ''' missing_license: meta.yaml: | package: name: missing_license version: "0.1" about: license: "" ''', ], ) def test_missing_tests(): run_lint( func=lint_functions.missing_tests, should_pass=[ ''' missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" test: commands: "ls" ''', ''' missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" run_test.sh: "" ''', ''' missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" run_test.py: "" ''', ], should_fail=[ ''' missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" run_tst.sh: "" ''', ''' missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" test: # empty test section ''', ], ) def test_missing_hash(): run_lint( func=lint_functions.missing_hash, should_pass=[ ''' missing_hash: meta.yaml: | package: name: md5hash version: "0.1" source: md5: 11111111111111111111111111111111 ''', ''' missing_hash: meta.yaml: | package: name: md5hash_list version: "0.1" source: - md5: 11111111111111111111111111111111 ''', # Should pass when source section is missing ''' missing_hash: meta.yaml: | package: name: metapackage version: "0.1" ''', ], should_fail=[ ''' missing_hash: meta.yaml: | package: name: missing_hash version: "0.1" source: fn: "a.txt" ''', ''' missing_hash: meta.yaml: | package: name: empty_hash version: "0.1" source: fn: "a.txt" sha256: "" ''', ''' missing_hash: meta.yaml: | package: name: missing_hash_list version: "0.1" source: - fn: "a.txt" - md5: 11111111111111111111111111111111 ''', ], ) def test_uses_git_url(): run_lint( func=lint_functions.uses_git_url, should_pass=[ ''' uses_git_url: meta.yaml: | package: name: uses_git_url version: "0.1" source: fn: "a.txt" ''', ''' uses_git_url: meta.yaml: | package: name: uses_git_url version: "0.1" ''', ''' uses_git_url: meta.yaml: | package: name: uses_git_url_list version: "0.1" source: - fn: "a.txt" ''', ], should_fail=[ ''' uses_git_url: meta.yaml: | package: name: uses_git_url version: "0.1" source: git_url: https://github.com/bioconda/bioconda.git ''', ''' uses_git_url: meta.yaml: | package: name: uses_git_url_list version: "0.1" source: - git_url: https://github.com/bioconda/bioconda.git ''', ], ) def test_uses_perl_threaded(): run_lint( func=lint_functions.uses_perl_threaded, should_pass=[ ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: build: - perl run: - perl ''', ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: run: - perl ''', ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: build: - perl ''', ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" ''', ], should_fail=[ ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: build: - perl-threaded ''', ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: 
run: - perl-threaded ''', ''' uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: run: - perl-threaded build: - perl-threaded ''', ], ) def test_uses_javajdk(): run_lint( func=lint_functions.uses_javajdk, should_pass=[ ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: build: - openjdk run: - openjdk ''', ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: run: - openjdk ''', ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: build: - openjdk ''', ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" ''', ], should_fail=[ ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: build: - java-jdk ''', ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: run: - java-jdk ''', ''' uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: run: - java-jdk build: - java-jdk ''', ], ) def test_uses_setuptools(): run_lint( func=lint_functions.uses_setuptools, should_pass=[ ''' uses_setuptools: meta.yaml: | package: name: uses_setuptools version: "0.1" ''', ''' uses_setuptools: meta.yaml: | package: name: uses_setuptools version: "0.1" requirements: build: - setuptools ''', ], should_fail=[ ''' uses_setuptools: meta.yaml: | package: name: uses_setuptools version: "0.1" requirements: run: - setuptools ''', ], ) def test_has_windows_bat_file(): run_lint( func=lint_functions.has_windows_bat_file, should_pass=[ ''' has_windows_bat_file: meta.yaml: | package: name: has_windows_bat_file version: "0.1" ''', ], should_fail=[ ''' has_windows_bat_file: meta.yaml: | package: name: has_windows_bat_file version: "0.1" build.bat: "" ''', ''' has_windows_bat_file: meta.yaml: | package: name: has_windows_bat_file version: "0.1" any.bat: "" ''', ] ) def test_should_not_be_noarch(): run_lint( func=lint_functions.should_not_be_noarch, should_pass=[ ''' should_be_noarch1: meta.yaml: | package: name: should_be_noarch1 version: "0.1" build: noarch: python ''', ''' should_be_noarch2: meta.yaml: | package: name: should_be_noarch2 version: "0.1" build: noarch: python skip: false ''', ], should_fail=[ ''' should_not_be_noarch1: meta.yaml: | package: name: should_not_be_noarch1 version: "0.1" build: noarch: python requirements: build: - gcc ''', ''' should_not_be_noarch2: meta.yaml: | package: name: should_not_be_noarch2 version: "0.1" build: noarch: python skip: True # [osx] ''', ''' should_not_be_noarch3: meta.yaml: | package: name: should_not_be_noarch3 version: "0.1" build: noarch: python skip: False requirements: build: - gcc ''', ] ) def test_setup_py_install_args(): run_lint( func=lint_functions.setup_py_install_args, should_pass=[ ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" build.sh: | $PYTHON setup.py install ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" build: script: $PYTHON setup.py install ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build.sh: | $PYTHON setup.py install --single-version-externally-managed --report=a.txt ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build.sh: | $PYTHON 
setup.py install \\ --single-version-externally-managed --report=a.txt ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" build: script: $PYTHON setup.py install --single-version-externally-managed --report=a.txt ''', # noqa: E501: line too long ], should_fail=[ ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build.sh: | $PYTHON setup.py install ''', ''' setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build: script: $PYTHON setup.py install ''', ] ) def test_invalid_identifiers(): run_lint( func=lint_functions.invalid_identifiers, should_pass=[ ''' a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: - doi:10.1093/bioinformatics/btr010 ''', ], should_fail=[ ''' a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: - doi: 10.1093/bioinformatics/btr010 ''', ''' a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: doi: 10.1093/bioinformatics/btr010 ''', ''' a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: doi:10.1093/bioinformatics/btr010 ''', ] ) def test_deprecated_numpy_spec(): run_lint( func=lint_functions.deprecated_numpy_spec, should_pass=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: build: - numpy - python run: - numpy ''', ], should_fail=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: build: - numpy x.x run: - numpy x.x ''', ] ) def test_should_use_compilers(): run_lint( func=lint_functions.should_use_compilers, should_pass=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: host: - python run: - python ''', ''' a: meta.yaml: | package: name: a version: 0.1 requirements: build: - {{ compiler ('c') }} ''', ], should_fail=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: build: - gcc # [linux] ''', ''' a: meta.yaml: | package: name: a version: 0.1 requirements: run: - libgcc # [linux] ''', ] ) def test_compilers_must_be_in_build(): run_lint( func=lint_functions.compilers_must_be_in_build, should_pass=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: host: - python run: - python package: name: a version: 0.1 requirements: build: - {{ compiler ('c') }} ''', ], should_fail=[ ''' a: meta.yaml: | package: name: a version: 0.1 requirements: run: - {{ compiler("c") }} ''', ''' a: meta.yaml: | package: name: a version: 0.1 requirements: host: - {{ compiler ('c') }} ''', ] ) def test_should_not_use_fn(): run_lint( func=lint_functions.should_not_use_fn, should_pass=[ ''' a: meta.yaml: | package: name: a version: 0.1 ''', ''' a: meta.yaml: | package: name: a version: 0.1 source: url: https://bioconda.github.io/index.html ''', ], should_fail=[ ''' a: meta.yaml: | package: name: a version: 0.1 source: fn: index.html url: https://bioconda.github.io/index.html ''', ] ) #def test_bioconductor_37(): # run_lint( # func=lint_functions.bioconductor_37, # should_pass=[ # ''' # a: # meta.yaml: | # {% set bioc = "3.6" %} # package: # name: a # version: 0.1 # ''', # ], # should_fail=[ # ''' # a: # meta.yaml: | # {% set bioc = "3.7" %} # package: # name: a # version: 0.1 # ''', # ''' # a: # meta.yaml: | # {% set bioc = "release" %} # package: # name: a # version: 0.1 # ''', # ] # )
en
0.532071
Helper function to run a lint function on a set of recipes that should pass and that should fail. Recall each lint function takes recipe path, parsed meta.yaml, and dataframe of channel info. Parameters ---------- func : function Function to test in `bioconda_utils.lint_functions` should_pass, should_fail : str or list Recipe definitions that will be provided to `helpers.Recipes`. Each can be a single string or a list of strings to test. Build the recipe and run the lint function on the rendered recipe empty_build_section: meta.yaml: | package: name: empty_build_section version: "0.1" build: # access to contents of possibly empty build section can happen in # `should_be_noarch` and `should_not_be_noarch` # should fail (note we're only linting `missing_home`) missing_home: meta.yaml: | package: name: missing_home version: "0.1" # should now pass with the extra:skip-lints (only linting for `missing_home`) missing_home: meta.yaml: | package: name: missing_home version: "0.1" extra: skip-lints: - missing_home # should pass; minimal recipe needs to skip these lints missing_home: meta.yaml: | package: name: missing_home version: "0.1" extra: skip-lints: - missing_home - missing_license - no_tests missing_home: meta.yaml: | package: name: missing_home version: "0.1" about: home: "http://bioconda.github.io" missing_home: meta.yaml: | package: name: missing_home version: "0.1" missing_home: meta.yaml: | package: name: missing_home version: "0.1" about: home: "" missing_summary: meta.yaml: | package: name: missing_summary version: "0.1" about: summary: "tool description" missing_summary: meta.yaml: | package: name: missing_summary version: "0.1" missing_summary: meta.yaml: | package: name: missing_summary version: "0.1" about: summary: "" missing_license: meta.yaml: | package: name: missing_license version: "0.1" about: license: "MIT" missing_license: meta.yaml: | package: name: missing_license version: "0.1" missing_license: meta.yaml: | package: name: missing_license version: "0.1" about: license: "" missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" test: commands: "ls" missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" run_test.sh: "" missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" run_test.py: "" missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" run_tst.sh: "" missing_tests: meta.yaml: | package: name: missing_tests version: "0.1" test: # empty test section missing_hash: meta.yaml: | package: name: md5hash version: "0.1" source: md5: 11111111111111111111111111111111 missing_hash: meta.yaml: | package: name: md5hash_list version: "0.1" source: - md5: 11111111111111111111111111111111 # Should pass when source section is missing missing_hash: meta.yaml: | package: name: metapackage version: "0.1" missing_hash: meta.yaml: | package: name: missing_hash version: "0.1" source: fn: "a.txt" missing_hash: meta.yaml: | package: name: empty_hash version: "0.1" source: fn: "a.txt" sha256: "" missing_hash: meta.yaml: | package: name: missing_hash_list version: "0.1" source: - fn: "a.txt" - md5: 11111111111111111111111111111111 uses_git_url: meta.yaml: | package: name: uses_git_url version: "0.1" source: fn: "a.txt" uses_git_url: meta.yaml: | package: name: uses_git_url version: "0.1" uses_git_url: meta.yaml: | package: name: uses_git_url_list version: "0.1" source: - fn: "a.txt" uses_git_url: meta.yaml: | package: name: uses_git_url version: "0.1" source: git_url: https://github.com/bioconda/bioconda.git uses_git_url: 
meta.yaml: | package: name: uses_git_url_list version: "0.1" source: - git_url: https://github.com/bioconda/bioconda.git uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: build: - perl run: - perl uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: run: - perl uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: build: - perl uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: build: - perl-threaded uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: run: - perl-threaded uses_perl_threaded: meta.yaml: | package: name: uses_perl_threaded version: "0.1" requirements: run: - perl-threaded build: - perl-threaded uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: build: - openjdk run: - openjdk uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: run: - openjdk uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: build: - openjdk uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: build: - java-jdk uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: run: - java-jdk uses_javajdk: meta.yaml: | package: name: uses_javajdk version: "0.1" requirements: run: - java-jdk build: - java-jdk uses_setuptools: meta.yaml: | package: name: uses_setuptools version: "0.1" uses_setuptools: meta.yaml: | package: name: uses_setuptools version: "0.1" requirements: build: - setuptools uses_setuptools: meta.yaml: | package: name: uses_setuptools version: "0.1" requirements: run: - setuptools has_windows_bat_file: meta.yaml: | package: name: has_windows_bat_file version: "0.1" has_windows_bat_file: meta.yaml: | package: name: has_windows_bat_file version: "0.1" build.bat: "" has_windows_bat_file: meta.yaml: | package: name: has_windows_bat_file version: "0.1" any.bat: "" should_be_noarch1: meta.yaml: | package: name: should_be_noarch1 version: "0.1" build: noarch: python should_be_noarch2: meta.yaml: | package: name: should_be_noarch2 version: "0.1" build: noarch: python skip: false should_not_be_noarch1: meta.yaml: | package: name: should_not_be_noarch1 version: "0.1" build: noarch: python requirements: build: - gcc should_not_be_noarch2: meta.yaml: | package: name: should_not_be_noarch2 version: "0.1" build: noarch: python skip: True # [osx] should_not_be_noarch3: meta.yaml: | package: name: should_not_be_noarch3 version: "0.1" build: noarch: python skip: False requirements: build: - gcc setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" build.sh: | $PYTHON setup.py install setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" build: script: $PYTHON setup.py install setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build.sh: | $PYTHON setup.py install --single-version-externally-managed --report=a.txt setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build.sh: | $PYTHON setup.py install \\ 
--single-version-externally-managed --report=a.txt setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" build: script: $PYTHON setup.py install --single-version-externally-managed --report=a.txt # noqa: E501: line too long setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build.sh: | $PYTHON setup.py install setup_py_install_args: meta.yaml: | package: name: setup_py_install_args version: "0.1" requirements: build: - setuptools build: script: $PYTHON setup.py install a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: - doi:10.1093/bioinformatics/btr010 a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: - doi: 10.1093/bioinformatics/btr010 a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: doi: 10.1093/bioinformatics/btr010 a: meta.yaml: | package: name: a version: 0.1 extra: identifiers: doi:10.1093/bioinformatics/btr010 a: meta.yaml: | package: name: a version: 0.1 requirements: build: - numpy - python run: - numpy a: meta.yaml: | package: name: a version: 0.1 requirements: build: - numpy x.x run: - numpy x.x a: meta.yaml: | package: name: a version: 0.1 requirements: host: - python run: - python a: meta.yaml: | package: name: a version: 0.1 requirements: build: - {{ compiler ('c') }} a: meta.yaml: | package: name: a version: 0.1 requirements: build: - gcc # [linux] a: meta.yaml: | package: name: a version: 0.1 requirements: run: - libgcc # [linux] a: meta.yaml: | package: name: a version: 0.1 requirements: host: - python run: - python package: name: a version: 0.1 requirements: build: - {{ compiler ('c') }} a: meta.yaml: | package: name: a version: 0.1 requirements: run: - {{ compiler("c") }} a: meta.yaml: | package: name: a version: 0.1 requirements: host: - {{ compiler ('c') }} a: meta.yaml: | package: name: a version: 0.1 a: meta.yaml: | package: name: a version: 0.1 source: url: https://bioconda.github.io/index.html a: meta.yaml: | package: name: a version: 0.1 source: fn: index.html url: https://bioconda.github.io/index.html #def test_bioconductor_37(): # run_lint( # func=lint_functions.bioconductor_37, # should_pass=[ # ''' # a: # meta.yaml: | # {% set bioc = "3.6" %} # package: # name: a # version: 0.1 # ''', # ], # should_fail=[ # ''' # a: # meta.yaml: | # {% set bioc = "3.7" %} # package: # name: a # version: 0.1 # ''', # ''' # a: # meta.yaml: | # {% set bioc = "release" %} # package: # name: a # version: 0.1 # ''', # ] # )
2.669441
3
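Note on the record above: its docstring describes a `run_lint` helper (lint function plus recipe definitions that should pass or fail), and the commented-out `test_bioconductor_37` at the end shows the calling pattern. The sketch below mirrors that pattern for the `missing_home` lint used throughout the record; `run_lint` is the helper documented there (defined elsewhere in the same test module) and is assumed to be in scope, so this is an illustration rather than code taken from the file.

```python
# Sketch of a lint test written against the run_lint helper described above;
# assumes run_lint (from the same test module) and bioconda_utils.lint_functions
# are importable. Recipe strings are passed through to helpers.Recipes.
from bioconda_utils import lint_functions


def test_missing_home():
    run_lint(
        func=lint_functions.missing_home,
        should_pass=[
            """
            a:
              meta.yaml: |
                package:
                  name: a
                  version: "0.1"
                about:
                  home: "http://bioconda.github.io"
            """,
        ],
        should_fail=[
            """
            a:
              meta.yaml: |
                package:
                  name: a
                  version: "0.1"
            """,
        ],
    )
```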
monasca-log-api-2.9.0/monasca_log_api/tests/test_policy.py
scottwedge/OpenStack-Stein
0
6631533
<gh_stars>0 # Copyright 2016-2017 FUJITSU LIMITED # Copyright 2018 OP5 AB # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from falcon import testing from monasca_common.policy import policy_engine as policy from oslo_context import context from oslo_policy import policy as os_policy from monasca_log_api.app.base import request from monasca_log_api.policies import roles_list_to_check_str from monasca_log_api.tests import base class TestPolicyFileCase(base.BaseTestCase): def setUp(self): super(TestPolicyFileCase, self).setUp() self.context = context.RequestContext(user='fake', tenant='fake', roles=['fake']) self.target = {'tenant_id': 'fake'} def test_modified_policy_reloads(self): tmp_file = \ self.create_tempfiles(files=[('policies', '{}')], ext='.yaml')[0] base.BaseTestCase.conf_override(policy_file=tmp_file, group='oslo_policy') policy.reset() policy.init() action = 'example:test' rule = os_policy.RuleDefault(action, '') policy._ENFORCER.register_defaults([rule]) with open(tmp_file, 'w') as policy_file: policy_file.write('{"example:test": ""}') policy.authorize(self.context, action, self.target) with open(tmp_file, 'w') as policy_file: policy_file.write('{"example:test": "!"}') policy._ENFORCER.load_rules(True) self.assertRaises(os_policy.PolicyNotAuthorized, policy.authorize, self.context, action, self.target) class TestPolicyCase(base.BaseTestCase): def setUp(self): super(TestPolicyCase, self).setUp() rules = [ os_policy.RuleDefault("true", "@"), os_policy.RuleDefault("example:allowed", "@"), os_policy.RuleDefault("example:denied", "!"), os_policy.RuleDefault("example:lowercase_monasca_user", "role:monasca_user or role:sysadmin"), os_policy.RuleDefault("example:uppercase_monasca_user", "role:MONASCA_USER or role:sysadmin"), ] policy.reset() policy.init() policy._ENFORCER.register_defaults(rules) def test_authorize_nonexist_action_throws(self): action = "example:noexist" ctx = request.Request( testing.create_environ( path="/", headers={ "X_USER_ID": "fake", "X_PROJECT_ID": "fake", "X_ROLES": "member" } ) ) self.assertRaises(os_policy.PolicyNotRegistered, policy.authorize, ctx.context, action, {}) def test_authorize_bad_action_throws(self): action = "example:denied" ctx = request.Request( testing.create_environ( path="/", headers={ "X_USER_ID": "fake", "X_PROJECT_ID": "fake", "X_ROLES": "member" } ) ) self.assertRaises(os_policy.PolicyNotAuthorized, policy.authorize, ctx.context, action, {}) def test_authorize_bad_action_no_exception(self): action = "example:denied" ctx = request.Request( testing.create_environ( path="/", headers={ "X_USER_ID": "fake", "X_PROJECT_ID": "fake", "X_ROLES": "member" } ) ) result = policy.authorize(ctx.context, action, {}, False) self.assertFalse(result) def test_authorize_good_action(self): action = "example:allowed" ctx = request.Request( testing.create_environ( path="/", headers={ "X_USER_ID": "fake", "X_PROJECT_ID": "fake", "X_ROLES": "member" } ) ) result = policy.authorize(ctx.context, action, {}, False) self.assertTrue(result) def test_ignore_case_role_check(self): 
lowercase_action = "example:lowercase_monasca_user" uppercase_action = "example:uppercase_monasca_user" monasca_user_context = request.Request( testing.create_environ( path="/", headers={ "X_USER_ID": "monasca_user", "X_PROJECT_ID": "fake", "X_ROLES": "MONASCA_user" } ) ) self.assertTrue(policy.authorize(monasca_user_context.context, lowercase_action, {})) self.assertTrue(policy.authorize(monasca_user_context.context, uppercase_action, {})) class RegisteredPoliciesTestCase(base.BaseTestCase): def __init__(self, *args, **kwds): super(RegisteredPoliciesTestCase, self).__init__(*args, **kwds) self.default_roles = ['monasca-user', 'admin'] def test_healthchecks_policies_roles(self): healthcheck_policies = { 'log_api:healthcheck:head': ['any_role'], 'log_api:healthcheck:get': ['any_role'] } self._assert_rules(healthcheck_policies) def test_versions_policies_roles(self): versions_policies = { 'log_api:versions:get': ['any_role'] } self._assert_rules(versions_policies) def test_logs_policies_roles(self): logs_policies = { 'log_api:logs:post': self.default_roles } self._assert_rules(logs_policies) def _assert_rules(self, policies_list): for policy_name in policies_list: registered_rule = policy.get_rules()[policy_name] if hasattr(registered_rule, 'rules'): self.assertEqual(len(registered_rule.rules), len(policies_list[policy_name])) for role in policies_list[policy_name]: ctx = self._get_request_context(role) self.assertTrue(policy.authorize(ctx.context, policy_name, {}) ) @staticmethod def _get_request_context(role): return request.Request( testing.create_environ( path='/', headers={'X_ROLES': role} ) ) class PolicyUtilsTestCase(base.BaseTestCase): def test_roles_list_to_check_str(self): self.assertEqual(roles_list_to_check_str(['test_role']), 'role:test_role') self.assertEqual(roles_list_to_check_str(['role1', 'role2', 'role3']), 'role:role1 or role:role2 or role:role3') self.assertEqual(roles_list_to_check_str(['@']), '@') self.assertEqual(roles_list_to_check_str(['role1', '@', 'role2']), 'role:role1 or @ or role:role2') self.assertIsNone(roles_list_to_check_str(None))
# Copyright 2016-2017 FUJITSU LIMITED # Copyright 2018 OP5 AB # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from falcon import testing from monasca_common.policy import policy_engine as policy from oslo_context import context from oslo_policy import policy as os_policy from monasca_log_api.app.base import request from monasca_log_api.policies import roles_list_to_check_str from monasca_log_api.tests import base class TestPolicyFileCase(base.BaseTestCase): def setUp(self): super(TestPolicyFileCase, self).setUp() self.context = context.RequestContext(user='fake', tenant='fake', roles=['fake']) self.target = {'tenant_id': 'fake'} def test_modified_policy_reloads(self): tmp_file = \ self.create_tempfiles(files=[('policies', '{}')], ext='.yaml')[0] base.BaseTestCase.conf_override(policy_file=tmp_file, group='oslo_policy') policy.reset() policy.init() action = 'example:test' rule = os_policy.RuleDefault(action, '') policy._ENFORCER.register_defaults([rule]) with open(tmp_file, 'w') as policy_file: policy_file.write('{"example:test": ""}') policy.authorize(self.context, action, self.target) with open(tmp_file, 'w') as policy_file: policy_file.write('{"example:test": "!"}') policy._ENFORCER.load_rules(True) self.assertRaises(os_policy.PolicyNotAuthorized, policy.authorize, self.context, action, self.target) class TestPolicyCase(base.BaseTestCase): def setUp(self): super(TestPolicyCase, self).setUp() rules = [ os_policy.RuleDefault("true", "@"), os_policy.RuleDefault("example:allowed", "@"), os_policy.RuleDefault("example:denied", "!"), os_policy.RuleDefault("example:lowercase_monasca_user", "role:monasca_user or role:sysadmin"), os_policy.RuleDefault("example:uppercase_monasca_user", "role:MONASCA_USER or role:sysadmin"), ] policy.reset() policy.init() policy._ENFORCER.register_defaults(rules) def test_authorize_nonexist_action_throws(self): action = "example:noexist" ctx = request.Request( testing.create_environ( path="/", headers={ "X_USER_ID": "fake", "X_PROJECT_ID": "fake", "X_ROLES": "member" } ) ) self.assertRaises(os_policy.PolicyNotRegistered, policy.authorize, ctx.context, action, {}) def test_authorize_bad_action_throws(self): action = "example:denied" ctx = request.Request( testing.create_environ( path="/", headers={ "X_USER_ID": "fake", "X_PROJECT_ID": "fake", "X_ROLES": "member" } ) ) self.assertRaises(os_policy.PolicyNotAuthorized, policy.authorize, ctx.context, action, {}) def test_authorize_bad_action_no_exception(self): action = "example:denied" ctx = request.Request( testing.create_environ( path="/", headers={ "X_USER_ID": "fake", "X_PROJECT_ID": "fake", "X_ROLES": "member" } ) ) result = policy.authorize(ctx.context, action, {}, False) self.assertFalse(result) def test_authorize_good_action(self): action = "example:allowed" ctx = request.Request( testing.create_environ( path="/", headers={ "X_USER_ID": "fake", "X_PROJECT_ID": "fake", "X_ROLES": "member" } ) ) result = policy.authorize(ctx.context, action, {}, False) self.assertTrue(result) def test_ignore_case_role_check(self): 
lowercase_action = "example:lowercase_monasca_user" uppercase_action = "example:uppercase_monasca_user" monasca_user_context = request.Request( testing.create_environ( path="/", headers={ "X_USER_ID": "monasca_user", "X_PROJECT_ID": "fake", "X_ROLES": "MONASCA_user" } ) ) self.assertTrue(policy.authorize(monasca_user_context.context, lowercase_action, {})) self.assertTrue(policy.authorize(monasca_user_context.context, uppercase_action, {})) class RegisteredPoliciesTestCase(base.BaseTestCase): def __init__(self, *args, **kwds): super(RegisteredPoliciesTestCase, self).__init__(*args, **kwds) self.default_roles = ['monasca-user', 'admin'] def test_healthchecks_policies_roles(self): healthcheck_policies = { 'log_api:healthcheck:head': ['any_role'], 'log_api:healthcheck:get': ['any_role'] } self._assert_rules(healthcheck_policies) def test_versions_policies_roles(self): versions_policies = { 'log_api:versions:get': ['any_role'] } self._assert_rules(versions_policies) def test_logs_policies_roles(self): logs_policies = { 'log_api:logs:post': self.default_roles } self._assert_rules(logs_policies) def _assert_rules(self, policies_list): for policy_name in policies_list: registered_rule = policy.get_rules()[policy_name] if hasattr(registered_rule, 'rules'): self.assertEqual(len(registered_rule.rules), len(policies_list[policy_name])) for role in policies_list[policy_name]: ctx = self._get_request_context(role) self.assertTrue(policy.authorize(ctx.context, policy_name, {}) ) @staticmethod def _get_request_context(role): return request.Request( testing.create_environ( path='/', headers={'X_ROLES': role} ) ) class PolicyUtilsTestCase(base.BaseTestCase): def test_roles_list_to_check_str(self): self.assertEqual(roles_list_to_check_str(['test_role']), 'role:test_role') self.assertEqual(roles_list_to_check_str(['role1', 'role2', 'role3']), 'role:role1 or role:role2 or role:role3') self.assertEqual(roles_list_to_check_str(['@']), '@') self.assertEqual(roles_list_to_check_str(['role1', '@', 'role2']), 'role:role1 or @ or role:role2') self.assertIsNone(roles_list_to_check_str(None))
en
0.828335
# Copyright 2016-2017 FUJITSU LIMITED # Copyright 2018 OP5 AB # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
1.656169
2
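The test module in the record above drives oslo.policy through monasca_common's `policy_engine` wrapper. As a rough illustration of the underlying mechanics it relies on (register rule defaults, then check a credential dict against them), here is a minimal sketch that uses oslo.policy directly; it is not monasca-log-api code, and the rule names and credentials are made up.

```python
# Minimal oslo.policy sketch (assumes oslo.config and oslo.policy are installed);
# rule names and credentials below are illustrative only.
from oslo_config import cfg
from oslo_policy import policy as os_policy

cfg.CONF([], project="example")          # initialize config without CLI args
enforcer = os_policy.Enforcer(cfg.CONF)
enforcer.register_defaults([
    os_policy.RuleDefault("example:allowed", "@"),                  # always passes
    os_policy.RuleDefault("example:monasca_user", "role:monasca_user"),
])

creds = {"roles": ["monasca_user"], "project_id": "fake", "user_id": "fake"}

# authorize() behaves like enforce() but raises PolicyNotRegistered for
# rules that were never registered, which is what the tests above exercise.
print(enforcer.authorize("example:allowed", {}, creds, do_raise=False))        # True
print(enforcer.authorize("example:monasca_user", {}, creds, do_raise=False))   # True
```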
tests/unit/test_merged_config_parser.py
jmcgill298/flake8
0
6631534
"""Unit tests for flake8.options.config.MergedConfigParser.""" import os import mock import pytest from flake8.options import config from flake8.options import manager @pytest.fixture def optmanager(): """Generate an OptionManager with simple values.""" return manager.OptionManager(prog='flake8', version='3.0.0a1') @pytest.fixture def config_finder(): """Generate a simple ConfigFileFinder.""" return config.ConfigFileFinder('flake8', [], []) def test_parse_cli_config(optmanager, config_finder): """Parse the specified config file as a cli config file.""" optmanager.add_option('--exclude', parse_from_config=True, comma_separated_list=True, normalize_paths=True) optmanager.add_option('--ignore', parse_from_config=True, comma_separated_list=True) optmanager.add_option('--verbose', parse_from_config=True, action='count') optmanager.add_option('--quiet', parse_from_config=True, action='count') parser = config.MergedConfigParser(optmanager, config_finder) parsed_config = parser.parse_cli_config( 'tests/fixtures/config_files/cli-specified.ini' ) assert parsed_config == { 'ignore': ['E123', 'W234', 'E111'], 'exclude': [ os.path.abspath('foo/'), os.path.abspath('bar/'), os.path.abspath('bogus/'), ], 'verbose': 2, 'quiet': 1, } @pytest.mark.parametrize('filename,is_configured_by', [ ('tests/fixtures/config_files/cli-specified.ini', True), ('tests/fixtures/config_files/no-flake8-section.ini', False), ]) def test_is_configured_by( filename, is_configured_by, optmanager, config_finder): """Verify the behaviour of the is_configured_by method.""" parsed_config, _ = config.ConfigFileFinder._read_config(filename) parser = config.MergedConfigParser(optmanager, config_finder) assert parser.is_configured_by(parsed_config) is is_configured_by def test_parse_user_config(optmanager, config_finder): """Verify parsing of user config files.""" optmanager.add_option('--exclude', parse_from_config=True, comma_separated_list=True, normalize_paths=True) optmanager.add_option('--ignore', parse_from_config=True, comma_separated_list=True) optmanager.add_option('--verbose', parse_from_config=True, action='count') optmanager.add_option('--quiet', parse_from_config=True, action='count') parser = config.MergedConfigParser(optmanager, config_finder) with mock.patch.object(parser.config_finder, 'user_config_file') as usercf: usercf.return_value = 'tests/fixtures/config_files/cli-specified.ini' parsed_config = parser.parse_user_config() assert parsed_config == { 'ignore': ['E123', 'W234', 'E111'], 'exclude': [ os.path.abspath('foo/'), os.path.abspath('bar/'), os.path.abspath('bogus/'), ], 'verbose': 2, 'quiet': 1, } def test_parse_local_config(optmanager, config_finder): """Verify parsing of local config files.""" optmanager.add_option('--exclude', parse_from_config=True, comma_separated_list=True, normalize_paths=True) optmanager.add_option('--ignore', parse_from_config=True, comma_separated_list=True) optmanager.add_option('--verbose', parse_from_config=True, action='count') optmanager.add_option('--quiet', parse_from_config=True, action='count') parser = config.MergedConfigParser(optmanager, config_finder) with mock.patch.object(config_finder, 'local_config_files') as localcfs: localcfs.return_value = [ 'tests/fixtures/config_files/cli-specified.ini' ] parsed_config = parser.parse_local_config() assert parsed_config == { 'ignore': ['E123', 'W234', 'E111'], 'exclude': [ os.path.abspath('foo/'), os.path.abspath('bar/'), os.path.abspath('bogus/'), ], 'verbose': 2, 'quiet': 1, } def test_merge_user_and_local_config(optmanager, 
config_finder): """Verify merging of parsed user and local config files.""" optmanager.add_option('--exclude', parse_from_config=True, comma_separated_list=True, normalize_paths=True) optmanager.add_option('--ignore', parse_from_config=True, comma_separated_list=True) optmanager.add_option('--select', parse_from_config=True, comma_separated_list=True) parser = config.MergedConfigParser(optmanager, config_finder) with mock.patch.object(config_finder, 'local_config_files') as localcfs: localcfs.return_value = [ 'tests/fixtures/config_files/local-config.ini' ] with mock.patch.object(config_finder, 'user_config_file') as usercf: usercf.return_value = ('tests/fixtures/config_files/' 'user-config.ini') parsed_config = parser.merge_user_and_local_config() assert parsed_config == { 'exclude': [ os.path.abspath('docs/') ], 'ignore': ['D203'], 'select': ['E', 'W', 'F'], } def test_parse_isolates_config(optmanager): """Verify behaviour of the parse method with isolated=True.""" config_finder = mock.MagicMock() parser = config.MergedConfigParser(optmanager, config_finder) assert parser.parse(isolated=True) == {} assert config_finder.local_configs.called is False assert config_finder.user_config.called is False def test_parse_uses_cli_config(optmanager): """Verify behaviour of the parse method with a specified config.""" config_finder = mock.MagicMock() parser = config.MergedConfigParser(optmanager, config_finder) parser.parse(cli_config='foo.ini') config_finder.cli_config.assert_called_once_with('foo.ini') @pytest.mark.parametrize('config_fixture_path', [ 'tests/fixtures/config_files/cli-specified.ini', 'tests/fixtures/config_files/cli-specified-with-inline-comments.ini', 'tests/fixtures/config_files/cli-specified-without-inline-comments.ini', ]) def test_parsed_configs_are_equivalent( optmanager, config_finder, config_fixture_path): """Verify the each file matches the expected parsed output. This is used to ensure our documented behaviour does not regress. """ optmanager.add_option('--exclude', parse_from_config=True, comma_separated_list=True, normalize_paths=True) optmanager.add_option('--ignore', parse_from_config=True, comma_separated_list=True) parser = config.MergedConfigParser(optmanager, config_finder) with mock.patch.object(config_finder, 'local_config_files') as localcfs: localcfs.return_value = [config_fixture_path] with mock.patch.object(config_finder, 'user_config_file') as usercf: usercf.return_value = [] parsed_config = parser.merge_user_and_local_config() assert parsed_config['ignore'] == ['E123', 'W234', 'E111'] assert parsed_config['exclude'] == [ os.path.abspath('foo/'), os.path.abspath('bar/'), os.path.abspath('bogus/'), ] @pytest.mark.parametrize('config_file', [ 'tests/fixtures/config_files/config-with-hyphenated-options.ini' ]) def test_parsed_hyphenated_and_underscored_names( optmanager, config_finder, config_file): """Verify we find hyphenated option names as well as underscored. This tests for options like --max-line-length and --enable-extensions which are able to be specified either as max-line-length or max_line_length in our config files. 
""" optmanager.add_option('--max-line-length', parse_from_config=True, type='int') optmanager.add_option('--enable-extensions', parse_from_config=True, comma_separated_list=True) parser = config.MergedConfigParser(optmanager, config_finder) with mock.patch.object(config_finder, 'local_config_files') as localcfs: localcfs.return_value = [config_file] with mock.patch.object(config_finder, 'user_config_file') as usercf: usercf.return_value = [] parsed_config = parser.merge_user_and_local_config() assert parsed_config['max_line_length'] == 110 assert parsed_config['enable_extensions'] == ['H101', 'H235']
"""Unit tests for flake8.options.config.MergedConfigParser.""" import os import mock import pytest from flake8.options import config from flake8.options import manager @pytest.fixture def optmanager(): """Generate an OptionManager with simple values.""" return manager.OptionManager(prog='flake8', version='3.0.0a1') @pytest.fixture def config_finder(): """Generate a simple ConfigFileFinder.""" return config.ConfigFileFinder('flake8', [], []) def test_parse_cli_config(optmanager, config_finder): """Parse the specified config file as a cli config file.""" optmanager.add_option('--exclude', parse_from_config=True, comma_separated_list=True, normalize_paths=True) optmanager.add_option('--ignore', parse_from_config=True, comma_separated_list=True) optmanager.add_option('--verbose', parse_from_config=True, action='count') optmanager.add_option('--quiet', parse_from_config=True, action='count') parser = config.MergedConfigParser(optmanager, config_finder) parsed_config = parser.parse_cli_config( 'tests/fixtures/config_files/cli-specified.ini' ) assert parsed_config == { 'ignore': ['E123', 'W234', 'E111'], 'exclude': [ os.path.abspath('foo/'), os.path.abspath('bar/'), os.path.abspath('bogus/'), ], 'verbose': 2, 'quiet': 1, } @pytest.mark.parametrize('filename,is_configured_by', [ ('tests/fixtures/config_files/cli-specified.ini', True), ('tests/fixtures/config_files/no-flake8-section.ini', False), ]) def test_is_configured_by( filename, is_configured_by, optmanager, config_finder): """Verify the behaviour of the is_configured_by method.""" parsed_config, _ = config.ConfigFileFinder._read_config(filename) parser = config.MergedConfigParser(optmanager, config_finder) assert parser.is_configured_by(parsed_config) is is_configured_by def test_parse_user_config(optmanager, config_finder): """Verify parsing of user config files.""" optmanager.add_option('--exclude', parse_from_config=True, comma_separated_list=True, normalize_paths=True) optmanager.add_option('--ignore', parse_from_config=True, comma_separated_list=True) optmanager.add_option('--verbose', parse_from_config=True, action='count') optmanager.add_option('--quiet', parse_from_config=True, action='count') parser = config.MergedConfigParser(optmanager, config_finder) with mock.patch.object(parser.config_finder, 'user_config_file') as usercf: usercf.return_value = 'tests/fixtures/config_files/cli-specified.ini' parsed_config = parser.parse_user_config() assert parsed_config == { 'ignore': ['E123', 'W234', 'E111'], 'exclude': [ os.path.abspath('foo/'), os.path.abspath('bar/'), os.path.abspath('bogus/'), ], 'verbose': 2, 'quiet': 1, } def test_parse_local_config(optmanager, config_finder): """Verify parsing of local config files.""" optmanager.add_option('--exclude', parse_from_config=True, comma_separated_list=True, normalize_paths=True) optmanager.add_option('--ignore', parse_from_config=True, comma_separated_list=True) optmanager.add_option('--verbose', parse_from_config=True, action='count') optmanager.add_option('--quiet', parse_from_config=True, action='count') parser = config.MergedConfigParser(optmanager, config_finder) with mock.patch.object(config_finder, 'local_config_files') as localcfs: localcfs.return_value = [ 'tests/fixtures/config_files/cli-specified.ini' ] parsed_config = parser.parse_local_config() assert parsed_config == { 'ignore': ['E123', 'W234', 'E111'], 'exclude': [ os.path.abspath('foo/'), os.path.abspath('bar/'), os.path.abspath('bogus/'), ], 'verbose': 2, 'quiet': 1, } def test_merge_user_and_local_config(optmanager, 
config_finder): """Verify merging of parsed user and local config files.""" optmanager.add_option('--exclude', parse_from_config=True, comma_separated_list=True, normalize_paths=True) optmanager.add_option('--ignore', parse_from_config=True, comma_separated_list=True) optmanager.add_option('--select', parse_from_config=True, comma_separated_list=True) parser = config.MergedConfigParser(optmanager, config_finder) with mock.patch.object(config_finder, 'local_config_files') as localcfs: localcfs.return_value = [ 'tests/fixtures/config_files/local-config.ini' ] with mock.patch.object(config_finder, 'user_config_file') as usercf: usercf.return_value = ('tests/fixtures/config_files/' 'user-config.ini') parsed_config = parser.merge_user_and_local_config() assert parsed_config == { 'exclude': [ os.path.abspath('docs/') ], 'ignore': ['D203'], 'select': ['E', 'W', 'F'], } def test_parse_isolates_config(optmanager): """Verify behaviour of the parse method with isolated=True.""" config_finder = mock.MagicMock() parser = config.MergedConfigParser(optmanager, config_finder) assert parser.parse(isolated=True) == {} assert config_finder.local_configs.called is False assert config_finder.user_config.called is False def test_parse_uses_cli_config(optmanager): """Verify behaviour of the parse method with a specified config.""" config_finder = mock.MagicMock() parser = config.MergedConfigParser(optmanager, config_finder) parser.parse(cli_config='foo.ini') config_finder.cli_config.assert_called_once_with('foo.ini') @pytest.mark.parametrize('config_fixture_path', [ 'tests/fixtures/config_files/cli-specified.ini', 'tests/fixtures/config_files/cli-specified-with-inline-comments.ini', 'tests/fixtures/config_files/cli-specified-without-inline-comments.ini', ]) def test_parsed_configs_are_equivalent( optmanager, config_finder, config_fixture_path): """Verify the each file matches the expected parsed output. This is used to ensure our documented behaviour does not regress. """ optmanager.add_option('--exclude', parse_from_config=True, comma_separated_list=True, normalize_paths=True) optmanager.add_option('--ignore', parse_from_config=True, comma_separated_list=True) parser = config.MergedConfigParser(optmanager, config_finder) with mock.patch.object(config_finder, 'local_config_files') as localcfs: localcfs.return_value = [config_fixture_path] with mock.patch.object(config_finder, 'user_config_file') as usercf: usercf.return_value = [] parsed_config = parser.merge_user_and_local_config() assert parsed_config['ignore'] == ['E123', 'W234', 'E111'] assert parsed_config['exclude'] == [ os.path.abspath('foo/'), os.path.abspath('bar/'), os.path.abspath('bogus/'), ] @pytest.mark.parametrize('config_file', [ 'tests/fixtures/config_files/config-with-hyphenated-options.ini' ]) def test_parsed_hyphenated_and_underscored_names( optmanager, config_finder, config_file): """Verify we find hyphenated option names as well as underscored. This tests for options like --max-line-length and --enable-extensions which are able to be specified either as max-line-length or max_line_length in our config files. 
""" optmanager.add_option('--max-line-length', parse_from_config=True, type='int') optmanager.add_option('--enable-extensions', parse_from_config=True, comma_separated_list=True) parser = config.MergedConfigParser(optmanager, config_finder) with mock.patch.object(config_finder, 'local_config_files') as localcfs: localcfs.return_value = [config_file] with mock.patch.object(config_finder, 'user_config_file') as usercf: usercf.return_value = [] parsed_config = parser.merge_user_and_local_config() assert parsed_config['max_line_length'] == 110 assert parsed_config['enable_extensions'] == ['H101', 'H235']
en
0.72653
Unit tests for flake8.options.config.MergedConfigParser. Generate an OptionManager with simple values. Generate a simple ConfigFileFinder. Parse the specified config file as a cli config file. Verify the behaviour of the is_configured_by method. Verify parsing of user config files. Verify parsing of local config files. Verify merging of parsed user and local config files. Verify behaviour of the parse method with isolated=True. Verify behaviour of the parse method with a specified config. Verify the each file matches the expected parsed output. This is used to ensure our documented behaviour does not regress. Verify we find hyphenated option names as well as underscored. This tests for options like --max-line-length and --enable-extensions which are able to be specified either as max-line-length or max_line_length in our config files.
2.491496
2
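The fixture .ini files referenced in the record above are not reproduced here, but the calling pattern the tests exercise is. The sketch below condenses that flow outside pytest, using the same classes and fixture path as the tests; it only runs inside a flake8 3.x source checkout where that fixture file exists.

```python
# Condensed version of the flow the tests above exercise; assumes a flake8 3.x
# checkout so the fixture file is present on disk.
from flake8.options import config, manager

optmanager = manager.OptionManager(prog="flake8", version="3.0.0a1")
optmanager.add_option("--ignore", parse_from_config=True,
                      comma_separated_list=True)

config_finder = config.ConfigFileFinder("flake8", [], [])
parser = config.MergedConfigParser(optmanager, config_finder)

parsed = parser.parse_cli_config("tests/fixtures/config_files/cli-specified.ini")
print(parsed["ignore"])   # ['E123', 'W234', 'E111'] per the assertions above
```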
DailyProgrammer/DP20120709A.py
DayGitH/Python-Challenges
2
6631535
""" The Fibonacci numbers, which we are all familiar with, start like this: 0,1,1,2,3,5,8,13,21,34,... Where each new number in the sequence is the sum of the previous two. It turns out that by summing different Fibonacci numbers with each other, you can create every single positive integer. In fact, a much stronger statement holds: Every single positive integer can be represented in one and only one way as a sum of non-consecutive Fibonacci numbers. This is called the number's "Zeckendorf representation" [http://en.wikipedia.org/wiki/Zeckendorf%27s_theorem]. For instance, the Zeckendorf representation of the number 100 is 89 + 8 + 3, and the Zeckendorf representation of 1234 is 987 + 233 + 13 + 1. Note that all these numbers are Fibonacci numbers, and that they are non-consecutive (i.e. no two numbers in a Zeckendorf representation can be next to each other in the Fibonacci sequence). There are other ways of summing Fibonacci numbers to get these numbers. For instance, 100 is also equal to 89 + 5 + 3 + 2 + 1, but 1, 2, 3, 5 are all consecutive Fibonacci numbers. If no consecutive Fibonacci numbers are allowed, the representation is unique. Finding the Zeckendorf representation is actually not very hard. Lets use the number 100 as an example of how it's done: First, you find the largest fibonacci number less than or equal to 100. In this case that is 89. This number will always be of the representation, so we remember that number and proceed recursively, and figure out the representation of 100 - 89 = 11. The largest Fibonacci number less than or equal to 11 is 8. We remember that number and proceed recursively with 11 - 8 = 3. 3 is a Fibonacci number itself, so now we're done. The answer is 89 + 8 + 3. Write a program that finds the Zeckendorf representation of different numbers. What is the Zeckendorf representation of 315 ? Thanks to SwimmingPastaDevil for suggesting this problem in /r/dailyprogrammer_ideas! Do you have a problem you think would be good for us? Why not head over there and post it? """ def zeckendorf(target, fib_list): res = [] for f in fib_list[::-1]: if f <= target: res.append(f) target -= f return res def get_fibonacci_list(target): """ returns fibonacci numbers upto less than the target and not including zero""" fib = [1, 1] while fib[-1] < target: fib.append(fib[-1] + fib[-2]) return fib[:-1] def main(): target = 3**15 fib_list = get_fibonacci_list(target) zeck = zeckendorf(target, fib_list) print(zeck) print(' 3**15 = {} \nsum of zeckendorf = {}'.format(3**15, sum(zeck))) if __name__ == "__main__": main()
""" The Fibonacci numbers, which we are all familiar with, start like this: 0,1,1,2,3,5,8,13,21,34,... Where each new number in the sequence is the sum of the previous two. It turns out that by summing different Fibonacci numbers with each other, you can create every single positive integer. In fact, a much stronger statement holds: Every single positive integer can be represented in one and only one way as a sum of non-consecutive Fibonacci numbers. This is called the number's "Zeckendorf representation" [http://en.wikipedia.org/wiki/Zeckendorf%27s_theorem]. For instance, the Zeckendorf representation of the number 100 is 89 + 8 + 3, and the Zeckendorf representation of 1234 is 987 + 233 + 13 + 1. Note that all these numbers are Fibonacci numbers, and that they are non-consecutive (i.e. no two numbers in a Zeckendorf representation can be next to each other in the Fibonacci sequence). There are other ways of summing Fibonacci numbers to get these numbers. For instance, 100 is also equal to 89 + 5 + 3 + 2 + 1, but 1, 2, 3, 5 are all consecutive Fibonacci numbers. If no consecutive Fibonacci numbers are allowed, the representation is unique. Finding the Zeckendorf representation is actually not very hard. Lets use the number 100 as an example of how it's done: First, you find the largest fibonacci number less than or equal to 100. In this case that is 89. This number will always be of the representation, so we remember that number and proceed recursively, and figure out the representation of 100 - 89 = 11. The largest Fibonacci number less than or equal to 11 is 8. We remember that number and proceed recursively with 11 - 8 = 3. 3 is a Fibonacci number itself, so now we're done. The answer is 89 + 8 + 3. Write a program that finds the Zeckendorf representation of different numbers. What is the Zeckendorf representation of 315 ? Thanks to SwimmingPastaDevil for suggesting this problem in /r/dailyprogrammer_ideas! Do you have a problem you think would be good for us? Why not head over there and post it? """ def zeckendorf(target, fib_list): res = [] for f in fib_list[::-1]: if f <= target: res.append(f) target -= f return res def get_fibonacci_list(target): """ returns fibonacci numbers upto less than the target and not including zero""" fib = [1, 1] while fib[-1] < target: fib.append(fib[-1] + fib[-2]) return fib[:-1] def main(): target = 3**15 fib_list = get_fibonacci_list(target) zeck = zeckendorf(target, fib_list) print(zeck) print(' 3**15 = {} \nsum of zeckendorf = {}'.format(3**15, sum(zeck))) if __name__ == "__main__": main()
en
0.909266
The Fibonacci numbers, which we are all familiar with, start like this: 0,1,1,2,3,5,8,13,21,34,... Where each new number in the sequence is the sum of the previous two. It turns out that by summing different Fibonacci numbers with each other, you can create every single positive integer. In fact, a much stronger statement holds: Every single positive integer can be represented in one and only one way as a sum of non-consecutive Fibonacci numbers. This is called the number's "Zeckendorf representation" [http://en.wikipedia.org/wiki/Zeckendorf%27s_theorem]. For instance, the Zeckendorf representation of the number 100 is 89 + 8 + 3, and the Zeckendorf representation of 1234 is 987 + 233 + 13 + 1. Note that all these numbers are Fibonacci numbers, and that they are non-consecutive (i.e. no two numbers in a Zeckendorf representation can be next to each other in the Fibonacci sequence). There are other ways of summing Fibonacci numbers to get these numbers. For instance, 100 is also equal to 89 + 5 + 3 + 2 + 1, but 1, 2, 3, 5 are all consecutive Fibonacci numbers. If no consecutive Fibonacci numbers are allowed, the representation is unique. Finding the Zeckendorf representation is actually not very hard. Lets use the number 100 as an example of how it's done: First, you find the largest fibonacci number less than or equal to 100. In this case that is 89. This number will always be of the representation, so we remember that number and proceed recursively, and figure out the representation of 100 - 89 = 11. The largest Fibonacci number less than or equal to 11 is 8. We remember that number and proceed recursively with 11 - 8 = 3. 3 is a Fibonacci number itself, so now we're done. The answer is 89 + 8 + 3. Write a program that finds the Zeckendorf representation of different numbers. What is the Zeckendorf representation of 315 ? Thanks to SwimmingPastaDevil for suggesting this problem in /r/dailyprogrammer_ideas! Do you have a problem you think would be good for us? Why not head over there and post it? returns fibonacci numbers upto less than the target and not including zero
3.983549
4
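Worked answer to the challenge question posed in the record above, using its own `get_fibonacci_list` and `zeckendorf` functions (assumed to be in scope): the greedy walk described in the problem gives 315 = 233 + 55 + 21 + 5 + 1, all non-consecutive Fibonacci numbers.

```python
# Answer the prompt's question for 315 with the functions defined above.
fib_list = get_fibonacci_list(315)
zeck = zeckendorf(315, fib_list)
print(zeck)          # [233, 55, 21, 5, 1]
print(sum(zeck))     # 315
```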
src/docs/conf.py
fractal-napari-plugins-collection/user-documentation
0
6631536
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.graphviz', 'sphinx.ext.githubpages', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.viewcode' ] # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Fractal Analytics Platform' copyright = ( u'(c) 2021 <NAME> Institute for Biomedical Research | University of Zurich' ) author = u'<NAME>' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'1.0' # The full version, including alpha/beta/rc tags. release = u'1.0.0' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # These patterns also affect html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # If true, generates auto-summary automatically. autosummary_generate = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'alabaster' # html_theme = 'nature' html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # Customize the HTML context html_context = { 'css_files': [ '_static/style.css', # override wide tables in RTD theme ], } # Customize the HTML side menu html_sidebars = { '**': [ 'globaltoc.html', #'relations.html', 'sourcelink.html', 'searchbox.html' ] } # -- Options for LaTeX output --------------------------------------------- latex_title = u'Fractal Analytics Platform \\\\Napari Plugins' latex_header = u'Fractal Analytics Platform | Napari Plugins' latex_authors = u'<NAME> Institute for Biomedical Research \\and University of Zurich' latex_engine = 'pdflatex' latex_elements = { 'papersize': 'a4paper', 'pointsize': '10pt', 'preamble' : r""" """, 'extraclassoptions': 'openany', # https://github.com/sphinx-doc/sphinx/issues/2622 'maketitle': r""" \noindent\rule{\textwidth}{1pt}\par \begingroup %% for PDF information dictionary \def\endgraf{ }\def\and{\& } \pdfstringdefDisableCommands{\def\\{, }} %% overwrite hyperref setup \hypersetup{ pdfauthor={%s}, pdftitle={%s} } \endgroup \begin{flushright} %% \sphinxlogo \sffamily\bfseries %% \py@HeaderFamily \vspace{75pt} {\Huge %s }\par \vspace{25pt} {\itshape\large Release %s \releaseinfo}\par \vspace{150pt} {\Large \begin{tabular}[t]{c} %s \end{tabular}}\par \vspace{25pt} \today \par \end{flushright} \setcounter{footnote}{0} \let\thanks\relax\let\maketitle\relax """ % ( latex_authors, latex_header, latex_title, release, latex_authors ) } latex_documents = [ ( master_doc, 'fap_napari_plugins.tex', u'Fractal Analytics Platform | Napari Plugins', latex_authors, 'manual', True # toctree_only ), ]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.graphviz', 'sphinx.ext.githubpages', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.viewcode' ] # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Fractal Analytics Platform' copyright = ( u'(c) 2021 <NAME> Institute for Biomedical Research | University of Zurich' ) author = u'<NAME>' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'1.0' # The full version, including alpha/beta/rc tags. release = u'1.0.0' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # These patterns also affect html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # If true, generates auto-summary automatically. autosummary_generate = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'alabaster' # html_theme = 'nature' html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # Customize the HTML context html_context = { 'css_files': [ '_static/style.css', # override wide tables in RTD theme ], } # Customize the HTML side menu html_sidebars = { '**': [ 'globaltoc.html', #'relations.html', 'sourcelink.html', 'searchbox.html' ] } # -- Options for LaTeX output --------------------------------------------- latex_title = u'Fractal Analytics Platform \\\\Napari Plugins' latex_header = u'Fractal Analytics Platform | Napari Plugins' latex_authors = u'<NAME> Institute for Biomedical Research \\and University of Zurich' latex_engine = 'pdflatex' latex_elements = { 'papersize': 'a4paper', 'pointsize': '10pt', 'preamble' : r""" """, 'extraclassoptions': 'openany', # https://github.com/sphinx-doc/sphinx/issues/2622 'maketitle': r""" \noindent\rule{\textwidth}{1pt}\par \begingroup %% for PDF information dictionary \def\endgraf{ }\def\and{\& } \pdfstringdefDisableCommands{\def\\{, }} %% overwrite hyperref setup \hypersetup{ pdfauthor={%s}, pdftitle={%s} } \endgroup \begin{flushright} %% \sphinxlogo \sffamily\bfseries %% \py@HeaderFamily \vspace{75pt} {\Huge %s }\par \vspace{25pt} {\itshape\large Release %s \releaseinfo}\par \vspace{150pt} {\Large \begin{tabular}[t]{c} %s \end{tabular}}\par \vspace{25pt} \today \par \end{flushright} \setcounter{footnote}{0} \let\thanks\relax\let\maketitle\relax """ % ( latex_authors, latex_header, latex_title, release, latex_authors ) } latex_documents = [ ( master_doc, 'fap_napari_plugins.tex', u'Fractal Analytics Platform | Napari Plugins', latex_authors, 'manual', True # toctree_only ), ]
en
0.630323
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] # The master toctree document. # General information about the project. # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # The full version, including alpha/beta/rc tags. # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # These patterns also affect html_static_path and html_extra_path # The name of the Pygments (syntax highlighting) style to use. # If true, `todo` and `todoList` produce output, else they produce nothing. # If true, generates auto-summary automatically. # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'alabaster' # html_theme = 'nature' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # Customize the HTML context # override wide tables in RTD theme # Customize the HTML side menu #'relations.html', # -- Options for LaTeX output --------------------------------------------- # https://github.com/sphinx-doc/sphinx/issues/2622 \noindent\rule{\textwidth}{1pt}\par \begingroup %% for PDF information dictionary \def\endgraf{ }\def\and{\& } \pdfstringdefDisableCommands{\def\\{, }} %% overwrite hyperref setup \hypersetup{ pdfauthor={%s}, pdftitle={%s} } \endgroup \begin{flushright} %% \sphinxlogo \sffamily\bfseries %% \py@HeaderFamily \vspace{75pt} {\Huge %s }\par \vspace{25pt} {\itshape\large Release %s \releaseinfo}\par \vspace{150pt} {\Large \begin{tabular}[t]{c} %s \end{tabular}}\par \vspace{25pt} \today \par \end{flushright} \setcounter{footnote}{0} \let\thanks\relax\let\maketitle\relax # toctree_only
1.646322
2
zeus/api/schemas/stats.py
edgerepo/zeus
0
6631537
<filename>zeus/api/schemas/stats.py from collections import defaultdict from marshmallow import Schema, fields, pre_dump class CoverageStatsSchema(Schema): lines_covered = fields.Integer() lines_uncovered = fields.Integer() diff_lines_covered = fields.Integer() diff_lines_uncovered = fields.Integer() class TestStatsStatsSchema(Schema): count = fields.Integer() failures = fields.Integer() duration = fields.Number() failures_unique = fields.Integer(allow_none=True) count_unique = fields.Integer(allow_none=True) class StyleViolationsStatsSchema(Schema): count = fields.Integer() class BundleStatsSchema(Schema): total_asset_size = fields.Integer() # should be "dumped" with a list of ItemStat instances class StatsSchema(Schema): coverage = fields.Nested(CoverageStatsSchema(), dump_only=True) tests = fields.Nested(TestStatsStatsSchema(), dump_only=True) style_violations = fields.Nested(StyleViolationsStatsSchema(), dump_only=True) bundle = fields.Nested(BundleStatsSchema(), dump_only=True) @pre_dump def process_stats(self, data): result = defaultdict(lambda: defaultdict(int)) for stat in data: bits = stat.name.split(".", 1) if len(bits) != 2: continue result[bits[0]][bits[1]] = stat.value return result
<filename>zeus/api/schemas/stats.py from collections import defaultdict from marshmallow import Schema, fields, pre_dump class CoverageStatsSchema(Schema): lines_covered = fields.Integer() lines_uncovered = fields.Integer() diff_lines_covered = fields.Integer() diff_lines_uncovered = fields.Integer() class TestStatsStatsSchema(Schema): count = fields.Integer() failures = fields.Integer() duration = fields.Number() failures_unique = fields.Integer(allow_none=True) count_unique = fields.Integer(allow_none=True) class StyleViolationsStatsSchema(Schema): count = fields.Integer() class BundleStatsSchema(Schema): total_asset_size = fields.Integer() # should be "dumped" with a list of ItemStat instances class StatsSchema(Schema): coverage = fields.Nested(CoverageStatsSchema(), dump_only=True) tests = fields.Nested(TestStatsStatsSchema(), dump_only=True) style_violations = fields.Nested(StyleViolationsStatsSchema(), dump_only=True) bundle = fields.Nested(BundleStatsSchema(), dump_only=True) @pre_dump def process_stats(self, data): result = defaultdict(lambda: defaultdict(int)) for stat in data: bits = stat.name.split(".", 1) if len(bits) != 2: continue result[bits[0]][bits[1]] = stat.value return result
en
0.874127
# should be "dumped" with a list of ItemStat instances
2.332183
2
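To illustrate the `pre_dump` hook in the record above: it expects an iterable of stat objects whose dotted names get regrouped into the nested sections the schema declares. The sketch below uses a namedtuple as a hypothetical stand-in for zeus's ItemStat model; the values are made up, and it assumes `StatsSchema` from the record is in scope.

```python
# Hypothetical stand-in for ItemStat, just to exercise StatsSchema.process_stats.
from collections import namedtuple

Stat = namedtuple("Stat", ["name", "value"])

stats = [
    Stat("tests.count", 10),
    Stat("tests.failures", 2),
    Stat("coverage.lines_covered", 120),
    Stat("bundle.total_asset_size", 52431),
    Stat("unscoped", 1),   # no "section.key" form, so process_stats skips it
]

result = StatsSchema().dump(stats)
# Under the marshmallow 2.x API this hook signature matches, dump() returns a
# MarshalResult; result.data holds the nested dict, e.g.
# {'tests': {'count': 10, 'failures': 2}, 'coverage': {'lines_covered': 120},
#  'bundle': {'total_asset_size': 52431}, ...}
print(result.data)
```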
plugins/cisco_umbrella_investigate/komand_cisco_umbrella_investigate/actions/samples/action.py
lukaszlaszuk/insightconnect-plugins
46
6631538
import komand from .schema import SamplesInput, SamplesOutput, Input # Custom imports below from komand.exceptions import PluginException class Samples(komand.Action): def __init__(self): super(self.__class__, self).__init__( name="samples", description="Return all samples associated with the domain", input=SamplesInput(), output=SamplesOutput(), ) def run(self, params={}): URL = params.get(Input.URL) limit = params.get("limit", None) offset = params.get("offset", None) sortby = params.get("sortby", None) if not limit or limit == 0: limit = 10 if not sortby or sortby == "": sortby = "score" if not offset: offset = 0 try: samples = self.connection.investigate.samples(URL, limit=limit, offset=offset, sortby=sortby) except Exception as e: raise PluginException(preset=PluginException.Preset.UNKNOWN, data=e) return samples def test(self): return { "limit": 1, "moreDataAvailable": False, "offset": 0, "query": "*", "samples": [], "totalResults": 0, }
import komand from .schema import SamplesInput, SamplesOutput, Input # Custom imports below from komand.exceptions import PluginException class Samples(komand.Action): def __init__(self): super(self.__class__, self).__init__( name="samples", description="Return all samples associated with the domain", input=SamplesInput(), output=SamplesOutput(), ) def run(self, params={}): URL = params.get(Input.URL) limit = params.get("limit", None) offset = params.get("offset", None) sortby = params.get("sortby", None) if not limit or limit == 0: limit = 10 if not sortby or sortby == "": sortby = "score" if not offset: offset = 0 try: samples = self.connection.investigate.samples(URL, limit=limit, offset=offset, sortby=sortby) except Exception as e: raise PluginException(preset=PluginException.Preset.UNKNOWN, data=e) return samples def test(self): return { "limit": 1, "moreDataAvailable": False, "offset": 0, "query": "*", "samples": [], "totalResults": 0, }
en
0.469003
# Custom imports below
2.137325
2
research/slim/plots.py
tathey1/models
0
6631539
<reponame>tathey1/models ''' These functions provide data for ROC and confusion matrix plots from results in txt files It is meant to read data from text files that have been produced by classify.py ''' from sklearn.metrics import confusion_matrix, roc_curve def conf_matrix(data_file): ''' Args: data_file path to text file tab delimited with a single header row then subsequent rows organized as image name, [n class probabilities], predicted class, actual class, correct Returns: nxn array for confusion matrix ''' f = open(data_file,'r') f.readline() #dequeue header line y_true=[] y_pred=[] for line in f: y_true.append(int(line.split()[-2])) y_pred.append(int(line.split()[-3])) return confusion_matrix(y_true,y_pred) def roc_curves(data_file): ''' Args: data_file path to text file (see args for conf_matrix for specifics) Returns: list of n roc curves each roc curve has 2 elements, the fprs and tprs ''' f = open(data_file, 'r') header = f.readline() header_items = header.split() num_classes = len(header_items) - 4 roc_curves = [] y_trues = [[] for i in range(num_classes)] y_scores = [[] for i in range(num_classes)] for line in f: split=line.split() true = int(split[-2]) for c in range(num_classes): y_trues[c].append(1 if true==c else 0) y_scores[c].append(float(split[c+1])) for c in range(num_classes): roc_curves.append(roc_curve(y_trues[c], y_scores[c])) return roc_curves
''' These functions provide data for ROC and confusion matrix plots from results in txt files It is meant to read data from text files that have been produced by classify.py ''' from sklearn.metrics import confusion_matrix, roc_curve def conf_matrix(data_file): ''' Args: data_file path to text file tab delimited with a single header row then subsequent rows organized as image name, [n class probabilities], predicted class, actual class, correct Returns: nxn array for confusion matrix ''' f = open(data_file,'r') f.readline() #dequeue header line y_true=[] y_pred=[] for line in f: y_true.append(int(line.split()[-2])) y_pred.append(int(line.split()[-3])) return confusion_matrix(y_true,y_pred) def roc_curves(data_file): ''' Args: data_file path to text file (see args for conf_matrix for specifics) Returns: list of n roc curves each roc curve has 2 elements, the fprs and tprs ''' f = open(data_file, 'r') header = f.readline() header_items = header.split() num_classes = len(header_items) - 4 roc_curves = [] y_trues = [[] for i in range(num_classes)] y_scores = [[] for i in range(num_classes)] for line in f: split=line.split() true = int(split[-2]) for c in range(num_classes): y_trues[c].append(1 if true==c else 0) y_scores[c].append(float(split[c+1])) for c in range(num_classes): roc_curves.append(roc_curve(y_trues[c], y_scores[c])) return roc_curves
en
0.863315
These functions provide data for ROC and confusion matrix plots from results in txt files It is meant to read data from text files that have been produced by classify.py Args: data_file path to text file tab delimited with a single header row then subsequent rows organized as image name, [n class probabilities], predicted class, actual class, correct Returns: nxn array for confusion matrix #dequeue header line Args: data_file path to text file (see args for conf_matrix for specifics) Returns: list of n roc curves each roc curve has 2 elements, the fprs and tprs
3.592316
4
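The docstrings in the record above pin down the expected results-file layout (whitespace-delimited: image name, per-class probabilities, predicted class, actual class, correct flag). The small check below builds such a file and runs both helpers on it; the file name and rows are illustrative, and `conf_matrix` and `roc_curves` from the record are assumed to be in scope.

```python
# Build a tiny two-class results file in the layout the docstrings describe,
# then feed it to both helpers. Paths and values are illustrative only.
rows = [
    "image prob_0 prob_1 predicted actual correct",   # header row (skipped by both readers)
    "img1.png 0.9 0.1 0 0 1",
    "img2.png 0.2 0.8 1 1 1",
    "img3.png 0.6 0.4 1 0 0",                         # a misclassified example
]
with open("results.txt", "w") as fh:
    fh.write("\n".join(rows) + "\n")

print(conf_matrix("results.txt"))                     # 2x2 confusion matrix
fpr, tpr, thresholds = roc_curves("results.txt")[0]   # ROC points for class 0
```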
data_importer/data_importer/util/cameradevice.py
JadeCong/HandControl_MuJoCo
0
6631540
"""Basis for depth camera devices. CameraDevice provides interface for managing depth cameras. It can be used to retrieve basic information and read depth and color frames. Copyright 2015 <NAME>, ICG, Graz University of Technology <<EMAIL>> This file is part of DeepPrior. DeepPrior is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. DeepPrior is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with DeepPrior. If not, see <http://www.gnu.org/licenses/>. """ import time import numpy import cv2 import scipy.misc import lib_dscapture as dsc import openni __author__ = "<NAME> <<EMAIL>>" __copyright__ = "Copyright 2015, ICG, Graz University of Technology, Austria" __credits__ = ["<NAME>"] __license__ = "GPL" __version__ = "1.0" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Development" class CameraDevice(object): """ Abstract class that handles all camera devices """ def __init__(self, mirror=False): """ Initialize device :param mirror: mirror all images :return: None """ self.mirror = mirror def start(self): """ Start device :return: None """ raise NotImplementedError("!") def stop(self): """ Stop device :return: None """ raise NotImplementedError("!") def saveDepth(self, data, file_name): """ Save data to file, we need special treatment because we have 16bit depth :param data: data :param file_name: file name :return: None """ im = scipy.misc.toimage(data.astype('uint16'), high=numpy.max(data), low=numpy.min(data), mode='I') im.save(file_name+'.png') # read with: b = scipy.misc.imread('my16bit.png') def saveRGB(self, data, file_name): """ Save data to file 3x8bit color :param data: data :param file_name: file name :return: None """ assert len(data.shape) == 3 scipy.misc.imsave(file_name+'.png', data) # read with: b = scipy.misc.imread('my.png') def getDepth(self): """ Return a median smoothed depth image :return: depth data as numpy array """ raise NotImplementedError("!") def getRGB(self): """ Return a bit color image :return: color image as numpy array """ raise NotImplementedError("!") def getGrayScale(self): """ Return a grayscale image :return: grayscale image as numpy array """ raise NotImplementedError("!") def getRGBD(self): """ Return a color + depth image :return: RGB-D image as 4-channel numpy array """ ret_rgb, c = self.getRGB() ret_d, d = self.getDepth() return ret_rgb and ret_d, c.astype('float32'), d.astype('float32') def getLastColorNum(self): """ Get frame number of last color frame :return: frame number """ raise NotImplementedError("!") def getLastDepthNum(self): """ Get frame number of last depth frame :return: frame number """ raise NotImplementedError("!") def getDepthIntrinsics(self): """ Get intrinsic matrix of depth camera :return: 3x3 intrinsic camera matrix """ raise NotImplementedError("!") def getColorIntrinsics(self): """ Get intrinsic matrix of color camera :return: 3x3 intrinsic camera matrix """ raise NotImplementedError("!") def getExtrinsics(self): """ Get extrinsic matrix from color to depth camera :return: 4x3 extrinsic camera matrix """ raise NotImplementedError("!") class CreativeCameraDevice(CameraDevice): """ DepthSense camera 
class, for Creative Gesture Camera, DS325, etc.""" def __init__(self, mirror=False): """ Initialize device :param mirror: mirror image """ super(CreativeCameraDevice, self).__init__(mirror) def start(self): """ Start device :return: None """ dsc.start() def stop(self): """ Stop device :return: None """ dsc.stop() def getDepth(self): """ Return a median smoothed depth image :return: depth data as numpy array """ if self.mirror: depth = dsc.getDepthMap()[:, ::-1] else: depth = dsc.getDepthMap() depth = cv2.medianBlur(depth, 3) return (numpy.count_nonzero(depth) != 0), numpy.asarray(depth, numpy.float32) def getRGB(self): """ Return a bit color image :return: color image as numpy array """ if self.mirror: image = dsc.getColourMap()[:, ::-1, :] else: image = dsc.getColourMap() return (numpy.count_nonzero(image) != 0), image def getGrayScale(self): """ Return a grayscale image :return: grayscale image as numpy array """ if self.mirror: image = dsc.getColorMap()[:, ::-1, :] else: image = dsc.getColorMap() grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) return (numpy.count_nonzero(grey) != 0), grey.transpose() def getLastColorNum(self): """ Get frame number of last color frame :return: frame number """ return dsc.getLastColorNum() def getLastDepthNum(self): """ Get frame number of last depth frame :return: frame number """ return dsc.getLastDepthNum() def getDepthIntrinsics(self): """ Get intrinsic matrix of depth camera :return: 3x3 intrinsic camera matrix """ return dsc.getDepthIntrinsics() def getColorIntrinsics(self): """ Get intrinsic matrix of color camera :return: 3x3 intrinsic camera matrix """ return dsc.getColorIntrinsics() def getExtrinsics(self): """ Get extrinsic matrix from color to depth camera :return: 4x3 extrinsic camera matrix """ return dsc.getExtrinsics() class DepthSenseCameraDevice(CameraDevice): """ Class for OpenNI based devices, e.g. 
Kinect, Asus Xtion """ def __init__(self, mirror=False): """ Initialize device :param mirror: mirror image """ super(DepthSenseCameraDevice, self).__init__(mirror) def start(self): """ Stop device :return: None """ self.ctx = openni.Context() self.ctx.init() # Create a depth generator self.depth = openni.DepthGenerator() self.depth.create(self.ctx) # Set it to VGA maps at 30 FPS self.depth.set_resolution_preset(openni.RES_VGA) self.depth.fps = 30 # Create a color generator self.color = openni.ImageGenerator() self.color.create(self.ctx) # Set it to VGA maps at 30 FPS self.color.set_resolution_preset(openni.RES_VGA) self.color.fps = 30 # Start generating self.ctx.start_generating_all() def stop(self): """ Stop device :return: None """ self.ctx.stop_generating_all() self.ctx.shutdown() def getDepth(self): """ Return a median smoothed depth image :return: depth data as numpy array """ # Get the pixel at these coordinates try: # Wait for new data to be available self.ctx.wait_one_update_all(self.depth) except openni.OpenNIError, err: print "Failed updating data:", err else: dpt = numpy.asarray(self.depth.get_tuple_depth_map(), dtype='float32').reshape(self.depth.map.height, self.depth.map.width) return True, dpt def getRGB(self): """ Return a median smoothed depth image :return: depth data as numpy array """ # Get the pixel at these coordinates try: # Wait for new data to be available self.ctx.wait_one_update_all(self.color) except openni.OpenNIError, err: print "Failed updating data:", err else: dpt = numpy.asarray(self.color.get_tuple_depth_map(), dtype='float32').reshape(self.color.map.height, self.color.map.width) return True, dpt class FileDevice(CameraDevice): """ Fake class to load images from file """ def __init__(self, filenames, importer, mirror=False): """ Initialize device :param mirror: mirror all images :return: None """ super(FileDevice, self).__init__(mirror) if not isinstance(filenames, list): raise ValueError("Files must be list of filenames.") self.filenames = filenames self.importer = importer self.depth_intrinsics = importer.getCameraIntrinsics() self.color_intrinsics = numpy.zeros((3, 3)) self.extrinsics = numpy.zeros((3, 4)) self.mirror = mirror self.last_color_num = 0 self.last_depth_num = 0 def start(self): """ Start device :return: None """ pass def stop(self): """ Stop device :return: None """ pass def getDepth(self): """ Return a median smoothed depth image :return: depth data as numpy array """ time.sleep(0.01) frame = self.importer.loadDepthMap(self.filenames[self.last_depth_num]) self.last_depth_num += 1 return True, frame def getRGB(self): """ Return a bit color image :return: color image as numpy array """ raise NotImplementedError("!") def getGrayScale(self): """ Return a grayscale image :return: grayscale image as numpy array """ raise NotImplementedError("!") def getRGBD(self): """ Return a color + depth image :return: RGB-D image as 4-channel numpy array """ ret_rgb, c = self.getRGB() ret_d, d = self.getDepth() return ret_rgb and ret_d, c.astype('float32'), d.astype('float32') def getLastColorNum(self): """ Get frame number of last color frame :return: frame number """ return self.last_color_num def getLastDepthNum(self): """ Get frame number of last depth frame :return: frame number """ return self.last_depth_num def getDepthIntrinsics(self): """ Get intrinsic matrix of depth camera :return: 3x3 intrinsic camera matrix """ return self.depth_intrinsics def getColorIntrinsics(self): """ Get intrinsic matrix of color camera :return: 3x3 intrinsic camera 
matrix """ return self.color_intrinsics def getExtrinsics(self): """ Get extrinsic matrix from color to depth camera :return: 4x3 extrinsic camera matrix """ return self.extrinsics
"""Basis for depth camera devices. CameraDevice provides interface for managing depth cameras. It can be used to retrieve basic information and read depth and color frames. Copyright 2015 <NAME>, ICG, Graz University of Technology <<EMAIL>> This file is part of DeepPrior. DeepPrior is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. DeepPrior is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with DeepPrior. If not, see <http://www.gnu.org/licenses/>. """ import time import numpy import cv2 import scipy.misc import lib_dscapture as dsc import openni __author__ = "<NAME> <<EMAIL>>" __copyright__ = "Copyright 2015, ICG, Graz University of Technology, Austria" __credits__ = ["<NAME>"] __license__ = "GPL" __version__ = "1.0" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Development" class CameraDevice(object): """ Abstract class that handles all camera devices """ def __init__(self, mirror=False): """ Initialize device :param mirror: mirror all images :return: None """ self.mirror = mirror def start(self): """ Start device :return: None """ raise NotImplementedError("!") def stop(self): """ Stop device :return: None """ raise NotImplementedError("!") def saveDepth(self, data, file_name): """ Save data to file, we need special treatment because we have 16bit depth :param data: data :param file_name: file name :return: None """ im = scipy.misc.toimage(data.astype('uint16'), high=numpy.max(data), low=numpy.min(data), mode='I') im.save(file_name+'.png') # read with: b = scipy.misc.imread('my16bit.png') def saveRGB(self, data, file_name): """ Save data to file 3x8bit color :param data: data :param file_name: file name :return: None """ assert len(data.shape) == 3 scipy.misc.imsave(file_name+'.png', data) # read with: b = scipy.misc.imread('my.png') def getDepth(self): """ Return a median smoothed depth image :return: depth data as numpy array """ raise NotImplementedError("!") def getRGB(self): """ Return a bit color image :return: color image as numpy array """ raise NotImplementedError("!") def getGrayScale(self): """ Return a grayscale image :return: grayscale image as numpy array """ raise NotImplementedError("!") def getRGBD(self): """ Return a color + depth image :return: RGB-D image as 4-channel numpy array """ ret_rgb, c = self.getRGB() ret_d, d = self.getDepth() return ret_rgb and ret_d, c.astype('float32'), d.astype('float32') def getLastColorNum(self): """ Get frame number of last color frame :return: frame number """ raise NotImplementedError("!") def getLastDepthNum(self): """ Get frame number of last depth frame :return: frame number """ raise NotImplementedError("!") def getDepthIntrinsics(self): """ Get intrinsic matrix of depth camera :return: 3x3 intrinsic camera matrix """ raise NotImplementedError("!") def getColorIntrinsics(self): """ Get intrinsic matrix of color camera :return: 3x3 intrinsic camera matrix """ raise NotImplementedError("!") def getExtrinsics(self): """ Get extrinsic matrix from color to depth camera :return: 4x3 extrinsic camera matrix """ raise NotImplementedError("!") class CreativeCameraDevice(CameraDevice): """ DepthSense camera 
class, for Creative Gesture Camera, DS325, etc.""" def __init__(self, mirror=False): """ Initialize device :param mirror: mirror image """ super(CreativeCameraDevice, self).__init__(mirror) def start(self): """ Start device :return: None """ dsc.start() def stop(self): """ Stop device :return: None """ dsc.stop() def getDepth(self): """ Return a median smoothed depth image :return: depth data as numpy array """ if self.mirror: depth = dsc.getDepthMap()[:, ::-1] else: depth = dsc.getDepthMap() depth = cv2.medianBlur(depth, 3) return (numpy.count_nonzero(depth) != 0), numpy.asarray(depth, numpy.float32) def getRGB(self): """ Return a bit color image :return: color image as numpy array """ if self.mirror: image = dsc.getColourMap()[:, ::-1, :] else: image = dsc.getColourMap() return (numpy.count_nonzero(image) != 0), image def getGrayScale(self): """ Return a grayscale image :return: grayscale image as numpy array """ if self.mirror: image = dsc.getColorMap()[:, ::-1, :] else: image = dsc.getColorMap() grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) return (numpy.count_nonzero(grey) != 0), grey.transpose() def getLastColorNum(self): """ Get frame number of last color frame :return: frame number """ return dsc.getLastColorNum() def getLastDepthNum(self): """ Get frame number of last depth frame :return: frame number """ return dsc.getLastDepthNum() def getDepthIntrinsics(self): """ Get intrinsic matrix of depth camera :return: 3x3 intrinsic camera matrix """ return dsc.getDepthIntrinsics() def getColorIntrinsics(self): """ Get intrinsic matrix of color camera :return: 3x3 intrinsic camera matrix """ return dsc.getColorIntrinsics() def getExtrinsics(self): """ Get extrinsic matrix from color to depth camera :return: 4x3 extrinsic camera matrix """ return dsc.getExtrinsics() class DepthSenseCameraDevice(CameraDevice): """ Class for OpenNI based devices, e.g. 
Kinect, Asus Xtion """ def __init__(self, mirror=False): """ Initialize device :param mirror: mirror image """ super(DepthSenseCameraDevice, self).__init__(mirror) def start(self): """ Stop device :return: None """ self.ctx = openni.Context() self.ctx.init() # Create a depth generator self.depth = openni.DepthGenerator() self.depth.create(self.ctx) # Set it to VGA maps at 30 FPS self.depth.set_resolution_preset(openni.RES_VGA) self.depth.fps = 30 # Create a color generator self.color = openni.ImageGenerator() self.color.create(self.ctx) # Set it to VGA maps at 30 FPS self.color.set_resolution_preset(openni.RES_VGA) self.color.fps = 30 # Start generating self.ctx.start_generating_all() def stop(self): """ Stop device :return: None """ self.ctx.stop_generating_all() self.ctx.shutdown() def getDepth(self): """ Return a median smoothed depth image :return: depth data as numpy array """ # Get the pixel at these coordinates try: # Wait for new data to be available self.ctx.wait_one_update_all(self.depth) except openni.OpenNIError, err: print "Failed updating data:", err else: dpt = numpy.asarray(self.depth.get_tuple_depth_map(), dtype='float32').reshape(self.depth.map.height, self.depth.map.width) return True, dpt def getRGB(self): """ Return a median smoothed depth image :return: depth data as numpy array """ # Get the pixel at these coordinates try: # Wait for new data to be available self.ctx.wait_one_update_all(self.color) except openni.OpenNIError, err: print "Failed updating data:", err else: dpt = numpy.asarray(self.color.get_tuple_depth_map(), dtype='float32').reshape(self.color.map.height, self.color.map.width) return True, dpt class FileDevice(CameraDevice): """ Fake class to load images from file """ def __init__(self, filenames, importer, mirror=False): """ Initialize device :param mirror: mirror all images :return: None """ super(FileDevice, self).__init__(mirror) if not isinstance(filenames, list): raise ValueError("Files must be list of filenames.") self.filenames = filenames self.importer = importer self.depth_intrinsics = importer.getCameraIntrinsics() self.color_intrinsics = numpy.zeros((3, 3)) self.extrinsics = numpy.zeros((3, 4)) self.mirror = mirror self.last_color_num = 0 self.last_depth_num = 0 def start(self): """ Start device :return: None """ pass def stop(self): """ Stop device :return: None """ pass def getDepth(self): """ Return a median smoothed depth image :return: depth data as numpy array """ time.sleep(0.01) frame = self.importer.loadDepthMap(self.filenames[self.last_depth_num]) self.last_depth_num += 1 return True, frame def getRGB(self): """ Return a bit color image :return: color image as numpy array """ raise NotImplementedError("!") def getGrayScale(self): """ Return a grayscale image :return: grayscale image as numpy array """ raise NotImplementedError("!") def getRGBD(self): """ Return a color + depth image :return: RGB-D image as 4-channel numpy array """ ret_rgb, c = self.getRGB() ret_d, d = self.getDepth() return ret_rgb and ret_d, c.astype('float32'), d.astype('float32') def getLastColorNum(self): """ Get frame number of last color frame :return: frame number """ return self.last_color_num def getLastDepthNum(self): """ Get frame number of last depth frame :return: frame number """ return self.last_depth_num def getDepthIntrinsics(self): """ Get intrinsic matrix of depth camera :return: 3x3 intrinsic camera matrix """ return self.depth_intrinsics def getColorIntrinsics(self): """ Get intrinsic matrix of color camera :return: 3x3 intrinsic camera 
matrix """ return self.color_intrinsics def getExtrinsics(self): """ Get extrinsic matrix from color to depth camera :return: 4x3 extrinsic camera matrix """ return self.extrinsics
en
0.718079
Basis for depth camera devices. CameraDevice provides interface for managing depth cameras. It can be used to retrieve basic information and read depth and color frames. Copyright 2015 <NAME>, ICG, Graz University of Technology <<EMAIL>> This file is part of DeepPrior. DeepPrior is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. DeepPrior is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with DeepPrior. If not, see <http://www.gnu.org/licenses/>. Abstract class that handles all camera devices Initialize device :param mirror: mirror all images :return: None Start device :return: None Stop device :return: None Save data to file, we need special treatment because we have 16bit depth :param data: data :param file_name: file name :return: None # read with: b = scipy.misc.imread('my16bit.png') Save data to file 3x8bit color :param data: data :param file_name: file name :return: None # read with: b = scipy.misc.imread('my.png') Return a median smoothed depth image :return: depth data as numpy array Return a bit color image :return: color image as numpy array Return a grayscale image :return: grayscale image as numpy array Return a color + depth image :return: RGB-D image as 4-channel numpy array Get frame number of last color frame :return: frame number Get frame number of last depth frame :return: frame number Get intrinsic matrix of depth camera :return: 3x3 intrinsic camera matrix Get intrinsic matrix of color camera :return: 3x3 intrinsic camera matrix Get extrinsic matrix from color to depth camera :return: 4x3 extrinsic camera matrix DepthSense camera class, for Creative Gesture Camera, DS325, etc. Initialize device :param mirror: mirror image Start device :return: None Stop device :return: None Return a median smoothed depth image :return: depth data as numpy array Return a bit color image :return: color image as numpy array Return a grayscale image :return: grayscale image as numpy array Get frame number of last color frame :return: frame number Get frame number of last depth frame :return: frame number Get intrinsic matrix of depth camera :return: 3x3 intrinsic camera matrix Get intrinsic matrix of color camera :return: 3x3 intrinsic camera matrix Get extrinsic matrix from color to depth camera :return: 4x3 extrinsic camera matrix Class for OpenNI based devices, e.g. 
Kinect, Asus Xtion Initialize device :param mirror: mirror image Stop device :return: None # Create a depth generator # Set it to VGA maps at 30 FPS # Create a color generator # Set it to VGA maps at 30 FPS # Start generating Stop device :return: None Return a median smoothed depth image :return: depth data as numpy array # Get the pixel at these coordinates # Wait for new data to be available Return a median smoothed depth image :return: depth data as numpy array # Get the pixel at these coordinates # Wait for new data to be available Fake class to load images from file Initialize device :param mirror: mirror all images :return: None Start device :return: None Stop device :return: None Return a median smoothed depth image :return: depth data as numpy array Return a bit color image :return: color image as numpy array Return a grayscale image :return: grayscale image as numpy array Return a color + depth image :return: RGB-D image as 4-channel numpy array Get frame number of last color frame :return: frame number Get frame number of last depth frame :return: frame number Get intrinsic matrix of depth camera :return: 3x3 intrinsic camera matrix Get intrinsic matrix of color camera :return: 3x3 intrinsic camera matrix Get extrinsic matrix from color to depth camera :return: 4x3 extrinsic camera matrix
2.28507
2
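A brief sketch of driving the FileDevice "fake camera" defined above, assuming cameradevice.py and its own imports (numpy, cv2, openni, lib_dscapture) resolve; DummyImporter, the filenames, and the synthetic frames are stand-ins, not part of DeepPrior:

# Stand-in importer; a real one would come from data_importer and load actual depth maps.
import numpy
from data_importer.util.cameradevice import FileDevice  # assumes the module's dependencies are installed

class DummyImporter:
    def getCameraIntrinsics(self):
        return numpy.eye(3)                              # placeholder 3x3 intrinsic matrix
    def loadDepthMap(self, filename):
        return numpy.zeros((240, 320), dtype='float32')  # synthetic depth frame

device = FileDevice(["frame_000.png", "frame_001.png"], DummyImporter())
device.start()
ok, depth = device.getDepth()        # returns (True, frame) and advances the depth frame counter
print(ok, depth.shape, device.getLastDepthNum())
device.stop()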
tests/snapshots/snap_test_NotebookScripter.py
breathe/NotebookScripter
23
6631541
<gh_stars>10-100 # -*- coding: utf-8 -*- # snapshottest: v1 - https://goo.gl/zC4yUc from __future__ import unicode_literals from snapshottest import Snapshot snapshots = Snapshot() snapshots['TestNotebookExecution::test_magics_are_unregistered 1'] = 'matplotlib' snapshots['TestNotebookExecution::test_run_notebook 1'] = 'Hello default world' snapshots['TestNotebookExecution::test_run_notebook_in_jupyter 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'parameterized_name': 'default world' } snapshots['TestNotebookExecution::test_run_notebook_in_jupyter_with_hooks 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'greeting_string': 'Hello {0}', 'parameterized_name': 'external world' } snapshots['TestNotebookExecution::test_run_notebook_in_jupyter_with_hooks 2'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': True, 'greeting_string': 'Salut {0}', 'parameterized_name': 'external world2' } snapshots['TestNotebookExecution::test_run_notebook_in_process 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'parameterized_name': 'default world' } snapshots['TestNotebookExecution::test_run_notebook_in_process_with_hooks 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'greeting_string': 'Hello {0}', 'parameterized_name': 'external world' } snapshots['TestNotebookExecution::test_run_notebook_in_process_with_hooks 2'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': True, 'greeting_string': 'Salut {0}', 'parameterized_name': 'external world2' } snapshots['TestNotebookExecution::test_run_notebook_with_hooks1 1'] = 'Hello external world' snapshots['TestNotebookExecution::test_run_notebook_with_hooks2 1'] = 'Salut external world2' snapshots['TestWorkerExecution::test_worker 1'] = { 'french_mode': None, 'greeting_string': 'Hello {0}', 'parameterized_name': 'external world' } snapshots['TestExecutePyFileAsNotebook::test_magics_are_unregistered 1'] = 'matplotlib' snapshots['TestExecutePyFileAsNotebook::test_run_notebook 1'] = 'Hello default world' snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_jupyter 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'parameterized_name': 'default world' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_jupyter_with_hooks 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'greeting_string': 'Hello {0}', 'parameterized_name': 'external world' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_jupyter_with_hooks 2'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': True, 'greeting_string': 'Salut {0}', 'parameterized_name': 'external world2' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_process 1'] = { '__doc__': None, '__loader__': None, 
'__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'parameterized_name': 'default world' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_process_with_hooks 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'greeting_string': 'Hello {0}', 'parameterized_name': 'external world' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_process_with_hooks 2'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': True, 'greeting_string': 'Salut {0}', 'parameterized_name': 'external world2' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_with_hooks1 1'] = 'Hello external world' snapshots['TestExecutePyFileAsNotebook::test_run_notebook_with_hooks2 1'] = 'Salut external world2' snapshots['TestRecursiveNotebookExecution::test_run 1'] = 'Case 1 Expecting a string' snapshots['TestRecursiveNotebookExecution::test_run 2'] = 'Case 2 Expecting a string'
# -*- coding: utf-8 -*- # snapshottest: v1 - https://goo.gl/zC4yUc from __future__ import unicode_literals from snapshottest import Snapshot snapshots = Snapshot() snapshots['TestNotebookExecution::test_magics_are_unregistered 1'] = 'matplotlib' snapshots['TestNotebookExecution::test_run_notebook 1'] = 'Hello default world' snapshots['TestNotebookExecution::test_run_notebook_in_jupyter 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'parameterized_name': 'default world' } snapshots['TestNotebookExecution::test_run_notebook_in_jupyter_with_hooks 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'greeting_string': 'Hello {0}', 'parameterized_name': 'external world' } snapshots['TestNotebookExecution::test_run_notebook_in_jupyter_with_hooks 2'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': True, 'greeting_string': 'Salut {0}', 'parameterized_name': 'external world2' } snapshots['TestNotebookExecution::test_run_notebook_in_process 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'parameterized_name': 'default world' } snapshots['TestNotebookExecution::test_run_notebook_in_process_with_hooks 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'greeting_string': 'Hello {0}', 'parameterized_name': 'external world' } snapshots['TestNotebookExecution::test_run_notebook_in_process_with_hooks 2'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': True, 'greeting_string': 'Salut {0}', 'parameterized_name': 'external world2' } snapshots['TestNotebookExecution::test_run_notebook_with_hooks1 1'] = 'Hello external world' snapshots['TestNotebookExecution::test_run_notebook_with_hooks2 1'] = 'Salut external world2' snapshots['TestWorkerExecution::test_worker 1'] = { 'french_mode': None, 'greeting_string': 'Hello {0}', 'parameterized_name': 'external world' } snapshots['TestExecutePyFileAsNotebook::test_magics_are_unregistered 1'] = 'matplotlib' snapshots['TestExecutePyFileAsNotebook::test_run_notebook 1'] = 'Hello default world' snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_jupyter 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'parameterized_name': 'default world' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_jupyter_with_hooks 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'greeting_string': 'Hello {0}', 'parameterized_name': 'external world' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_jupyter_with_hooks 2'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': True, 'greeting_string': 'Salut {0}', 'parameterized_name': 'external world2' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_process 1'] = { '__doc__': None, '__loader__': None, '__name__': 
'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'parameterized_name': 'default world' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_process_with_hooks 1'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': None, 'greeting_string': 'Hello {0}', 'parameterized_name': 'external world' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_in_process_with_hooks 2'] = { '__doc__': None, '__loader__': None, '__name__': 'loaded_notebook_from_subprocess', '__package__': None, '__spec__': None, 'french_mode': True, 'greeting_string': 'Salut {0}', 'parameterized_name': 'external world2' } snapshots['TestExecutePyFileAsNotebook::test_run_notebook_with_hooks1 1'] = 'Hello external world' snapshots['TestExecutePyFileAsNotebook::test_run_notebook_with_hooks2 1'] = 'Salut external world2' snapshots['TestRecursiveNotebookExecution::test_run 1'] = 'Case 1 Expecting a string' snapshots['TestRecursiveNotebookExecution::test_run 2'] = 'Case 2 Expecting a string'
en
0.629015
# -*- coding: utf-8 -*- # snapshottest: v1 - https://goo.gl/zC4yUc
1.774753
2
ceph_installer/controllers/status.py
ceph/ceph-installer
16
6631542
from pecan import response, expose

from ceph_installer.hooks import system_checks, SystemCheckError


class StatusController(object):

    @expose('json')
    def index(self):
        for check in system_checks:
            try:
                check()
            except SystemCheckError as system_error:
                response.status = 500
                return {'message': system_error.message}
        return dict(message="ok")
from pecan import response, expose

from ceph_installer.hooks import system_checks, SystemCheckError


class StatusController(object):

    @expose('json')
    def index(self):
        for check in system_checks:
            try:
                check()
            except SystemCheckError as system_error:
                response.status = 500
                return {'message': system_error.message}
        return dict(message="ok")
none
1
2.241942
2
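A sketch of the check contract StatusController relies on: each entry in system_checks is a zero-argument callable that raises SystemCheckError (carrying a .message) when the host is unhealthy. The stand-in exception and the check below are hypothetical, not taken from ceph_installer.hooks:

import shutil

class SystemCheckError(Exception):
    """Stand-in for ceph_installer.hooks.SystemCheckError (hypothetical)."""
    def __init__(self, message):
        super(SystemCheckError, self).__init__(message)
        self.message = message

def check_free_disk(minimum_bytes=1024 ** 3):
    """Illustrative check: fail the status endpoint when under ~1 GB is free."""
    if shutil.disk_usage("/").free < minimum_bytes:
        raise SystemCheckError("less than 1 GB of free disk space")

system_checks = [check_free_disk]
for check in system_checks:          # mirrors the loop in StatusController.index
    check()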
fat.py
JG-OLIVEIRA/math_repo
0
6631543
<reponame>JG-OLIVEIRA/math_repo
def fat(value):
    if value > 1:
        return value * fat(value - 1)
    return 1


print(fat(5) / fat(3) * fat(2))
print(fat(4))
print(fat(3))
print(fat(2))
print(fat(1))
def fat(value):
    if value > 1:
        return value * fat(value - 1)
    return 1


print(fat(5) / fat(3) * fat(2))
print(fat(4))
print(fat(3))
print(fat(2))
print(fat(1))
none
1
3.599607
4
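The first print above evaluates fat(5) / fat(3) * fat(2) left to right: 120 / 6 = 20.0 under true division, times 2 gives 40.0; the remaining prints yield 24, 6, 2 and 1. A small check, assuming fat is defined as above:

assert fat(5) / fat(3) * fat(2) == 40.0                 # 120 / 6 * 2, evaluated left to right
assert [fat(n) for n in (4, 3, 2, 1)] == [24, 6, 2, 1]  # plain factorials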
summarizemultistability.py
BenNordick/HiLoop
1
6631544
import argparse import collections import colorsys import copy import cycler import json import matplotlib.collections as mplcollect import matplotlib.colors as mplcolors import matplotlib.patches as mplpatch import matplotlib.pyplot as plt import matplotlib.ticker as mpltick import mpl_toolkits.axes_grid1.inset_locator as mptinset import numpy as np import random import scipy.cluster as spcluster import seaborn as sns from sklearn import decomposition, neighbors # Creates visualizations from JSON reports generated by multistability.py def isoscillator(attractor): """Determine whether the given attractor value is an oscillatory attractor.""" return isinstance(attractor, dict) def caricatureattractor(attractor): """Turn the given attractor information value (which might be an oscillation) into a single list, for comparison.""" if isoscillator(attractor): return list(np.mean(attractor['orbit'], axis=0)) if 'orbit' in attractor else [(s['max'] + s['min']) / 2 for s in attractor['species']] else: return list(attractor) def caricatureattractors(attractors): """Caricature each species in the given attractor set (nested list).""" return [caricatureattractor(a) for a in attractors] def summarizeattractors(pset_report): """Create a 2-tuple summarizing a set of attractors: attractor count, monotonic species count.""" attractors = caricatureattractors(pset_report['attractors']) species = len(attractors[0]) correlated_species = set() most_monotonic_species = 0 for i in range(species): if i in correlated_species: continue sorted_attractors = sorted(attractors, key=lambda a: a[i]) correlated_species.add(i) monotonic_species = 1 for j in set(range(species)).difference(correlated_species): attractor_concs = [a[j] for a in sorted_attractors] if attractor_concs == sorted(attractor_concs) or attractor_concs == sorted(attractor_concs, reverse=True): monotonic_species += 1 correlated_species.add(j) most_monotonic_species = max(most_monotonic_species, monotonic_species) return len(attractors), most_monotonic_species def categorizeattractors(report): """Create a dictionary of attractor summary tuples to lists of their occurrences from all parameter sets in the report.""" summary_occurrences = collections.defaultdict(list) pset_list = report['psets'] if isinstance(report, dict) else report for pset in pset_list: summary_occurrences[summarizeattractors(pset)].append(pset) return summary_occurrences def specificrulevalue(ruleset, summary, default=None): """ Determine the most specific policy for a system with the given multiattractor summary. The ruleset is a dict of rules, where each key is an aspect of varying specificity: - 2-tuples are the most specific and match systems with that summary. - Integers are less specific and match systems with that number of distinct attractors. - The None key indicates the default rule. """ specific = None attractors, monotonic = summary if summary in ruleset: specific = summary elif attractors in ruleset: specific = attractors return ruleset[specific] if specific in ruleset else default def applydownsample(summary_occurrences, downsample): """ Downsample a categorized collection of systems according to a ruleset. Arguments: - summary_occurrences: dict produced by categorizeattractors - downsample: system ruleset where the values are the number of each system type to keep (if int) or percent of systems to keep (if string ending in '%') Returns a flat list of pset/system reports. 
""" filtered_psets = [] for summary, occurrences in summary_occurrences.items(): n_psets = None if downsample is not None: limit_rule = specificrulevalue(downsample, summary, default=len(occurrences)) if isinstance(limit_rule, int): n_psets = limit_rule else: percent = float(limit_rule.split('%')[0]) n_psets = int(np.ceil(percent * len(occurrences) / 100)) if n_psets is None or n_psets >= len(occurrences): filtered_psets.extend(occurrences) else: filtered_psets.extend(random.sample(occurrences, n_psets)) return filtered_psets def plotmultistability(report, figsize=None, label_counts=False, colorbar=True): """ Set up a multistability table in the current pyplot. Arguments: - report: full parameter sampling report (likely deserialized from JSON) - figsize: figure size as a tuple of inches (width by height) - label_counts: whether to label cells with the count of systems - colorbar: whether to show a colorbar for the cell intensities/colors """ summary_occurrences = categorizeattractors(report) max_attractors = max(s[0] for s in summary_occurrences.keys()) min_attractors = min(s[0] for s in summary_occurrences.keys()) max_monotonic = len(report['species_names']) min_monotonic = 1 width = max_attractors - min_attractors + 1 x_range = range(min_attractors, max_attractors + 1) height = max_monotonic - min_monotonic + 1 y_range = reversed(range(min_monotonic, max_monotonic + 1)) heatmap_pixels = np.zeros((height, width), dtype=int) oscillators = np.zeros((height, width), dtype=int) for summary, occurrences in summary_occurrences.items(): x = summary[0] - min_attractors y = max_monotonic - summary[1] heatmap_pixels[y][x] = len(occurrences) oscillators[y][x] = sum(1 for oc in occurrences if any(isoscillator(at) for at in oc['attractors'])) fig, ax = plt.subplots(figsize=figsize) im = ax.imshow(heatmap_pixels, norm=mplcolors.LogNorm(vmax=heatmap_pixels.max())) if colorbar: fig.colorbar(im) ax.set_xticks(range(width)) ax.set_yticks(range(height)) ax.set_xticklabels([str(n) for n in x_range]) ax.set_yticklabels([str(n) for n in y_range]) ax.set_xlabel('Attractors') ax.set_ylabel('Monotonically correlated species') if label_counts: for y in range(height): for x in range(width): if heatmap_pixels[y][x] > 0: text = str(heatmap_pixels[y][x]) if oscillators[y][x] > 0: text = f'{text}\n({oscillators[y][x]} osc.)' ax.text(x, y, text, ha='center', va='center', color='gray') def plotattractors(report, reduction, figsize=None, labelsize=None, connect_psets=False, contour=False, downsample=None, density_downsample=None, focus=None, focus_osc=False, hide_defocused=False, color_code=False, square=False): """ Set up a hexbin or scatter-line plot in the current pyplot. Arguments: - report: full parameter sampling report - reduction: how to map concentration values to 2D space: an instance of e.g. 
PCA2D or AverageLog - figsize: figure size as a tuple of inches (width by height) - labelsize: font size for axis labels - connect_psets: whether to make a scatter-line plot instead of a hexbin plot - contour: proportion of density outside the lowest contour level, or False to not add contour lines - downsample: ruleset to downsample systems for display - density_downsample: ruleset to downsample systems for contour/density estimation - focus: Boolean-valued ruleset to focus systems (scatter-line only, default all focused) - focus_osc: whether to focus systems containing oscillators (scatter-line only, will defocus all others if focus not set) - hide_defocused: whether to hide all non-focused systems (scatter-line only) - color_code: whether to color lines by system type (scatter-line only) - square: whether to force a square plot """ reduction.prepare(report) random.seed(1) summary_occurrences = categorizeattractors(report) filtered_psets = applydownsample(summary_occurrences, downsample) points = reduction.reduce(psets_matrix(filtered_psets)) xlabel, ylabel = reduction.labels() fig, ax_main = plt.subplots(figsize=figsize) if connect_psets: distinct_summaries = list(categorizeattractors(filtered_psets).keys()) default_cycle = cycler.cycler(color=['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:gray', 'tab:olive', 'tab:cyan']) default_cycler = default_cycle() defocus_default_cycler = plt.rcParams['axes.prop_cycle']() for i, pset in enumerate(filtered_psets): pset_matrix = np.array(caricatureattractors(pset['attractors'])) pset_xy = reduction.reduce(pset_matrix) sorted_attractors = pset_xy[pset_matrix[:, 0].argsort(), :] point_mask = [not isoscillator(a) for a in pset['attractors']] has_oscillator = not all(point_mask) z = i linewidth = None oscwidth = 1.6 dotsize = 36.0 defocused = False summary = summarizeattractors(pset) if focus or focus_osc: if (focus_osc and has_oscillator) or (focus and specificrulevalue(focus, summary, default=False)): z += len(filtered_psets) + 1 elif hide_defocused: continue else: linewidth = 0.8 oscwidth = 1.1 dotsize = 10.0 defocused = True if color_code: hue, sat, lum, hue_vary_width = summaryhsl(distinct_summaries, summary) hue += random.uniform(0, hue_vary_width) if not defocused: lum *= random.uniform(0.85, 1.1) sat *= random.uniform(0.8, 1.0) elif defocused: next_prop = next(defocus_default_cycler) color_spec = next_prop['color'] r, g, b = mplcolors.to_rgb(color_spec) hue, sat, lum = colorsys.rgb_to_hls(r, g, b) if defocused: lum = min(1 - (1 - lum) * random.uniform(0.3, 0.5), 0.9) sat *= random.uniform(0.35, 0.45) if color_code or defocused: pset_color = colorsys.hls_to_rgb(hue, lum, sat) else: pset_color = next(default_cycler)['color'] ax_main.plot(sorted_attractors[:, 0], sorted_attractors[:, 1], lw=linewidth, color=pset_color, zorder=z) pointprops = {'s': dotsize} if defocused or not contour else {'linewidths': 1.0, 'edgecolors': 'white', 's': dotsize * 1.3} ax_main.scatter(pset_xy[point_mask, 0], pset_xy[point_mask, 1], color=pset_color, zorder=z, **pointprops) for osc in (a for a in pset['attractors'] if isoscillator(a)): vertices = np.array(osc['orbit']) projected_vertices = reduction.reduce(vertices) if projected_vertices.shape[0] >= 3: projected_vertices = np.vstack((projected_vertices, projected_vertices[0, :])) polygon = mplpatch.Polygon(projected_vertices, color=pset_color, linewidth=oscwidth, linestyle='--', fill=False, zorder=z) ax_main.add_patch(polygon) else: cmap = copy.copy(plt.get_cmap('viridis')) 
cmap.set_under('white', 1.0) hex_args = {'linewidths': 0.2, 'norm': mplcolors.LogNorm(vmin=2), 'cmap': cmap, 'gridsize': 40} bin_results = ax_main.hexbin(points[:, 0], points[:, 1], **hex_args) fig.colorbar(bin_results, ax=ax_main, label='Attractors') if contour: random.seed(1) density_filtered_psets = applydownsample(summary_occurrences, density_downsample) density_points = reduction.reduce(psets_matrix(density_filtered_psets)) kde = neighbors.KernelDensity(kernel='gaussian', bandwidth=0.1).fit(density_points) bin_x, bin_y = np.mgrid[(density_points[:, 0].min() - 0.15):(density_points[:, 0].max() + 0.15):80j, (density_points[:, 1].min() - 0.15):(density_points[:, 1].max() + 0.15):80j] density = np.exp(kde.score_samples(np.vstack((bin_x.flatten(), bin_y.flatten())).T)) sorted_densities = np.sort(density.flatten()) total_density = np.sum(sorted_densities) cdf = np.cumsum(sorted_densities) / total_density if connect_psets: cutoff_indices = [np.where(cdf > percentile)[0][0] for percentile in np.linspace(contour, 1, 5)[:-1]] levels = [sorted_densities[c] for c in cutoff_indices] + [total_density] colors = ['#c65ff560', '#af36e388', '#b300ff90', '#8500e2a0'] ax_main.contourf(bin_x, bin_y, density.reshape(bin_x.shape), levels, colors=colors, zorder=len(filtered_psets)) else: cutoff_indices = [np.where(cdf > percentile)[0][0] for percentile in np.linspace(contour, 0.9, 6)] levels = [sorted_densities[c] for c in cutoff_indices] widths = np.linspace(0.5, 1.4, 6) ax_main.contour(bin_x, bin_y, density.reshape(bin_x.shape), levels, linewidths=widths, colors='black', zorder=(len(filtered_psets) * 3), alpha=0.6) if square: ax_main.axis('square') elif reduction.equalscale(): ax_main.axis('equal') if reduction.zerobased('x'): ax_main.set_xlim(left=0) if reduction.zerobased('y'): ax_main.set_ylim(bottom=0) locator_base = reduction.locatorbase() if locator_base is not None: ax_main.xaxis.set_major_locator(mpltick.MultipleLocator(base=locator_base)) ax_main.yaxis.set_major_locator(mpltick.MultipleLocator(base=locator_base)) x_text = ax_main.set_xlabel(xlabel) if labelsize is not None: x_text.set_fontsize(labelsize) y_text = ax_main.set_ylabel(ylabel) if labelsize is not None: y_text.set_fontsize(labelsize) def psets_matrix(psets): """Create a NumPy 2D array of all attractors in all given parameter set reports.""" full_matrix = None for pset in psets: numeric_attractors = np.array(caricatureattractors(pset['attractors'])) if full_matrix is None: full_matrix = numeric_attractors else: full_matrix = np.vstack((full_matrix, numeric_attractors)) return full_matrix class PCA2D(): """Reduction that puts the first principal component on the X axis and second on the Y axis.""" def __init__(self): self.pca = decomposition.PCA(n_components=2) def prepare(self, report): self.pca.fit(psets_matrix(report['psets'])) def reduce(self, matrix): return self.pca.transform(matrix) def labels(self): return 'PC1', 'PC2' def zerobased(self, axis): return False def locatorbase(self): return 1.0 def equalscale(self): return True class AverageLog(): """Reduction that puts specified species' concentrations on each axis, averaging the logs if multiple species go on one axis.""" def __init__(self, settings=None): """ Specify the reduction axes as a string. X and Y axes' settings are separated by a slash. Each axis' settings is a comma-separated list of species names. A species name can be prefixed with a dash to invert it. 
""" self.settings = settings def prepare(self, report): self.names = report['species_names'] if self.settings is None: raise NotImplementedError('You must specify genes for reduction axes') else: x, y = self.settings.split('/') self.x_components = [self._parsecomponent(report, c.strip()) for c in x.split(',')] self.y_components = [self._parsecomponent(report, c.strip()) for c in y.split(',')] def reduce(self, matrix): return np.stack((self._componentwisereduce(matrix, self.x_components), self._componentwisereduce(matrix, self.y_components)), 1) def labels(self): return ', '.join((self._componentname(c) for c in self.x_components)), ', '.join((self._componentname(c) for c in self.y_components)) def zerobased(self, axis): axis_components = self.x_components if axis == 'x' else self.y_components return len(axis_components) == 1 and axis_components[0][1] == 1 def locatorbase(self): return 0.5 if self.equalscale() else None def equalscale(self): return self.zerobased('x') and self.zerobased('y') def _parsecomponent(self, report, text): if text.startswith('-'): text = text[1:] factor = -1 else: factor = 1 index = self.names.index(text) if text in self.names else self.names.index(f'X_{text}') return index, factor def _componentwisereduce(self, matrix, components): results = None for index, factor in components: component_log = np.log(matrix[:, index]) * factor if results is None: results = component_log else: results += component_log return np.exp(results / len(components)) def _componentname(self, component): index, factor = component prefix = '-' if factor < 0 else '' name = self.names[index] return prefix + (name[2:] if name.startswith('X_') else name) def summaryhsl(all_summaries, summary): """ Choose a color for the given system summary to distinguish it from other types of systems. Returns hue, saturation, and luminance for the start of the range, and how much the hue can be randomly varied while staying distinguishable. """ lowest_att = min(att for att, ms in all_summaries) highest_att = max(att for att, ms in all_summaries) att_range = highest_att - lowest_att + 1 attractors, monotonic_species = summary lowest_ms = min(ms for att, ms in all_summaries if att == attractors) highest_ms = max(ms for att, ms in all_summaries if att == attractors) ms_range = highest_ms - lowest_ms + 1 bin_width = 1 / (ms_range + 1) / att_range hue = ((highest_att - attractors) / att_range) + (highest_ms - monotonic_species) * bin_width variability_squeeze = (2 if att_range > 1 else 1) * (2 if ms_range > 1 else 1) return hue, 1, colorsys.ONE_THIRD, bin_width / variability_squeeze def plotheatmap(report, figsize=None, labelsize=None, conc_colorbar=False, arcs=None, downsample=None, arc_downsample=None, color_columns=False, osc_orbits=1, fold_dist=None, bicluster=False, osc_linkage=0): """ Set up a cluster-heatmap in the current pyplot. 
Arguments: - report: full parameter set sampling report - figsize: figure size as a tuple of inches (width by height) - labelsize: font size for species/column labels - conc_colorbar: whether to add a colorbar for concentrations (matching the heatmap) - arcs: "arcs" to connect parameter sets' attractors by arcs, "straight" to connect by sharp lines, or None for no connectors - downsample: downsampling ruleset for showing systems in the heatmap - arc_downsample: downsampling ruleset for connecting systems in connector columns (columns will be hidden if no systems are selected for them) - color_columns: whether to color connector column labels (will match scatter-line plot if same downsampling ruleset is used) - osc_orbits: how many orbits of the slowest oscillator to show in a gradient - fold_dist: maximum distance under which attractors will be folded into one heatmap row (also adds fold intensity column and colorbar) - bicluster: whether to also cluster species (adds a dendrogram at the top) - osc_linkage: how strongly to keep oscillatory attractors together (1 usually puts most together, 2 usually moves them to the top) """ gene_names = [(n[2:] if n.startswith('X_') else n) for n in report['species_names']] random.seed(1) summary_occurrences = categorizeattractors(report) filtered_psets = applydownsample(summary_occurrences, downsample) filtered_pset_types = categorizeattractors(filtered_psets) distinct_summaries = list(filtered_pset_types.keys()) if arcs: arc_pset_types = categorizeattractors(applydownsample(filtered_pset_types, arc_downsample)) if arc_downsample else filtered_pset_types for att, ms in list(arc_pset_types.keys()): if att <= 1: del arc_pset_types[att, ms] dendrogram_ratio = 3 / (13 + 2 * len(arc_pset_types)) else: dendrogram_ratio = 0.2 detail_matrix = None unique_fingerprints = None row_redundancies = None for pset in filtered_psets: pset['indexes'] = [] for attractor in pset['attractors']: linkable = caricatureattractor(attractor) if isoscillator(attractor): linkable.append(osc_linkage) fingerprint = [100] orbit = np.array(attractor['orbit']) for s, species in enumerate(attractor['species']): avg_speed = np.mean(np.abs(orbit[1:, s] - orbit[:-1, s])) / (orbit.shape[0] - 1) linkable.append(avg_speed) fingerprint.extend([np.min(orbit[:, s]), np.max(orbit[:, s]), avg_speed * osc_linkage]) else: linkable.extend([0] * (len(gene_names) + 1)) fingerprint = [0] for conc in attractor: fingerprint.extend([conc, conc, 0]) linkable = np.array(linkable) fingerprint = np.array(fingerprint) if detail_matrix is None: detail_matrix = np.vstack((linkable, )) if fold_dist is not None: unique_fingerprints = np.vstack((fingerprint, )) row_redundancies = [1] pset['indexes'].append(0) elif fold_dist is None: pset['indexes'].append(detail_matrix.shape[0]) detail_matrix = np.vstack((detail_matrix, linkable)) else: existing_indexes, = np.where(np.linalg.norm(unique_fingerprints - fingerprint, axis=1) < fold_dist * 2) if len(existing_indexes) > 0: pset['indexes'].append(existing_indexes[0]) row_redundancies[existing_indexes[0]] += 1 else: pset['indexes'].append(detail_matrix.shape[0]) detail_matrix = np.vstack((detail_matrix, linkable)) unique_fingerprints = np.vstack((unique_fingerprints, fingerprint)) row_redundancies.append(1) matrix = detail_matrix[:, :len(gene_names)] linkage = spcluster.hierarchy.linkage(detail_matrix, metric='euclidean', method='average') if osc_linkage > 0 else None gene_dendrogram_ratio = 0.1 if bicluster else 0 figsize = (10, 10) if figsize is None else figsize cg = 
sns.clustermap(matrix, row_linkage=linkage, col_cluster=bicluster, cbar_pos=None, dendrogram_ratio=(dendrogram_ratio, gene_dendrogram_ratio), xticklabels=gene_names, yticklabels=False, cmap='seismic', linecolor=None, rasterized=True, figsize=figsize) matrix_display_ind = {v: k for k, v in enumerate(cg.dendrogram_row.reordered_ind)} gene_display_ind = {v: k for k, v in enumerate(cg.dendrogram_col.reordered_ind)} if bicluster else {n: n for n in range(len(gene_names))} heatmap_index = 1 if fold_dist is None else 2 width_ratios = [2, 8] if arcs: width_ratios = [3, 10] + [2] * len(arc_pset_types) if fold_dist is not None: width_ratios.insert(1, width_ratios[1] * len(gene_names) * 0.01) width_ratios[2] -= width_ratios[1] rows = 2 if bicluster else 1 main_row = rows - 1 height_ratios = (1, 9) if bicluster else None new_gs = plt.GridSpec(rows, len(width_ratios), figure=cg.fig, width_ratios=width_ratios, height_ratios=height_ratios) cg.ax_heatmap.set_position(new_gs[main_row, heatmap_index].get_position(cg.fig)) if labelsize is not None: cg.ax_heatmap.tick_params(axis='x', labelsize=labelsize) if bicluster: cg.ax_col_dendrogram.set_position(new_gs[0, heatmap_index].get_position(cg.fig)) any_arc_columns = arcs is not None and len(arc_pset_types) > 0 if arcs: for fpt_id, summary in enumerate(sorted(arc_pset_types.keys(), key=lambda am: am[0] * 100 + am[1], reverse=True)): ax_arcs = cg.fig.add_subplot(new_gs[main_row, heatmap_index + 1 + fpt_id], sharey=cg.ax_heatmap) ax_arcs.tick_params(labelbottom=False, labelleft=False, bottom=False) color_cycle = ax_arcs._get_lines.prop_cycler for pset_id, pset in enumerate(arc_pset_types[summary]): if arcs == 'straight': height = 1.85 - 1.6 * pset_id / len(arc_pset_types[summary]) steepness = 0.18 * (1 - (height - 0.35) / 1.6) else: height = 1.75 - 0.2 * (pset_id % 8) + random.uniform(0, 0.1) color = next(color_cycle)['color'] rows = sorted(matrix_display_ind[i] for i in pset['indexes']) for i in range(len(rows) - 1): a, b = rows[i:(i + 2)] if a != b: if arcs == 'straight': segments = [[(0, a + 0.5), (height, a + 0.8 + steepness), (height, b + 0.2 - steepness), (0, b + 0.5)]] lc = mplcollect.LineCollection(segments, colors=color, linewidths=0.8) ax_arcs.add_collection(lc) else: ax_arcs.add_patch(mplpatch.Arc((0, (a + b) / 2 + 0.5), height, b - a, 180.0, 90.0, 270.0, edgecolor=color, linewidth=0.7)) if color_columns: hue, sat, lum, hue_vary_width = summaryhsl(distinct_summaries, summary) col_color = colorsys.hls_to_rgb(hue + hue_vary_width / 2, lum, sat) else: col_color = 'black' ax_arcs.set_xlabel(f'{summary[0]} att.,\n{summary[1]} m.s.', color=col_color) if arcs == 'straight': ax_arcs.set_xlim(0, 2) for spine in ['top', 'right', 'bottom']: ax_arcs.spines[spine].set_visible(False) mesh = cg.ax_heatmap.collections[0] mesh.set_edgecolor('face') mesh.set_antialiased(True) max_orbit_len = 0 for pset in filtered_psets: for attr in pset['attractors']: if isoscillator(attr): max_orbit_len = max(max_orbit_len, len(attr['orbit'])) orbit_render_len = max_orbit_len * osc_orbits for pset in filtered_psets: for index, attr in zip(pset['indexes'], pset['attractors']): if isoscillator(attr): display_y = matrix_display_ind[index] orbit = np.array(attr['orbit']) for x in range(orbit.shape[1]): display_x = gene_display_ind[x] x_stops = np.linspace(display_x, display_x + 1, orbit_render_len) color_stops = np.tile(np.vstack((orbit[:, x], orbit[:, x])), int(np.ceil(orbit_render_len / orbit.shape[0])))[:, :orbit_render_len] cg.ax_heatmap.pcolormesh(x_stops, [display_y, display_y + 
1], color_stops, shading='gouraud', cmap=mesh.cmap, norm=mesh.norm, rasterized=True, aa=True) if fold_dist is not None: ax_redundancy = cg.fig.add_subplot(new_gs[main_row, 1], sharey=cg.ax_heatmap) reordered_redundancies = np.zeros((matrix.shape[0], 1)) for i, redundancy in enumerate(row_redundancies): reordered_redundancies[matrix_display_ind[i], 0] = redundancy fold_mesh = ax_redundancy.pcolormesh(reordered_redundancies, cmap='inferno', rasterized=True) ax_redundancy.tick_params(labelbottom=False, labelleft=False, bottom=False) for spine in ['top', 'left', 'bottom']: ax_redundancy.spines[spine].set_visible(False) if bicluster and (any_arc_columns or not conc_colorbar): ax_corner = cg.fig.add_subplot(new_gs[0, 0]) ax_corner.axis('off') ax_fold_cbar = mptinset.inset_axes(ax_corner, width='85%', height='20%', loc='center left') cg.fig.colorbar(fold_mesh, cax=ax_fold_cbar, orientation='horizontal', label='Instances') ax_fold_cbar.xaxis.set_label_position('top') largest_redundancy = reordered_redundancies.max() if largest_redundancy >= 10: tick_step = (largest_redundancy // 10) * 5 ax_fold_cbar.xaxis.set_major_locator(mpltick.MultipleLocator(tick_step)) else: ax_fold_cbar = mptinset.inset_axes(cg.ax_row_dendrogram, width='15%', height='15%', loc='lower left') cg.fig.colorbar(fold_mesh, cax=ax_fold_cbar, label='Instances') ax_fold_cbar.yaxis.set_label_position('left') if conc_colorbar: if bicluster: if fold_dist is not None and any_arc_columns: ax_conc_cbar = mptinset.inset_axes(cg.ax_col_dendrogram, width='100%', height='100%', bbox_to_anchor=(1.01, 0.4, 0.19, 0.2), bbox_transform=cg.ax_col_dendrogram.transAxes, borderpad=0) else: ax_corner = cg.fig.add_subplot(new_gs[0, 0]) ax_corner.axis('off') ax_conc_cbar = mptinset.inset_axes(ax_corner, width='80%', height='20%', loc='center left') cg.fig.colorbar(mesh, cax=ax_conc_cbar, orientation='horizontal', label='Conc.') ax_conc_cbar.xaxis.set_label_position('top') else: ax_conc_cbar = mptinset.inset_axes(cg.ax_row_dendrogram, width='15%', height='15%', loc='upper left') cg.fig.colorbar(mesh, cax=ax_conc_cbar, label='Conc.') ax_conc_cbar.yaxis.set_label_position('left') def deduplicateoscillators(report): """Eliminate oscillators that are extremely similar to another oscillator in each system, in-place.""" if not 'ftpoints' in report: return distance_cutoff = 15 * len(report['species_names']) / report['ftpoints'] def isorbitfar(orbit_from, orbit_to): min_distances = [] for pt in range(orbit_from.shape[0]): min_distance = np.min(np.linalg.norm(orbit_to - orbit_from[pt, :], axis=1)) if min_distance > distance_cutoff * 5: return True min_distances.append(min_distance) avg_min_distance = np.mean(min_distances) return avg_min_distance > distance_cutoff for pset in report['psets']: seen_orbits = [] attractors = pset['attractors'] for i in reversed(range(len(attractors))): attractor = attractors[i] if not isoscillator(attractor): continue orbit = np.array(attractor['orbit']) is_duplicate = False for seen_orbit in seen_orbits: if not (isorbitfar(orbit, seen_orbit) or isorbitfar(seen_orbit, orbit)): is_duplicate = True break if is_duplicate: del attractors[i] else: seen_orbits.append(orbit) def droposcillators(report): """Eliminate all systems containing any oscillatory attractors, in-place.""" report['psets'] = [p for p in report['psets'] if not any(isoscillator(a) for a in p['attractors'])] def parse_systemtype(system_spec): """ Parse a system type/summary specification. 
Examples: - "4att3ms" to match four-attractor systems with three species concentrations' monotonically correlated to each other - "4" to match four-attractor systems - "else" for a default rule Returns an object usable as a key by specificrulevalue. """ if system_spec == 'else': return None elif 'att' in system_spec: att, ms_rest = system_spec.split('att') return (int(att), int(ms_rest.split('ms')[0])) else: return int(system_spec) def parse_downsample(arg_list): """Parse a list of downsampling policies into a dict usable by specificrulevalue.""" def parse_one(arg): """ Parse a single downsampling policy. A downsampling policy consists of a system type (per parse_systemtype), a colon, and a limit or retention probability. A string ending in a percent sign is interpreted as a retention probability; otherwise, the policy value must be an integer specifying a limit. Returns a key-value pair usable by specificrulevalue. """ column, downsample = arg.split(':') if not downsample.endswith('%'): downsample = int(downsample) return (parse_systemtype(column), downsample) return dict(parse_one(arg) for arg in arg_list) if arg_list else None if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('report', type=str, help='input JSON report filename') parser.add_argument('graph', type=str, help='output graph image filename') parser.add_argument('--dpi', type=int, default=150, help='output bitmap image DPI') parser.add_argument('--figsize', type=float, nargs=2, help='figure dimensions in inches') parser.add_argument('--fontsize', type=float, help='default font size') parser.add_argument('--majorfontsize', type=float, help='font size for prominent text') parser.add_argument('--pointonly', action='store_true', help='do not show systems involving oscillators') subcmds = parser.add_subparsers(dest='command', required=True, help='kind of graph to make') table_parser = subcmds.add_parser('table') table_parser.add_argument('--counts', action='store_true', help='display counts in populated cells') table_parser.add_argument('--colorbar', action='store_true', help='show colorbar even when counts are displayed') scatterplot_parser = subcmds.add_parser('scatterplot') scatterplot_parser.add_argument('--line', action='store_true', help='connect attractors from the same parameter set') scatterplot_parser.add_argument('--contour', nargs='?', type=float, const=0.1, help='show density contour lines (starting at a CDF quantile)') scatterplot_parser.add_argument('--reduction', type=str, help='species for dimensions: X1,X2/Y1,Y2 (negatives allowed) or "pca" to run PCA') scatterplot_parser.add_argument('--downsample', nargs='+', type=str, help='chance of keeping a parameter set with specified type, e.g. 2:10% or 4att3ms:0') scatterplot_parser.add_argument('--density-downsample', '--dds', nargs='+', type=str, help='downsampling rules for purposes of density estimation') scatterplot_parser.add_argument('--focus', nargs='*', type=str, help='type(s) of parameter sets to focus on, e.g. 
3att4ms or 4') scatterplot_parser.add_argument('--focus-osc', action='store_true', help='always focus parameter sets containing oscillations') scatterplot_parser.add_argument('--color', '--cc', action='store_true', help='color lines by parameter set type') scatterplot_parser.add_argument('--square', action='store_true', help='always use square axes') heatmap_parser = subcmds.add_parser('heatmap') heatmap_parser.add_argument('--colorbar', action='store_true', help='add colorbar for species concentrations') heatmap_parser.add_argument('--connect', type=str, choices=['arc', 'straight'], help='connect attractors from the same parameter set') heatmap_parser.add_argument('--connect-downsample', '--cds', nargs='+', help='downsample connectors e.g. 3att4ms:10% or 4att2ms:5') heatmap_parser.add_argument('--color-coordinate', '--cc', action='store_true', help='coordinate connection column label colors with scatterplot focus') heatmap_parser.add_argument('--downsample', nargs='+', type=str, help='chance of keeping a parameter set with specified type, e.g. e.g. 2:10% or 4att3ms:0') heatmap_parser.add_argument('--orbits', type=int, default=1, help='number of orbits to display for oscillatory attractors') heatmap_parser.add_argument('--osc-together', '--ot', nargs='?', type=float, const=1, default=0, help='cluster oscillatory attractors near each other') heatmap_parser.add_argument('--fold', type=float, help='distance under which attractors will be combined into one heatmap row') heatmap_parser.add_argument('--bicluster', action='store_true', help='also cluster genes') args = parser.parse_args() with open(args.report) as f: report = json.loads(f.read()) if args.pointonly: droposcillators(report) else: deduplicateoscillators(report) if args.fontsize is not None: plt.rc('font', size=args.fontsize) figsize = tuple(args.figsize) if args.figsize is not None else None if args.command == 'table': plotmultistability(report, figsize=figsize, label_counts=args.counts, colorbar=(args.colorbar or not args.counts)) elif args.command == 'scatterplot': reduction = PCA2D() if args.reduction == 'pca' else AverageLog(args.reduction) focus = {parse_systemtype(spec): True for spec in args.focus} if args.focus else None square = args.square or (args.reduction == 'pca') plotattractors(report, reduction, figsize=figsize, labelsize=args.majorfontsize, connect_psets=args.line, contour=args.contour, downsample=parse_downsample(args.downsample), density_downsample=parse_downsample(args.density_downsample), focus=focus, focus_osc=args.focus_osc, color_code=args.color, square=square) elif args.command == 'heatmap': if figsize is None and args.fontsize is None: plt.rc('font', size=18) plotheatmap(report, figsize=figsize, labelsize=args.majorfontsize, conc_colorbar=args.colorbar, arcs=args.connect, downsample=parse_downsample(args.downsample), arc_downsample=parse_downsample(args.connect_downsample), color_columns=args.color_coordinate, osc_orbits=args.orbits, fold_dist=args.fold, bicluster=args.bicluster, osc_linkage=args.osc_together) plt.savefig(args.graph, dpi=args.dpi) plt.close()
import argparse import collections import colorsys import copy import cycler import json import matplotlib.collections as mplcollect import matplotlib.colors as mplcolors import matplotlib.patches as mplpatch import matplotlib.pyplot as plt import matplotlib.ticker as mpltick import mpl_toolkits.axes_grid1.inset_locator as mptinset import numpy as np import random import scipy.cluster as spcluster import seaborn as sns from sklearn import decomposition, neighbors # Creates visualizations from JSON reports generated by multistability.py def isoscillator(attractor): """Determine whether the given attractor value is an oscillatory attractor.""" return isinstance(attractor, dict) def caricatureattractor(attractor): """Turn the given attractor information value (which might be an oscillation) into a single list, for comparison.""" if isoscillator(attractor): return list(np.mean(attractor['orbit'], axis=0)) if 'orbit' in attractor else [(s['max'] + s['min']) / 2 for s in attractor['species']] else: return list(attractor) def caricatureattractors(attractors): """Caricature each species in the given attractor set (nested list).""" return [caricatureattractor(a) for a in attractors] def summarizeattractors(pset_report): """Create a 2-tuple summarizing a set of attractors: attractor count, monotonic species count.""" attractors = caricatureattractors(pset_report['attractors']) species = len(attractors[0]) correlated_species = set() most_monotonic_species = 0 for i in range(species): if i in correlated_species: continue sorted_attractors = sorted(attractors, key=lambda a: a[i]) correlated_species.add(i) monotonic_species = 1 for j in set(range(species)).difference(correlated_species): attractor_concs = [a[j] for a in sorted_attractors] if attractor_concs == sorted(attractor_concs) or attractor_concs == sorted(attractor_concs, reverse=True): monotonic_species += 1 correlated_species.add(j) most_monotonic_species = max(most_monotonic_species, monotonic_species) return len(attractors), most_monotonic_species def categorizeattractors(report): """Create a dictionary of attractor summary tuples to lists of their occurrences from all parameter sets in the report.""" summary_occurrences = collections.defaultdict(list) pset_list = report['psets'] if isinstance(report, dict) else report for pset in pset_list: summary_occurrences[summarizeattractors(pset)].append(pset) return summary_occurrences def specificrulevalue(ruleset, summary, default=None): """ Determine the most specific policy for a system with the given multiattractor summary. The ruleset is a dict of rules, where each key is an aspect of varying specificity: - 2-tuples are the most specific and match systems with that summary. - Integers are less specific and match systems with that number of distinct attractors. - The None key indicates the default rule. """ specific = None attractors, monotonic = summary if summary in ruleset: specific = summary elif attractors in ruleset: specific = attractors return ruleset[specific] if specific in ruleset else default def applydownsample(summary_occurrences, downsample): """ Downsample a categorized collection of systems according to a ruleset. Arguments: - summary_occurrences: dict produced by categorizeattractors - downsample: system ruleset where the values are the number of each system type to keep (if int) or percent of systems to keep (if string ending in '%') Returns a flat list of pset/system reports. 
""" filtered_psets = [] for summary, occurrences in summary_occurrences.items(): n_psets = None if downsample is not None: limit_rule = specificrulevalue(downsample, summary, default=len(occurrences)) if isinstance(limit_rule, int): n_psets = limit_rule else: percent = float(limit_rule.split('%')[0]) n_psets = int(np.ceil(percent * len(occurrences) / 100)) if n_psets is None or n_psets >= len(occurrences): filtered_psets.extend(occurrences) else: filtered_psets.extend(random.sample(occurrences, n_psets)) return filtered_psets def plotmultistability(report, figsize=None, label_counts=False, colorbar=True): """ Set up a multistability table in the current pyplot. Arguments: - report: full parameter sampling report (likely deserialized from JSON) - figsize: figure size as a tuple of inches (width by height) - label_counts: whether to label cells with the count of systems - colorbar: whether to show a colorbar for the cell intensities/colors """ summary_occurrences = categorizeattractors(report) max_attractors = max(s[0] for s in summary_occurrences.keys()) min_attractors = min(s[0] for s in summary_occurrences.keys()) max_monotonic = len(report['species_names']) min_monotonic = 1 width = max_attractors - min_attractors + 1 x_range = range(min_attractors, max_attractors + 1) height = max_monotonic - min_monotonic + 1 y_range = reversed(range(min_monotonic, max_monotonic + 1)) heatmap_pixels = np.zeros((height, width), dtype=int) oscillators = np.zeros((height, width), dtype=int) for summary, occurrences in summary_occurrences.items(): x = summary[0] - min_attractors y = max_monotonic - summary[1] heatmap_pixels[y][x] = len(occurrences) oscillators[y][x] = sum(1 for oc in occurrences if any(isoscillator(at) for at in oc['attractors'])) fig, ax = plt.subplots(figsize=figsize) im = ax.imshow(heatmap_pixels, norm=mplcolors.LogNorm(vmax=heatmap_pixels.max())) if colorbar: fig.colorbar(im) ax.set_xticks(range(width)) ax.set_yticks(range(height)) ax.set_xticklabels([str(n) for n in x_range]) ax.set_yticklabels([str(n) for n in y_range]) ax.set_xlabel('Attractors') ax.set_ylabel('Monotonically correlated species') if label_counts: for y in range(height): for x in range(width): if heatmap_pixels[y][x] > 0: text = str(heatmap_pixels[y][x]) if oscillators[y][x] > 0: text = f'{text}\n({oscillators[y][x]} osc.)' ax.text(x, y, text, ha='center', va='center', color='gray') def plotattractors(report, reduction, figsize=None, labelsize=None, connect_psets=False, contour=False, downsample=None, density_downsample=None, focus=None, focus_osc=False, hide_defocused=False, color_code=False, square=False): """ Set up a hexbin or scatter-line plot in the current pyplot. Arguments: - report: full parameter sampling report - reduction: how to map concentration values to 2D space: an instance of e.g. 
PCA2D or AverageLog - figsize: figure size as a tuple of inches (width by height) - labelsize: font size for axis labels - connect_psets: whether to make a scatter-line plot instead of a hexbin plot - contour: proportion of density outside the lowest contour level, or False to not add contour lines - downsample: ruleset to downsample systems for display - density_downsample: ruleset to downsample systems for contour/density estimation - focus: Boolean-valued ruleset to focus systems (scatter-line only, default all focused) - focus_osc: whether to focus systems containing oscillators (scatter-line only, will defocus all others if focus not set) - hide_defocused: whether to hide all non-focused systems (scatter-line only) - color_code: whether to color lines by system type (scatter-line only) - square: whether to force a square plot """ reduction.prepare(report) random.seed(1) summary_occurrences = categorizeattractors(report) filtered_psets = applydownsample(summary_occurrences, downsample) points = reduction.reduce(psets_matrix(filtered_psets)) xlabel, ylabel = reduction.labels() fig, ax_main = plt.subplots(figsize=figsize) if connect_psets: distinct_summaries = list(categorizeattractors(filtered_psets).keys()) default_cycle = cycler.cycler(color=['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:gray', 'tab:olive', 'tab:cyan']) default_cycler = default_cycle() defocus_default_cycler = plt.rcParams['axes.prop_cycle']() for i, pset in enumerate(filtered_psets): pset_matrix = np.array(caricatureattractors(pset['attractors'])) pset_xy = reduction.reduce(pset_matrix) sorted_attractors = pset_xy[pset_matrix[:, 0].argsort(), :] point_mask = [not isoscillator(a) for a in pset['attractors']] has_oscillator = not all(point_mask) z = i linewidth = None oscwidth = 1.6 dotsize = 36.0 defocused = False summary = summarizeattractors(pset) if focus or focus_osc: if (focus_osc and has_oscillator) or (focus and specificrulevalue(focus, summary, default=False)): z += len(filtered_psets) + 1 elif hide_defocused: continue else: linewidth = 0.8 oscwidth = 1.1 dotsize = 10.0 defocused = True if color_code: hue, sat, lum, hue_vary_width = summaryhsl(distinct_summaries, summary) hue += random.uniform(0, hue_vary_width) if not defocused: lum *= random.uniform(0.85, 1.1) sat *= random.uniform(0.8, 1.0) elif defocused: next_prop = next(defocus_default_cycler) color_spec = next_prop['color'] r, g, b = mplcolors.to_rgb(color_spec) hue, sat, lum = colorsys.rgb_to_hls(r, g, b) if defocused: lum = min(1 - (1 - lum) * random.uniform(0.3, 0.5), 0.9) sat *= random.uniform(0.35, 0.45) if color_code or defocused: pset_color = colorsys.hls_to_rgb(hue, lum, sat) else: pset_color = next(default_cycler)['color'] ax_main.plot(sorted_attractors[:, 0], sorted_attractors[:, 1], lw=linewidth, color=pset_color, zorder=z) pointprops = {'s': dotsize} if defocused or not contour else {'linewidths': 1.0, 'edgecolors': 'white', 's': dotsize * 1.3} ax_main.scatter(pset_xy[point_mask, 0], pset_xy[point_mask, 1], color=pset_color, zorder=z, **pointprops) for osc in (a for a in pset['attractors'] if isoscillator(a)): vertices = np.array(osc['orbit']) projected_vertices = reduction.reduce(vertices) if projected_vertices.shape[0] >= 3: projected_vertices = np.vstack((projected_vertices, projected_vertices[0, :])) polygon = mplpatch.Polygon(projected_vertices, color=pset_color, linewidth=oscwidth, linestyle='--', fill=False, zorder=z) ax_main.add_patch(polygon) else: cmap = copy.copy(plt.get_cmap('viridis')) 
cmap.set_under('white', 1.0) hex_args = {'linewidths': 0.2, 'norm': mplcolors.LogNorm(vmin=2), 'cmap': cmap, 'gridsize': 40} bin_results = ax_main.hexbin(points[:, 0], points[:, 1], **hex_args) fig.colorbar(bin_results, ax=ax_main, label='Attractors') if contour: random.seed(1) density_filtered_psets = applydownsample(summary_occurrences, density_downsample) density_points = reduction.reduce(psets_matrix(density_filtered_psets)) kde = neighbors.KernelDensity(kernel='gaussian', bandwidth=0.1).fit(density_points) bin_x, bin_y = np.mgrid[(density_points[:, 0].min() - 0.15):(density_points[:, 0].max() + 0.15):80j, (density_points[:, 1].min() - 0.15):(density_points[:, 1].max() + 0.15):80j] density = np.exp(kde.score_samples(np.vstack((bin_x.flatten(), bin_y.flatten())).T)) sorted_densities = np.sort(density.flatten()) total_density = np.sum(sorted_densities) cdf = np.cumsum(sorted_densities) / total_density if connect_psets: cutoff_indices = [np.where(cdf > percentile)[0][0] for percentile in np.linspace(contour, 1, 5)[:-1]] levels = [sorted_densities[c] for c in cutoff_indices] + [total_density] colors = ['#c65ff560', '#af36e388', '#b300ff90', '#8500e2a0'] ax_main.contourf(bin_x, bin_y, density.reshape(bin_x.shape), levels, colors=colors, zorder=len(filtered_psets)) else: cutoff_indices = [np.where(cdf > percentile)[0][0] for percentile in np.linspace(contour, 0.9, 6)] levels = [sorted_densities[c] for c in cutoff_indices] widths = np.linspace(0.5, 1.4, 6) ax_main.contour(bin_x, bin_y, density.reshape(bin_x.shape), levels, linewidths=widths, colors='black', zorder=(len(filtered_psets) * 3), alpha=0.6) if square: ax_main.axis('square') elif reduction.equalscale(): ax_main.axis('equal') if reduction.zerobased('x'): ax_main.set_xlim(left=0) if reduction.zerobased('y'): ax_main.set_ylim(bottom=0) locator_base = reduction.locatorbase() if locator_base is not None: ax_main.xaxis.set_major_locator(mpltick.MultipleLocator(base=locator_base)) ax_main.yaxis.set_major_locator(mpltick.MultipleLocator(base=locator_base)) x_text = ax_main.set_xlabel(xlabel) if labelsize is not None: x_text.set_fontsize(labelsize) y_text = ax_main.set_ylabel(ylabel) if labelsize is not None: y_text.set_fontsize(labelsize) def psets_matrix(psets): """Create a NumPy 2D array of all attractors in all given parameter set reports.""" full_matrix = None for pset in psets: numeric_attractors = np.array(caricatureattractors(pset['attractors'])) if full_matrix is None: full_matrix = numeric_attractors else: full_matrix = np.vstack((full_matrix, numeric_attractors)) return full_matrix class PCA2D(): """Reduction that puts the first principal component on the X axis and second on the Y axis.""" def __init__(self): self.pca = decomposition.PCA(n_components=2) def prepare(self, report): self.pca.fit(psets_matrix(report['psets'])) def reduce(self, matrix): return self.pca.transform(matrix) def labels(self): return 'PC1', 'PC2' def zerobased(self, axis): return False def locatorbase(self): return 1.0 def equalscale(self): return True class AverageLog(): """Reduction that puts specified species' concentrations on each axis, averaging the logs if multiple species go on one axis.""" def __init__(self, settings=None): """ Specify the reduction axes as a string. X and Y axes' settings are separated by a slash. Each axis' settings is a comma-separated list of species names. A species name can be prefixed with a dash to invert it. 
""" self.settings = settings def prepare(self, report): self.names = report['species_names'] if self.settings is None: raise NotImplementedError('You must specify genes for reduction axes') else: x, y = self.settings.split('/') self.x_components = [self._parsecomponent(report, c.strip()) for c in x.split(',')] self.y_components = [self._parsecomponent(report, c.strip()) for c in y.split(',')] def reduce(self, matrix): return np.stack((self._componentwisereduce(matrix, self.x_components), self._componentwisereduce(matrix, self.y_components)), 1) def labels(self): return ', '.join((self._componentname(c) for c in self.x_components)), ', '.join((self._componentname(c) for c in self.y_components)) def zerobased(self, axis): axis_components = self.x_components if axis == 'x' else self.y_components return len(axis_components) == 1 and axis_components[0][1] == 1 def locatorbase(self): return 0.5 if self.equalscale() else None def equalscale(self): return self.zerobased('x') and self.zerobased('y') def _parsecomponent(self, report, text): if text.startswith('-'): text = text[1:] factor = -1 else: factor = 1 index = self.names.index(text) if text in self.names else self.names.index(f'X_{text}') return index, factor def _componentwisereduce(self, matrix, components): results = None for index, factor in components: component_log = np.log(matrix[:, index]) * factor if results is None: results = component_log else: results += component_log return np.exp(results / len(components)) def _componentname(self, component): index, factor = component prefix = '-' if factor < 0 else '' name = self.names[index] return prefix + (name[2:] if name.startswith('X_') else name) def summaryhsl(all_summaries, summary): """ Choose a color for the given system summary to distinguish it from other types of systems. Returns hue, saturation, and luminance for the start of the range, and how much the hue can be randomly varied while staying distinguishable. """ lowest_att = min(att for att, ms in all_summaries) highest_att = max(att for att, ms in all_summaries) att_range = highest_att - lowest_att + 1 attractors, monotonic_species = summary lowest_ms = min(ms for att, ms in all_summaries if att == attractors) highest_ms = max(ms for att, ms in all_summaries if att == attractors) ms_range = highest_ms - lowest_ms + 1 bin_width = 1 / (ms_range + 1) / att_range hue = ((highest_att - attractors) / att_range) + (highest_ms - monotonic_species) * bin_width variability_squeeze = (2 if att_range > 1 else 1) * (2 if ms_range > 1 else 1) return hue, 1, colorsys.ONE_THIRD, bin_width / variability_squeeze def plotheatmap(report, figsize=None, labelsize=None, conc_colorbar=False, arcs=None, downsample=None, arc_downsample=None, color_columns=False, osc_orbits=1, fold_dist=None, bicluster=False, osc_linkage=0): """ Set up a cluster-heatmap in the current pyplot. 
Arguments: - report: full parameter set sampling report - figsize: figure size as a tuple of inches (width by height) - labelsize: font size for species/column labels - conc_colorbar: whether to add a colorbar for concentrations (matching the heatmap) - arcs: "arcs" to connect parameter sets' attractors by arcs, "straight" to connect by sharp lines, or None for no connectors - downsample: downsampling ruleset for showing systems in the heatmap - arc_downsample: downsampling ruleset for connecting systems in connector columns (columns will be hidden if no systems are selected for them) - color_columns: whether to color connector column labels (will match scatter-line plot if same downsampling ruleset is used) - osc_orbits: how many orbits of the slowest oscillator to show in a gradient - fold_dist: maximum distance under which attractors will be folded into one heatmap row (also adds fold intensity column and colorbar) - bicluster: whether to also cluster species (adds a dendrogram at the top) - osc_linkage: how strongly to keep oscillatory attractors together (1 usually puts most together, 2 usually moves them to the top) """ gene_names = [(n[2:] if n.startswith('X_') else n) for n in report['species_names']] random.seed(1) summary_occurrences = categorizeattractors(report) filtered_psets = applydownsample(summary_occurrences, downsample) filtered_pset_types = categorizeattractors(filtered_psets) distinct_summaries = list(filtered_pset_types.keys()) if arcs: arc_pset_types = categorizeattractors(applydownsample(filtered_pset_types, arc_downsample)) if arc_downsample else filtered_pset_types for att, ms in list(arc_pset_types.keys()): if att <= 1: del arc_pset_types[att, ms] dendrogram_ratio = 3 / (13 + 2 * len(arc_pset_types)) else: dendrogram_ratio = 0.2 detail_matrix = None unique_fingerprints = None row_redundancies = None for pset in filtered_psets: pset['indexes'] = [] for attractor in pset['attractors']: linkable = caricatureattractor(attractor) if isoscillator(attractor): linkable.append(osc_linkage) fingerprint = [100] orbit = np.array(attractor['orbit']) for s, species in enumerate(attractor['species']): avg_speed = np.mean(np.abs(orbit[1:, s] - orbit[:-1, s])) / (orbit.shape[0] - 1) linkable.append(avg_speed) fingerprint.extend([np.min(orbit[:, s]), np.max(orbit[:, s]), avg_speed * osc_linkage]) else: linkable.extend([0] * (len(gene_names) + 1)) fingerprint = [0] for conc in attractor: fingerprint.extend([conc, conc, 0]) linkable = np.array(linkable) fingerprint = np.array(fingerprint) if detail_matrix is None: detail_matrix = np.vstack((linkable, )) if fold_dist is not None: unique_fingerprints = np.vstack((fingerprint, )) row_redundancies = [1] pset['indexes'].append(0) elif fold_dist is None: pset['indexes'].append(detail_matrix.shape[0]) detail_matrix = np.vstack((detail_matrix, linkable)) else: existing_indexes, = np.where(np.linalg.norm(unique_fingerprints - fingerprint, axis=1) < fold_dist * 2) if len(existing_indexes) > 0: pset['indexes'].append(existing_indexes[0]) row_redundancies[existing_indexes[0]] += 1 else: pset['indexes'].append(detail_matrix.shape[0]) detail_matrix = np.vstack((detail_matrix, linkable)) unique_fingerprints = np.vstack((unique_fingerprints, fingerprint)) row_redundancies.append(1) matrix = detail_matrix[:, :len(gene_names)] linkage = spcluster.hierarchy.linkage(detail_matrix, metric='euclidean', method='average') if osc_linkage > 0 else None gene_dendrogram_ratio = 0.1 if bicluster else 0 figsize = (10, 10) if figsize is None else figsize cg = 
sns.clustermap(matrix, row_linkage=linkage, col_cluster=bicluster, cbar_pos=None, dendrogram_ratio=(dendrogram_ratio, gene_dendrogram_ratio), xticklabels=gene_names, yticklabels=False, cmap='seismic', linecolor=None, rasterized=True, figsize=figsize) matrix_display_ind = {v: k for k, v in enumerate(cg.dendrogram_row.reordered_ind)} gene_display_ind = {v: k for k, v in enumerate(cg.dendrogram_col.reordered_ind)} if bicluster else {n: n for n in range(len(gene_names))} heatmap_index = 1 if fold_dist is None else 2 width_ratios = [2, 8] if arcs: width_ratios = [3, 10] + [2] * len(arc_pset_types) if fold_dist is not None: width_ratios.insert(1, width_ratios[1] * len(gene_names) * 0.01) width_ratios[2] -= width_ratios[1] rows = 2 if bicluster else 1 main_row = rows - 1 height_ratios = (1, 9) if bicluster else None new_gs = plt.GridSpec(rows, len(width_ratios), figure=cg.fig, width_ratios=width_ratios, height_ratios=height_ratios) cg.ax_heatmap.set_position(new_gs[main_row, heatmap_index].get_position(cg.fig)) if labelsize is not None: cg.ax_heatmap.tick_params(axis='x', labelsize=labelsize) if bicluster: cg.ax_col_dendrogram.set_position(new_gs[0, heatmap_index].get_position(cg.fig)) any_arc_columns = arcs is not None and len(arc_pset_types) > 0 if arcs: for fpt_id, summary in enumerate(sorted(arc_pset_types.keys(), key=lambda am: am[0] * 100 + am[1], reverse=True)): ax_arcs = cg.fig.add_subplot(new_gs[main_row, heatmap_index + 1 + fpt_id], sharey=cg.ax_heatmap) ax_arcs.tick_params(labelbottom=False, labelleft=False, bottom=False) color_cycle = ax_arcs._get_lines.prop_cycler for pset_id, pset in enumerate(arc_pset_types[summary]): if arcs == 'straight': height = 1.85 - 1.6 * pset_id / len(arc_pset_types[summary]) steepness = 0.18 * (1 - (height - 0.35) / 1.6) else: height = 1.75 - 0.2 * (pset_id % 8) + random.uniform(0, 0.1) color = next(color_cycle)['color'] rows = sorted(matrix_display_ind[i] for i in pset['indexes']) for i in range(len(rows) - 1): a, b = rows[i:(i + 2)] if a != b: if arcs == 'straight': segments = [[(0, a + 0.5), (height, a + 0.8 + steepness), (height, b + 0.2 - steepness), (0, b + 0.5)]] lc = mplcollect.LineCollection(segments, colors=color, linewidths=0.8) ax_arcs.add_collection(lc) else: ax_arcs.add_patch(mplpatch.Arc((0, (a + b) / 2 + 0.5), height, b - a, 180.0, 90.0, 270.0, edgecolor=color, linewidth=0.7)) if color_columns: hue, sat, lum, hue_vary_width = summaryhsl(distinct_summaries, summary) col_color = colorsys.hls_to_rgb(hue + hue_vary_width / 2, lum, sat) else: col_color = 'black' ax_arcs.set_xlabel(f'{summary[0]} att.,\n{summary[1]} m.s.', color=col_color) if arcs == 'straight': ax_arcs.set_xlim(0, 2) for spine in ['top', 'right', 'bottom']: ax_arcs.spines[spine].set_visible(False) mesh = cg.ax_heatmap.collections[0] mesh.set_edgecolor('face') mesh.set_antialiased(True) max_orbit_len = 0 for pset in filtered_psets: for attr in pset['attractors']: if isoscillator(attr): max_orbit_len = max(max_orbit_len, len(attr['orbit'])) orbit_render_len = max_orbit_len * osc_orbits for pset in filtered_psets: for index, attr in zip(pset['indexes'], pset['attractors']): if isoscillator(attr): display_y = matrix_display_ind[index] orbit = np.array(attr['orbit']) for x in range(orbit.shape[1]): display_x = gene_display_ind[x] x_stops = np.linspace(display_x, display_x + 1, orbit_render_len) color_stops = np.tile(np.vstack((orbit[:, x], orbit[:, x])), int(np.ceil(orbit_render_len / orbit.shape[0])))[:, :orbit_render_len] cg.ax_heatmap.pcolormesh(x_stops, [display_y, display_y + 
1], color_stops, shading='gouraud', cmap=mesh.cmap, norm=mesh.norm, rasterized=True, aa=True) if fold_dist is not None: ax_redundancy = cg.fig.add_subplot(new_gs[main_row, 1], sharey=cg.ax_heatmap) reordered_redundancies = np.zeros((matrix.shape[0], 1)) for i, redundancy in enumerate(row_redundancies): reordered_redundancies[matrix_display_ind[i], 0] = redundancy fold_mesh = ax_redundancy.pcolormesh(reordered_redundancies, cmap='inferno', rasterized=True) ax_redundancy.tick_params(labelbottom=False, labelleft=False, bottom=False) for spine in ['top', 'left', 'bottom']: ax_redundancy.spines[spine].set_visible(False) if bicluster and (any_arc_columns or not conc_colorbar): ax_corner = cg.fig.add_subplot(new_gs[0, 0]) ax_corner.axis('off') ax_fold_cbar = mptinset.inset_axes(ax_corner, width='85%', height='20%', loc='center left') cg.fig.colorbar(fold_mesh, cax=ax_fold_cbar, orientation='horizontal', label='Instances') ax_fold_cbar.xaxis.set_label_position('top') largest_redundancy = reordered_redundancies.max() if largest_redundancy >= 10: tick_step = (largest_redundancy // 10) * 5 ax_fold_cbar.xaxis.set_major_locator(mpltick.MultipleLocator(tick_step)) else: ax_fold_cbar = mptinset.inset_axes(cg.ax_row_dendrogram, width='15%', height='15%', loc='lower left') cg.fig.colorbar(fold_mesh, cax=ax_fold_cbar, label='Instances') ax_fold_cbar.yaxis.set_label_position('left') if conc_colorbar: if bicluster: if fold_dist is not None and any_arc_columns: ax_conc_cbar = mptinset.inset_axes(cg.ax_col_dendrogram, width='100%', height='100%', bbox_to_anchor=(1.01, 0.4, 0.19, 0.2), bbox_transform=cg.ax_col_dendrogram.transAxes, borderpad=0) else: ax_corner = cg.fig.add_subplot(new_gs[0, 0]) ax_corner.axis('off') ax_conc_cbar = mptinset.inset_axes(ax_corner, width='80%', height='20%', loc='center left') cg.fig.colorbar(mesh, cax=ax_conc_cbar, orientation='horizontal', label='Conc.') ax_conc_cbar.xaxis.set_label_position('top') else: ax_conc_cbar = mptinset.inset_axes(cg.ax_row_dendrogram, width='15%', height='15%', loc='upper left') cg.fig.colorbar(mesh, cax=ax_conc_cbar, label='Conc.') ax_conc_cbar.yaxis.set_label_position('left') def deduplicateoscillators(report): """Eliminate oscillators that are extremely similar to another oscillator in each system, in-place.""" if not 'ftpoints' in report: return distance_cutoff = 15 * len(report['species_names']) / report['ftpoints'] def isorbitfar(orbit_from, orbit_to): min_distances = [] for pt in range(orbit_from.shape[0]): min_distance = np.min(np.linalg.norm(orbit_to - orbit_from[pt, :], axis=1)) if min_distance > distance_cutoff * 5: return True min_distances.append(min_distance) avg_min_distance = np.mean(min_distances) return avg_min_distance > distance_cutoff for pset in report['psets']: seen_orbits = [] attractors = pset['attractors'] for i in reversed(range(len(attractors))): attractor = attractors[i] if not isoscillator(attractor): continue orbit = np.array(attractor['orbit']) is_duplicate = False for seen_orbit in seen_orbits: if not (isorbitfar(orbit, seen_orbit) or isorbitfar(seen_orbit, orbit)): is_duplicate = True break if is_duplicate: del attractors[i] else: seen_orbits.append(orbit) def droposcillators(report): """Eliminate all systems containing any oscillatory attractors, in-place.""" report['psets'] = [p for p in report['psets'] if not any(isoscillator(a) for a in p['attractors'])] def parse_systemtype(system_spec): """ Parse a system type/summary specification. 
Examples: - "4att3ms" to match four-attractor systems with three species concentrations' monotonically correlated to each other - "4" to match four-attractor systems - "else" for a default rule Returns an object usable as a key by specificrulevalue. """ if system_spec == 'else': return None elif 'att' in system_spec: att, ms_rest = system_spec.split('att') return (int(att), int(ms_rest.split('ms')[0])) else: return int(system_spec) def parse_downsample(arg_list): """Parse a list of downsampling policies into a dict usable by specificrulevalue.""" def parse_one(arg): """ Parse a single downsampling policy. A downsampling policy consists of a system type (per parse_systemtype), a colon, and a limit or retention probability. A string ending in a percent sign is interpreted as a retention probability; otherwise, the policy value must be an integer specifying a limit. Returns a key-value pair usable by specificrulevalue. """ column, downsample = arg.split(':') if not downsample.endswith('%'): downsample = int(downsample) return (parse_systemtype(column), downsample) return dict(parse_one(arg) for arg in arg_list) if arg_list else None if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('report', type=str, help='input JSON report filename') parser.add_argument('graph', type=str, help='output graph image filename') parser.add_argument('--dpi', type=int, default=150, help='output bitmap image DPI') parser.add_argument('--figsize', type=float, nargs=2, help='figure dimensions in inches') parser.add_argument('--fontsize', type=float, help='default font size') parser.add_argument('--majorfontsize', type=float, help='font size for prominent text') parser.add_argument('--pointonly', action='store_true', help='do not show systems involving oscillators') subcmds = parser.add_subparsers(dest='command', required=True, help='kind of graph to make') table_parser = subcmds.add_parser('table') table_parser.add_argument('--counts', action='store_true', help='display counts in populated cells') table_parser.add_argument('--colorbar', action='store_true', help='show colorbar even when counts are displayed') scatterplot_parser = subcmds.add_parser('scatterplot') scatterplot_parser.add_argument('--line', action='store_true', help='connect attractors from the same parameter set') scatterplot_parser.add_argument('--contour', nargs='?', type=float, const=0.1, help='show density contour lines (starting at a CDF quantile)') scatterplot_parser.add_argument('--reduction', type=str, help='species for dimensions: X1,X2/Y1,Y2 (negatives allowed) or "pca" to run PCA') scatterplot_parser.add_argument('--downsample', nargs='+', type=str, help='chance of keeping a parameter set with specified type, e.g. 2:10% or 4att3ms:0') scatterplot_parser.add_argument('--density-downsample', '--dds', nargs='+', type=str, help='downsampling rules for purposes of density estimation') scatterplot_parser.add_argument('--focus', nargs='*', type=str, help='type(s) of parameter sets to focus on, e.g. 
3att4ms or 4') scatterplot_parser.add_argument('--focus-osc', action='store_true', help='always focus parameter sets containing oscillations') scatterplot_parser.add_argument('--color', '--cc', action='store_true', help='color lines by parameter set type') scatterplot_parser.add_argument('--square', action='store_true', help='always use square axes') heatmap_parser = subcmds.add_parser('heatmap') heatmap_parser.add_argument('--colorbar', action='store_true', help='add colorbar for species concentrations') heatmap_parser.add_argument('--connect', type=str, choices=['arc', 'straight'], help='connect attractors from the same parameter set') heatmap_parser.add_argument('--connect-downsample', '--cds', nargs='+', help='downsample connectors e.g. 3att4ms:10% or 4att2ms:5') heatmap_parser.add_argument('--color-coordinate', '--cc', action='store_true', help='coordinate connection column label colors with scatterplot focus') heatmap_parser.add_argument('--downsample', nargs='+', type=str, help='chance of keeping a parameter set with specified type, e.g. e.g. 2:10% or 4att3ms:0') heatmap_parser.add_argument('--orbits', type=int, default=1, help='number of orbits to display for oscillatory attractors') heatmap_parser.add_argument('--osc-together', '--ot', nargs='?', type=float, const=1, default=0, help='cluster oscillatory attractors near each other') heatmap_parser.add_argument('--fold', type=float, help='distance under which attractors will be combined into one heatmap row') heatmap_parser.add_argument('--bicluster', action='store_true', help='also cluster genes') args = parser.parse_args() with open(args.report) as f: report = json.loads(f.read()) if args.pointonly: droposcillators(report) else: deduplicateoscillators(report) if args.fontsize is not None: plt.rc('font', size=args.fontsize) figsize = tuple(args.figsize) if args.figsize is not None else None if args.command == 'table': plotmultistability(report, figsize=figsize, label_counts=args.counts, colorbar=(args.colorbar or not args.counts)) elif args.command == 'scatterplot': reduction = PCA2D() if args.reduction == 'pca' else AverageLog(args.reduction) focus = {parse_systemtype(spec): True for spec in args.focus} if args.focus else None square = args.square or (args.reduction == 'pca') plotattractors(report, reduction, figsize=figsize, labelsize=args.majorfontsize, connect_psets=args.line, contour=args.contour, downsample=parse_downsample(args.downsample), density_downsample=parse_downsample(args.density_downsample), focus=focus, focus_osc=args.focus_osc, color_code=args.color, square=square) elif args.command == 'heatmap': if figsize is None and args.fontsize is None: plt.rc('font', size=18) plotheatmap(report, figsize=figsize, labelsize=args.majorfontsize, conc_colorbar=args.colorbar, arcs=args.connect, downsample=parse_downsample(args.downsample), arc_downsample=parse_downsample(args.connect_downsample), color_columns=args.color_coordinate, osc_orbits=args.orbits, fold_dist=args.fold, bicluster=args.bicluster, osc_linkage=args.osc_together) plt.savefig(args.graph, dpi=args.dpi) plt.close()
en
0.825817
# Creates visualizations from JSON reports generated by multistability.py Determine whether the given attractor value is an oscillatory attractor. Turn the given attractor information value (which might be an oscillation) into a single list, for comparison. Caricature each species in the given attractor set (nested list). Create a 2-tuple summarizing a set of attractors: attractor count, monotonic species count. Create a dictionary of attractor summary tuples to lists of their occurrences from all parameter sets in the report. Determine the most specific policy for a system with the given multiattractor summary. The ruleset is a dict of rules, where each key is an aspect of varying specificity: - 2-tuples are the most specific and match systems with that summary. - Integers are less specific and match systems with that number of distinct attractors. - The None key indicates the default rule. Downsample a categorized collection of systems according to a ruleset. Arguments: - summary_occurrences: dict produced by categorizeattractors - downsample: system ruleset where the values are the number of each system type to keep (if int) or percent of systems to keep (if string ending in '%') Returns a flat list of pset/system reports. Set up a multistability table in the current pyplot. Arguments: - report: full parameter sampling report (likely deserialized from JSON) - figsize: figure size as a tuple of inches (width by height) - label_counts: whether to label cells with the count of systems - colorbar: whether to show a colorbar for the cell intensities/colors Set up a hexbin or scatter-line plot in the current pyplot. Arguments: - report: full parameter sampling report - reduction: how to map concentration values to 2D space: an instance of e.g. PCA2D or AverageLog - figsize: figure size as a tuple of inches (width by height) - labelsize: font size for axis labels - connect_psets: whether to make a scatter-line plot instead of a hexbin plot - contour: proportion of density outside the lowest contour level, or False to not add contour lines - downsample: ruleset to downsample systems for display - density_downsample: ruleset to downsample systems for contour/density estimation - focus: Boolean-valued ruleset to focus systems (scatter-line only, default all focused) - focus_osc: whether to focus systems containing oscillators (scatter-line only, will defocus all others if focus not set) - hide_defocused: whether to hide all non-focused systems (scatter-line only) - color_code: whether to color lines by system type (scatter-line only) - square: whether to force a square plot Create a NumPy 2D array of all attractors in all given parameter set reports. Reduction that puts the first principal component on the X axis and second on the Y axis. Reduction that puts specified species' concentrations on each axis, averaging the logs if multiple species go on one axis. Specify the reduction axes as a string. X and Y axes' settings are separated by a slash. Each axis' settings is a comma-separated list of species names. A species name can be prefixed with a dash to invert it. Choose a color for the given system summary to distinguish it from other types of systems. Returns hue, saturation, and luminance for the start of the range, and how much the hue can be randomly varied while staying distinguishable. Set up a cluster-heatmap in the current pyplot. 
Arguments: - report: full parameter set sampling report - figsize: figure size as a tuple of inches (width by height) - labelsize: font size for species/column labels - conc_colorbar: whether to add a colorbar for concentrations (matching the heatmap) - arcs: "arcs" to connect parameter sets' attractors by arcs, "straight" to connect by sharp lines, or None for no connectors - downsample: downsampling ruleset for showing systems in the heatmap - arc_downsample: downsampling ruleset for connecting systems in connector columns (columns will be hidden if no systems are selected for them) - color_columns: whether to color connector column labels (will match scatter-line plot if same downsampling ruleset is used) - osc_orbits: how many orbits of the slowest oscillator to show in a gradient - fold_dist: maximum distance under which attractors will be folded into one heatmap row (also adds fold intensity column and colorbar) - bicluster: whether to also cluster species (adds a dendrogram at the top) - osc_linkage: how strongly to keep oscillatory attractors together (1 usually puts most together, 2 usually moves them to the top) Eliminate oscillators that are extremely similar to another oscillator in each system, in-place. Eliminate all systems containing any oscillatory attractors, in-place. Parse a system type/summary specification. Examples: - "4att3ms" to match four-attractor systems with three species concentrations' monotonically correlated to each other - "4" to match four-attractor systems - "else" for a default rule Returns an object usable as a key by specificrulevalue. Parse a list of downsampling policies into a dict usable by specificrulevalue. Parse a single downsampling policy. A downsampling policy consists of a system type (per parse_systemtype), a colon, and a limit or retention probability. A string ending in a percent sign is interpreted as a retention probability; otherwise, the policy value must be an integer specifying a limit. Returns a key-value pair usable by specificrulevalue.
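The docstring summary above describes how each parameter set is reduced to a 2-tuple of (attractor count, monotonically correlated species count). The short standalone sketch below restates that counting logic on hypothetical toy data; it mirrors the behaviour documented for summarizeattractors but does not import the script, so the example attractor values and the helper name summarize are illustrative only.

# Hedged sketch of the (attractor count, monotonic species count) summary described above.
def summarize(attractors):
    n_species = len(attractors[0])
    correlated, best = set(), 0
    for i in range(n_species):
        if i in correlated:
            continue
        # Order the attractors by species i, then count species that rise or fall
        # monotonically under that ordering.
        ordered = sorted(attractors, key=lambda a: a[i])
        correlated.add(i)
        monotonic = 1
        for j in set(range(n_species)) - correlated:
            concs = [a[j] for a in ordered]
            if concs == sorted(concs) or concs == sorted(concs, reverse=True):
                monotonic += 1
                correlated.add(j)
        best = max(best, monotonic)
    return len(attractors), best

# Toy data: species 0 and 1 rise together across attractors while species 2 falls,
# so all three count as monotonically correlated.
print(summarize([[1.0, 2.0, 9.0], [2.0, 4.0, 5.0], [3.0, 8.0, 1.0]]))  # (3, 3)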
2.668521
3
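The downsampling and focus rules in the visualization script above are resolved by specificrulevalue, which prefers an exact (attractors, monotonic species) key, then a bare attractor-count key, then the None/'else' default. A minimal sketch of that resolution order is given below; it uses a hypothetical ruleset and a local helper name lookup_rule rather than importing the module.

# Hedged sketch of the most-specific-rule lookup described above.
# Key precedence: (attractors, monotonic_species) tuple > attractor count > None default.
def lookup_rule(ruleset, summary, default=None):
    attractors, _monotonic = summary
    if summary in ruleset:
        return ruleset[summary]
    if attractors in ruleset:
        return ruleset[attractors]
    return ruleset.get(None, default)

# Hypothetical ruleset, in the spirit of CLI specs like "4att3ms:0", "2:10%", "else:..."
ruleset = {(4, 3): 0, 2: '10%', None: 50}
print(lookup_rule(ruleset, (4, 3)))  # 0    -> exact summary match
print(lookup_rule(ruleset, (2, 1)))  # 10%  -> attractor-count match
print(lookup_rule(ruleset, (3, 2)))  # 50   -> falls through to the None/'else' rule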
_08_APRIL_2020/_02_eastern_bunny.py
Andrey-V-Georgiev/PythonAdvanced
1
6631545
<gh_stars>1-10 import sys size = int(input()) matrix = [] player_position = [] # Fill up the matrix and find player position for i in range(size): row_list = list(input().split()) matrix.append(row_list) for j in range(size): if row_list[j] == 'B': player_position = [i, j] player_row, player_col = player_position[0], player_position[1] biggest_sum = -sys.maxsize - 1 biggest_sum_key = '' # check up up_positions = [] up_sum = 0 for i_up in range(player_row - 1, 0, -1): val_up = matrix[i_up][player_col] if val_up == 'X': break else: up_sum += int(val_up) up_positions.append([i_up, player_col]) if biggest_sum < up_sum and len(up_positions) > 0: biggest_sum = up_sum biggest_sum_key = 'up_positions' # check down down_positions = [] down_sum = 0 for i_down in range(player_row + 1, size): val_down = matrix[i_down][player_col] if val_down == 'X': break else: down_sum += int(val_down) down_positions.append([i_down, player_col]) if biggest_sum < down_sum and len(down_positions) > 0: biggest_sum = down_sum biggest_sum_key = 'down_positions' # check left left_positions = [] left_sum = 0 for i_left in range(player_col - 1, 0, -1): val_left = matrix[player_row][i_left] if val_left == 'X': break else: left_sum += int(val_left) left_positions.append([player_row, i_left]) if biggest_sum < left_sum and len(left_positions) > 0: biggest_sum = left_sum biggest_sum_key = 'left_positions' # check right right_positions = [] right_sum = 0 for i_right in range(player_col + 1, size): val_right = matrix[player_row][i_right] if val_right == 'X': break else: right_sum += int(val_right) right_positions.append([player_row, i_right]) if biggest_sum < right_sum and len(right_positions) > 0: biggest_sum = right_sum biggest_sum_key = 'right_positions' if biggest_sum_key == 'up_positions': print('up') [print(position) for position in up_positions] print(up_sum) elif biggest_sum_key == 'down_positions': print('down') [print(position) for position in down_positions] print(down_sum) elif biggest_sum_key == 'left_positions': print('left') [print(position) for position in left_positions] print(left_sum) elif biggest_sum_key == 'right_positions': print('right') [print(position) for position in right_positions] print(right_sum)
import sys size = int(input()) matrix = [] player_position = [] # Fill up the matrix and find player position for i in range(size): row_list = list(input().split()) matrix.append(row_list) for j in range(size): if row_list[j] == 'B': player_position = [i, j] player_row, player_col = player_position[0], player_position[1] biggest_sum = -sys.maxsize - 1 biggest_sum_key = '' # check up up_positions = [] up_sum = 0 for i_up in range(player_row - 1, 0, -1): val_up = matrix[i_up][player_col] if val_up == 'X': break else: up_sum += int(val_up) up_positions.append([i_up, player_col]) if biggest_sum < up_sum and len(up_positions) > 0: biggest_sum = up_sum biggest_sum_key = 'up_positions' # check down down_positions = [] down_sum = 0 for i_down in range(player_row + 1, size): val_down = matrix[i_down][player_col] if val_down == 'X': break else: down_sum += int(val_down) down_positions.append([i_down, player_col]) if biggest_sum < down_sum and len(down_positions) > 0: biggest_sum = down_sum biggest_sum_key = 'down_positions' # check left left_positions = [] left_sum = 0 for i_left in range(player_col - 1, 0, -1): val_left = matrix[player_row][i_left] if val_left == 'X': break else: left_sum += int(val_left) left_positions.append([player_row, i_left]) if biggest_sum < left_sum and len(left_positions) > 0: biggest_sum = left_sum biggest_sum_key = 'left_positions' # check right right_positions = [] right_sum = 0 for i_right in range(player_col + 1, size): val_right = matrix[player_row][i_right] if val_right == 'X': break else: right_sum += int(val_right) right_positions.append([player_row, i_right]) if biggest_sum < right_sum and len(right_positions) > 0: biggest_sum = right_sum biggest_sum_key = 'right_positions' if biggest_sum_key == 'up_positions': print('up') [print(position) for position in up_positions] print(up_sum) elif biggest_sum_key == 'down_positions': print('down') [print(position) for position in down_positions] print(down_sum) elif biggest_sum_key == 'left_positions': print('left') [print(position) for position in left_positions] print(left_sum) elif biggest_sum_key == 'right_positions': print('right') [print(position) for position in right_positions] print(right_sum)
en
0.680345
# Fill up the matrix and find player position # check up # check down # check left # check right
3.069986
3
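The four near-identical scans in the bunny solution above (up, down, left, right) can be driven by a single walker over direction vectors. A hedged alternative sketch follows; it is not the original author's code, its tie-breaking between equal sums may differ from the original's fixed up/down/left/right order, and it checks bounds explicitly, whereas the original's range(player_row - 1, 0, -1) and range(player_col - 1, 0, -1) loops stop before index 0 and so never inspect the first row or column.

# Hedged alternative sketch: one direction-vector walker instead of four copied loops.
def scan(matrix, start, step):
    size = len(matrix)
    row, col = start[0] + step[0], start[1] + step[1]
    positions, total = [], 0
    # Walk until a wall ('X') or the edge of the board.
    while 0 <= row < size and 0 <= col < size and matrix[row][col] != 'X':
        total += int(matrix[row][col])
        positions.append([row, col])
        row, col = row + step[0], col + step[1]
    return positions, total

# Hypothetical 3x3 field: 'B' marks the bunny; 'X' cells are walls.
field = [['1', '2', 'X'],
         ['3', 'B', '4'],
         ['5', '6', '7']]
directions = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1)}
best = max(directions, key=lambda name: scan(field, (1, 1), directions[name])[1])
positions, total = scan(field, (1, 1), directions[best])
print(best)                 # down
for position in positions:  # [2, 1]
    print(position)
print(total)                # 6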
src/qalgebra/core/matrix_algebra.py
anna-naden/qalgebra
2
6631546
<filename>src/qalgebra/core/matrix_algebra.py """Matrices of Expressions.""" import numpy as np import sympy from sympy import I, Symbol, sympify from .abstract_algebra import Expression, substitute from .abstract_quantum_algebra import QuantumExpression from .exceptions import NoConjugateMatrix, NonSquareMatrix from .hilbert_space_algebra import ProductSpace, TrivialSpace from .operator_algebra import adjoint from .scalar_algebra import is_scalar __all__ = [ 'Matrix', 'block_matrix', 'diagm', 'hstackm', 'identity_matrix', 'vstackm', 'zerosm', ] __private__ = [] # anything not in __all__ must be in __private__ class Matrix: """Matrix of Expressions.""" matrix = None _hash = None def __init__(self, m): if isinstance(m, np.ndarray): self.matrix = m elif isinstance(m, Matrix): self.matrix = np.array(m.matrix) else: self.matrix = np.array(m) if len(self.matrix.shape) < 2: self.matrix = self.matrix.reshape((self.matrix.shape[0], 1)) if len(self.matrix.shape) > 2: raise ValueError("Must have a shape of length 2") @property def shape(self): """The shape of the matrix ``(nrows, ncols)``.""" return self.matrix.shape @property def block_structure(self): """For square matrices this gives the block (-diagonal) structure of the matrix as a tuple of integers that sum up to the full dimension. :rtype: tuple """ n, m = self.shape if n != m: raise AttributeError( "block_structure only defined for square matrices" ) for k in range(1, n): if (self.matrix[:k, k:] == 0).all() and ( self.matrix[k:, :k] == 0 ).all(): return (k,) + self[k:, k:].block_structure return (n,) def _get_blocks(self, block_structure): n, m = self.shape if n == m: if not sum(block_structure) == n: raise ValueError() if not len(block_structure): return () j = block_structure[0] if (self.matrix[:j, j:] == 0).all() and ( self.matrix[j:, :j] == 0 ).all(): return (self[:j, :j],) + self[j:, j:]._get_blocks( block_structure[1:] ) else: raise ValueError() elif m == 1: if not len(block_structure): return () else: return (self[: block_structure[0], :],) + self[ : block_structure[0], : ]._get_blocks(block_structure[1:]) else: raise ValueError() @property def is_zero(self): """Are all elements of the matrix zero?""" for o in self.matrix.ravel(): try: if not o.is_zero: return False except AttributeError: if not o == 0: return False return True def __hash__(self): if not self._hash: self._hash = hash( (tuple(self.matrix.ravel()), self.matrix.shape, Matrix) ) return self._hash def __eq__(self, other): if isinstance(other, Matrix): return np.all(self.matrix == other.matrix) else: return np.all(self.matrix == other) def __add__(self, other): if isinstance(other, Matrix): return Matrix(self.matrix + other.matrix) else: return Matrix(self.matrix + other) def __radd__(self, other): return Matrix(other + self.matrix) def __mul__(self, other): if isinstance(other, Matrix): return Matrix(self.matrix.dot(other.matrix)) else: return Matrix(self.matrix * other) def __rmul__(self, other): return Matrix(other * self.matrix) def __sub__(self, other): return self + (-1) * other def __rsub__(self, other): return (-1) * self + other def __neg__(self): return (-1) * self def __truediv__(self, other): if is_scalar(other): return self * (sympify(1) / other) raise NotImplementedError( "Can't divide matrix %s by %s" % (self, other) ) def transpose(self): """The transpose matrix""" return Matrix(self.matrix.T) def conjugate(self): """The element-wise conjugate matrix. 
This is defined only if all the entries in the matrix have a defined conjugate (i.e., they have a `conjugate` method). This is *not* the case for a matrix of operators. In such a case, only an elementwise :func:`adjoint` would be applicable, but this is mathematically different from a complex conjugate. Raises: NoConjugateMatrix: if any entries have no `conjugate` method """ try: return Matrix(np.conjugate(self.matrix)) except (AttributeError, TypeError): raise NoConjugateMatrix( "Matrix %s contains entries that have no defined " "conjugate" % str(self) ) @property def real(self): """Element-wise real part. Raises: NoConjugateMatrix: if entries have no `conjugate` method and no other way to determine the real part Note: A mathematically equivalent way to obtain a real matrix from a complex matrix ``M`` is:: (M.conjugate() + M) / 2 However, the result may not be identical to ``M.real``, as the latter tries to convert elements of the matrix to real values directly, if possible, and only uses the conjugate as a fall-back """ def re(val): if hasattr(val, 'real'): return val.real elif hasattr(val, 'as_real_imag'): return val.as_real_imag()[0] elif hasattr(val, 'conjugate'): return (val.conjugate() + val) / 2 else: raise NoConjugateMatrix( "Matrix entry %s contains has no defined " "conjugate" % str(val) ) # Note: Do NOT use self.matrix.real! This will give wrong results, as # numpy thinks of objects (Operators) as real, even if they have no # defined real part return self.element_wise(re) @property def imag(self): """Element-wise imaginary part. Raises: NoConjugateMatrix: if entries have no `conjugate` method and no other way to determine the imaginary part Note: A mathematically equivalent way to obtain an imaginary matrix from a complex matrix ``M`` is:: (M.conjugate() - M) / (I * 2) with same same caveats as :attr:`real`. """ def im(val): if hasattr(val, 'imag'): return val.imag elif hasattr(val, 'as_real_imag'): return val.as_real_imag()[1] elif hasattr(val, 'conjugate'): return (val.conjugate() - val) / (2 * I) else: raise NoConjugateMatrix( "Matrix entry %s contains has no defined " "conjugate" % str(val) ) # Note: Do NOT use self.matrix.real! This will give wrong results, as # numpy thinks of objects (Operators) as real, even if they have no # defined real part return self.element_wise(im) @property def T(self): """Alias for :meth:`transpose`.""" return self.transpose() def adjoint(self): """Adjoint of the matrix. This is the transpose and the Hermitian adjoint of all elements.""" return self.T.element_wise(adjoint) dag = adjoint def trace(self): if self.shape[0] == self.shape[1]: return sum(self.matrix[k, k] for k in range(self.shape[0])) raise NonSquareMatrix(repr(self)) @property def H(self): """Alias for :meth:`adjoint`.""" return self.adjoint() def __getitem__(self, item_id): item = self.matrix.__getitem__(item_id) if isinstance(item, np.ndarray): return Matrix(item) return item def element_wise(self, func, *args, **kwargs): """Apply a function to each matrix element and return the result in a new operator matrix of the same shape. Args: func (callable): A function to be applied to each element. It must take the element as its first argument. args: Additional positional arguments to be passed to `func` kwargs: Additional keyword arguments to be passed to `func` Returns: Matrix: Matrix with results of `func`, applied element-wise. 
""" s = self.shape emat = [func(o, *args, **kwargs) for o in self.matrix.ravel()] return Matrix(np.array(emat).reshape(s)) def series_expand(self, param: Symbol, about, order: int): """Expand the matrix expression as a truncated power series in a scalar parameter. Args: param: Expansion parameter. about (.Scalar): Point about which to expand. order: Maximum order of expansion >= 0 Returns: tuple of length (order+1), where the entries are the expansion coefficients. """ s = self.shape emats = zip( *[ o.series_expand(param, about, order) for o in self.matrix.ravel() ] ) return tuple((Matrix(np.array(em).reshape(s)) for em in emats)) def expand(self): """Expand each matrix element distributively. Returns: Matrix: Expanded matrix. """ return self.element_wise( lambda o: o.expand() if isinstance(o, QuantumExpression) else o ) def substitute(self, var_map): """Perform a substitution in all element of the matrix. Equivalent to applying :func:`.substitute` element-wise. Returns: Matrix: Matrix with substitutions """ if self in var_map: return var_map[self] else: return self.element_wise(substitute, var_map=var_map) @property def free_symbols(self): """Free symbols, across all elements.""" ret = set() for o in self.matrix.ravel(): try: ret = ret | o.free_symbols except AttributeError: pass return ret @property def space(self): """Combined Hilbert space of all matrix elements. If none of the elements have an associated hilbert space, :obj:`.TrivialSpace`. """ arg_spaces = [ o.space for o in self.matrix.ravel() if hasattr(o, 'space') ] if len(arg_spaces) == 0: return TrivialSpace else: return ProductSpace.create(*arg_spaces) def simplify_scalar(self, func=sympy.simplify): """Simplify all scalar expressions appearing in the Matrix.""" def element_simplify(v): if isinstance(v, sympy.Basic): return func(v) elif isinstance(v, QuantumExpression): return v.simplify_scalar(func=func) else: return v return self.element_wise(element_simplify) def _repr_latex_(self): from qalgebra import latex return "$" + latex(self) + "$" def hstackm(matrices): """Generalizes `numpy.hstack` to :class:`.Matrix` objects.""" return Matrix(np.hstack(tuple(m.matrix for m in matrices))) def vstackm(matrices): """Generalizes `numpy.vstack` to :class:`.Matrix` objects.""" arr = np.vstack(tuple(m.matrix for m in matrices)) # print(tuple(m.matrix.dtype for m in matrices)) # print(arr.dtype) return Matrix(arr) def diagm(v, k=0): """Generalizes the diagonal matrix creation capabilities of `numpy.diag` to :class:`.Matrix` objects.""" return Matrix(np.diag(v, k)) def block_matrix(A, B, C, D): r"""Generate the operator matrix with quadrants .. math:: \begin{pmatrix} A B \\ C D \end{pmatrix} Args: A (Matrix): Matrix of shape ``(n, m)`` B (Matrix): Matrix of shape ``(n, k)`` C (Matrix): Matrix of shape ``(l, m)`` D (Matrix): Matrix of shape ``(l, k)`` Returns: Matrix: The combined block matrix ``[[A, B], [C, D]]``. """ return vstackm((hstackm((A, B)), hstackm((C, D)))) def identity_matrix(N): """Generate the N-dimensional identity matrix. Args: N (int): Dimension Returns: Matrix: Identity matrix in N dimensions """ return diagm(np.ones(N, dtype=int)) def zerosm(shape, *args, **kwargs): """Generalizes ``numpy.zeros`` to :class:`.Matrix` objects.""" return Matrix(np.zeros(shape, *args, **kwargs))
<filename>src/qalgebra/core/matrix_algebra.py """Matrices of Expressions.""" import numpy as np import sympy from sympy import I, Symbol, sympify from .abstract_algebra import Expression, substitute from .abstract_quantum_algebra import QuantumExpression from .exceptions import NoConjugateMatrix, NonSquareMatrix from .hilbert_space_algebra import ProductSpace, TrivialSpace from .operator_algebra import adjoint from .scalar_algebra import is_scalar __all__ = [ 'Matrix', 'block_matrix', 'diagm', 'hstackm', 'identity_matrix', 'vstackm', 'zerosm', ] __private__ = [] # anything not in __all__ must be in __private__ class Matrix: """Matrix of Expressions.""" matrix = None _hash = None def __init__(self, m): if isinstance(m, np.ndarray): self.matrix = m elif isinstance(m, Matrix): self.matrix = np.array(m.matrix) else: self.matrix = np.array(m) if len(self.matrix.shape) < 2: self.matrix = self.matrix.reshape((self.matrix.shape[0], 1)) if len(self.matrix.shape) > 2: raise ValueError("Must have a shape of length 2") @property def shape(self): """The shape of the matrix ``(nrows, ncols)``.""" return self.matrix.shape @property def block_structure(self): """For square matrices this gives the block (-diagonal) structure of the matrix as a tuple of integers that sum up to the full dimension. :rtype: tuple """ n, m = self.shape if n != m: raise AttributeError( "block_structure only defined for square matrices" ) for k in range(1, n): if (self.matrix[:k, k:] == 0).all() and ( self.matrix[k:, :k] == 0 ).all(): return (k,) + self[k:, k:].block_structure return (n,) def _get_blocks(self, block_structure): n, m = self.shape if n == m: if not sum(block_structure) == n: raise ValueError() if not len(block_structure): return () j = block_structure[0] if (self.matrix[:j, j:] == 0).all() and ( self.matrix[j:, :j] == 0 ).all(): return (self[:j, :j],) + self[j:, j:]._get_blocks( block_structure[1:] ) else: raise ValueError() elif m == 1: if not len(block_structure): return () else: return (self[: block_structure[0], :],) + self[ : block_structure[0], : ]._get_blocks(block_structure[1:]) else: raise ValueError() @property def is_zero(self): """Are all elements of the matrix zero?""" for o in self.matrix.ravel(): try: if not o.is_zero: return False except AttributeError: if not o == 0: return False return True def __hash__(self): if not self._hash: self._hash = hash( (tuple(self.matrix.ravel()), self.matrix.shape, Matrix) ) return self._hash def __eq__(self, other): if isinstance(other, Matrix): return np.all(self.matrix == other.matrix) else: return np.all(self.matrix == other) def __add__(self, other): if isinstance(other, Matrix): return Matrix(self.matrix + other.matrix) else: return Matrix(self.matrix + other) def __radd__(self, other): return Matrix(other + self.matrix) def __mul__(self, other): if isinstance(other, Matrix): return Matrix(self.matrix.dot(other.matrix)) else: return Matrix(self.matrix * other) def __rmul__(self, other): return Matrix(other * self.matrix) def __sub__(self, other): return self + (-1) * other def __rsub__(self, other): return (-1) * self + other def __neg__(self): return (-1) * self def __truediv__(self, other): if is_scalar(other): return self * (sympify(1) / other) raise NotImplementedError( "Can't divide matrix %s by %s" % (self, other) ) def transpose(self): """The transpose matrix""" return Matrix(self.matrix.T) def conjugate(self): """The element-wise conjugate matrix. 
This is defined only if all the entries in the matrix have a defined conjugate (i.e., they have a `conjugate` method). This is *not* the case for a matrix of operators. In such a case, only an elementwise :func:`adjoint` would be applicable, but this is mathematically different from a complex conjugate. Raises: NoConjugateMatrix: if any entries have no `conjugate` method """ try: return Matrix(np.conjugate(self.matrix)) except (AttributeError, TypeError): raise NoConjugateMatrix( "Matrix %s contains entries that have no defined " "conjugate" % str(self) ) @property def real(self): """Element-wise real part. Raises: NoConjugateMatrix: if entries have no `conjugate` method and no other way to determine the real part Note: A mathematically equivalent way to obtain a real matrix from a complex matrix ``M`` is:: (M.conjugate() + M) / 2 However, the result may not be identical to ``M.real``, as the latter tries to convert elements of the matrix to real values directly, if possible, and only uses the conjugate as a fall-back """ def re(val): if hasattr(val, 'real'): return val.real elif hasattr(val, 'as_real_imag'): return val.as_real_imag()[0] elif hasattr(val, 'conjugate'): return (val.conjugate() + val) / 2 else: raise NoConjugateMatrix( "Matrix entry %s has no defined " "conjugate" % str(val) ) # Note: Do NOT use self.matrix.real! This will give wrong results, as # numpy thinks of objects (Operators) as real, even if they have no # defined real part return self.element_wise(re) @property def imag(self): """Element-wise imaginary part. Raises: NoConjugateMatrix: if entries have no `conjugate` method and no other way to determine the imaginary part Note: A mathematically equivalent way to obtain an imaginary matrix from a complex matrix ``M`` is:: (M.conjugate() - M) / (I * 2) with the same caveats as :attr:`real`. """ def im(val): if hasattr(val, 'imag'): return val.imag elif hasattr(val, 'as_real_imag'): return val.as_real_imag()[1] elif hasattr(val, 'conjugate'): return (val.conjugate() - val) / (2 * I) else: raise NoConjugateMatrix( "Matrix entry %s has no defined " "conjugate" % str(val) ) # Note: Do NOT use self.matrix.imag! This will give wrong results, as # numpy thinks of objects (Operators) as real, even if they have no # defined imaginary part return self.element_wise(im) @property def T(self): """Alias for :meth:`transpose`.""" return self.transpose() def adjoint(self): """Adjoint of the matrix. This is the transpose and the Hermitian adjoint of all elements.""" return self.T.element_wise(adjoint) dag = adjoint def trace(self): if self.shape[0] == self.shape[1]: return sum(self.matrix[k, k] for k in range(self.shape[0])) raise NonSquareMatrix(repr(self)) @property def H(self): """Alias for :meth:`adjoint`.""" return self.adjoint() def __getitem__(self, item_id): item = self.matrix.__getitem__(item_id) if isinstance(item, np.ndarray): return Matrix(item) return item def element_wise(self, func, *args, **kwargs): """Apply a function to each matrix element and return the result in a new operator matrix of the same shape. Args: func (callable): A function to be applied to each element. It must take the element as its first argument. args: Additional positional arguments to be passed to `func` kwargs: Additional keyword arguments to be passed to `func` Returns: Matrix: Matrix with results of `func`, applied element-wise. 
""" s = self.shape emat = [func(o, *args, **kwargs) for o in self.matrix.ravel()] return Matrix(np.array(emat).reshape(s)) def series_expand(self, param: Symbol, about, order: int): """Expand the matrix expression as a truncated power series in a scalar parameter. Args: param: Expansion parameter. about (.Scalar): Point about which to expand. order: Maximum order of expansion >= 0 Returns: tuple of length (order+1), where the entries are the expansion coefficients. """ s = self.shape emats = zip( *[ o.series_expand(param, about, order) for o in self.matrix.ravel() ] ) return tuple((Matrix(np.array(em).reshape(s)) for em in emats)) def expand(self): """Expand each matrix element distributively. Returns: Matrix: Expanded matrix. """ return self.element_wise( lambda o: o.expand() if isinstance(o, QuantumExpression) else o ) def substitute(self, var_map): """Perform a substitution in all element of the matrix. Equivalent to applying :func:`.substitute` element-wise. Returns: Matrix: Matrix with substitutions """ if self in var_map: return var_map[self] else: return self.element_wise(substitute, var_map=var_map) @property def free_symbols(self): """Free symbols, across all elements.""" ret = set() for o in self.matrix.ravel(): try: ret = ret | o.free_symbols except AttributeError: pass return ret @property def space(self): """Combined Hilbert space of all matrix elements. If none of the elements have an associated hilbert space, :obj:`.TrivialSpace`. """ arg_spaces = [ o.space for o in self.matrix.ravel() if hasattr(o, 'space') ] if len(arg_spaces) == 0: return TrivialSpace else: return ProductSpace.create(*arg_spaces) def simplify_scalar(self, func=sympy.simplify): """Simplify all scalar expressions appearing in the Matrix.""" def element_simplify(v): if isinstance(v, sympy.Basic): return func(v) elif isinstance(v, QuantumExpression): return v.simplify_scalar(func=func) else: return v return self.element_wise(element_simplify) def _repr_latex_(self): from qalgebra import latex return "$" + latex(self) + "$" def hstackm(matrices): """Generalizes `numpy.hstack` to :class:`.Matrix` objects.""" return Matrix(np.hstack(tuple(m.matrix for m in matrices))) def vstackm(matrices): """Generalizes `numpy.vstack` to :class:`.Matrix` objects.""" arr = np.vstack(tuple(m.matrix for m in matrices)) # print(tuple(m.matrix.dtype for m in matrices)) # print(arr.dtype) return Matrix(arr) def diagm(v, k=0): """Generalizes the diagonal matrix creation capabilities of `numpy.diag` to :class:`.Matrix` objects.""" return Matrix(np.diag(v, k)) def block_matrix(A, B, C, D): r"""Generate the operator matrix with quadrants .. math:: \begin{pmatrix} A B \\ C D \end{pmatrix} Args: A (Matrix): Matrix of shape ``(n, m)`` B (Matrix): Matrix of shape ``(n, k)`` C (Matrix): Matrix of shape ``(l, m)`` D (Matrix): Matrix of shape ``(l, k)`` Returns: Matrix: The combined block matrix ``[[A, B], [C, D]]``. """ return vstackm((hstackm((A, B)), hstackm((C, D)))) def identity_matrix(N): """Generate the N-dimensional identity matrix. Args: N (int): Dimension Returns: Matrix: Identity matrix in N dimensions """ return diagm(np.ones(N, dtype=int)) def zerosm(shape, *args, **kwargs): """Generalizes ``numpy.zeros`` to :class:`.Matrix` objects.""" return Matrix(np.zeros(shape, *args, **kwargs))
en
0.748907
Matrices of Expressions. # anything not in __all__ must be in __private__ Matrix of Expressions. The shape of the matrix ``(nrows, ncols)``. For square matrices this gives the block (-diagonal) structure of the matrix as a tuple of integers that sum up to the full dimension. :rtype: tuple Are all elements of the matrix zero? The transpose matrix The element-wise conjugate matrix. This is defined only if all the entries in the matrix have a defined conjugate (i.e., they have a `conjugate` method). This is *not* the case for a matrix of operators. In such a case, only an elementwise :func:`adjoint` would be applicable, but this is mathematically different from a complex conjugate. Raises: NoConjugateMatrix: if any entries have no `conjugate` method Element-wise real part. Raises: NoConjugateMatrix: if entries have no `conjugate` method and no other way to determine the real part Note: A mathematically equivalent way to obtain a real matrix from a complex matrix ``M`` is:: (M.conjugate() + M) / 2 However, the result may not be identical to ``M.real``, as the latter tries to convert elements of the matrix to real values directly, if possible, and only uses the conjugate as a fall-back # Note: Do NOT use self.matrix.real! This will give wrong results, as # numpy thinks of objects (Operators) as real, even if they have no # defined real part Element-wise imaginary part. Raises: NoConjugateMatrix: if entries have no `conjugate` method and no other way to determine the imaginary part Note: A mathematically equivalent way to obtain an imaginary matrix from a complex matrix ``M`` is:: (M.conjugate() - M) / (I * 2) with the same caveats as :attr:`real`. # Note: Do NOT use self.matrix.imag! This will give wrong results, as # numpy thinks of objects (Operators) as real, even if they have no # defined imaginary part Alias for :meth:`transpose`. Adjoint of the matrix. This is the transpose and the Hermitian adjoint of all elements. Alias for :meth:`adjoint`. Apply a function to each matrix element and return the result in a new operator matrix of the same shape. Args: func (callable): A function to be applied to each element. It must take the element as its first argument. args: Additional positional arguments to be passed to `func` kwargs: Additional keyword arguments to be passed to `func` Returns: Matrix: Matrix with results of `func`, applied element-wise. Expand the matrix expression as a truncated power series in a scalar parameter. Args: param: Expansion parameter. about (.Scalar): Point about which to expand. order: Maximum order of expansion >= 0 Returns: tuple of length (order+1), where the entries are the expansion coefficients. Expand each matrix element distributively. Returns: Matrix: Expanded matrix. Perform a substitution in all elements of the matrix. Equivalent to applying :func:`.substitute` element-wise. Returns: Matrix: Matrix with substitutions Free symbols, across all elements. Combined Hilbert space of all matrix elements. If none of the elements have an associated Hilbert space, this is :obj:`.TrivialSpace`. Simplify all scalar expressions appearing in the Matrix. Generalizes `numpy.hstack` to :class:`.Matrix` objects. Generalizes `numpy.vstack` to :class:`.Matrix` objects. # print(tuple(m.matrix.dtype for m in matrices)) # print(arr.dtype) Generalizes the diagonal matrix creation capabilities of `numpy.diag` to :class:`.Matrix` objects. Generate the operator matrix with quadrants .. 
math:: \begin{pmatrix} A & B \\ C & D \end{pmatrix} Args: A (Matrix): Matrix of shape ``(n, m)`` B (Matrix): Matrix of shape ``(n, k)`` C (Matrix): Matrix of shape ``(l, m)`` D (Matrix): Matrix of shape ``(l, k)`` Returns: Matrix: The combined block matrix ``[[A, B], [C, D]]``. Generate the N-dimensional identity matrix. Args: N (int): Dimension Returns: Matrix: Identity matrix in N dimensions Generalizes ``numpy.zeros`` to :class:`.Matrix` objects.
3.152826
3
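A minimal usage sketch for the Matrix helpers in the record above (illustrative only, not part of the dataset). The import path is assumed from the file's <filename> tag, and the sympy symbols a, b are arbitrary placeholders.
import sympy
from qalgebra.core.matrix_algebra import (  # assumed module path, per the <filename> tag above
    Matrix, block_matrix, identity_matrix, zerosm,
)

a, b = sympy.symbols('a b')
M = Matrix([[a, b], [b, a]])                   # 2x2 symbolic matrix
print(M.shape)                                 # (2, 2)
print(M.T == M)                                # True: the matrix is symmetric
print(M.element_wise(lambda v: 2 * v).matrix)  # every entry doubled, element-wise

# Assemble [[M, 0], [0, I]] from quadrants; the zero off-diagonal blocks make it block-diagonal
Z = zerosm((2, 2), dtype=int)
B = block_matrix(M, Z, Z, identity_matrix(2))
print(B.shape)            # (4, 4)
print(B.block_structure)  # (2, 1, 1): the 2x2 block of M, then the two 1x1 identity entries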
bokeh/tests/test_widgets.py
timelyportfolio/bokeh
1
6631547
from __future__ import absolute_import import unittest import inspect def get_prop_set(class_object): # all this does is get a list of every property implemented by the object that is not present in the base classes of said object # note it won't detect overridden properties! base_classes = list(inspect.getmro(class_object)) base_classes.remove(class_object) base_properties = [] for base_class in base_classes: base_properties.extend(dir(base_class)) class_properties = set(dir(class_object)).difference(set(base_properties)) return class_properties class TestPanel(unittest.TestCase): def setUp(self): from bokeh.models.widgets.panels import Panel self.panelCls = Panel def test_expectedprops(self): expected_properties = set(['title', 'child', 'closable']) actual_properties = get_prop_set(self.panelCls) self.assertTrue(expected_properties.issubset(actual_properties)) def test_prop_defaults(self): p1 = self.panelCls() p2 = self.panelCls() self.assertEqual(p1.title, None) self.assertEqual(p1.child, None) self.assertFalse(p1.closable) class TestTabs(unittest.TestCase): def setUp(self): from bokeh.models.widgets.panels import Tabs, Panel self.tabsCls = Tabs self.panelCls = Panel def test_expected_props(self): expected_properties = set(['tabs', 'active']) actual_properties = get_prop_set(self.tabsCls) self.assertTrue(expected_properties.issubset(actual_properties)) def test_props_defaults(self): tab = self.tabsCls() self.assertEqual(tab.tabs, []) self.assertEqual(tab.active, 0) class TestDialog(unittest.TestCase): def setUp(self): from bokeh.models.widgets.dialogs import Dialog self.dialogCls = Dialog def test_expected_props(self): expected_properties = set(['visible', 'closable', 'title', 'content', 'buttons']) actual_properties = get_prop_set(self.dialogCls) self.assertTrue(expected_properties.issubset(actual_properties)) def test_props_defaults(self): dialog = self.dialogCls() self.assertFalse(dialog.visible) self.assertTrue(dialog.closable) self.assertEqual(dialog.title, None) self.assertEqual(dialog.content, None) self.assertEqual(dialog.buttons, []) class TestLayout(unittest.TestCase): def setUp(self): from bokeh.models.widgets.layouts import Layout self.layoutCls = Layout def test_expected_props(self): expected_properties = set(['width', 'height']) actual_properties = get_prop_set(self.layoutCls) self.assertTrue(expected_properties.issubset(actual_properties)) def test_props_defaults(self): layout = self.layoutCls() self.assertEqual(layout.width, None) self.assertEqual(layout.height, None) if __name__ == "__main__": unittest.main()
from __future__ import absolute_import import unittest import inspect def get_prop_set(class_object): # all this does is get a list of every property implemented by the object that is not present in the base classes of said object # note it won't detect overridden properties! base_classes = list(inspect.getmro(class_object)) base_classes.remove(class_object) base_properties = [] for base_class in base_classes: base_properties.extend(dir(base_class)) class_properties = set(dir(class_object)).difference(set(base_properties)) return class_properties class TestPanel(unittest.TestCase): def setUp(self): from bokeh.models.widgets.panels import Panel self.panelCls = Panel def test_expectedprops(self): expected_properties = set(['title', 'child', 'closable']) actual_properties = get_prop_set(self.panelCls) self.assertTrue(expected_properties.issubset(actual_properties)) def test_prop_defaults(self): p1 = self.panelCls() p2 = self.panelCls() self.assertEqual(p1.title, None) self.assertEqual(p1.child, None) self.assertFalse(p1.closable) class TestTabs(unittest.TestCase): def setUp(self): from bokeh.models.widgets.panels import Tabs, Panel self.tabsCls = Tabs self.panelCls = Panel def test_expected_props(self): expected_properties = set(['tabs', 'active']) actual_properties = get_prop_set(self.tabsCls) self.assertTrue(expected_properties.issubset(actual_properties)) def test_props_defaults(self): tab = self.tabsCls() self.assertEqual(tab.tabs, []) self.assertEqual(tab.active, 0) class TestDialog(unittest.TestCase): def setUp(self): from bokeh.models.widgets.dialogs import Dialog self.dialogCls = Dialog def test_expected_props(self): expected_properties = set(['visible', 'closable', 'title', 'content', 'buttons']) actual_properties = get_prop_set(self.dialogCls) self.assertTrue(expected_properties.issubset(actual_properties)) def test_props_defaults(self): dialog = self.dialogCls() self.assertFalse(dialog.visible) self.assertTrue(dialog.closable) self.assertEqual(dialog.title, None) self.assertEqual(dialog.content, None) self.assertEqual(dialog.buttons, []) class TestLayout(unittest.TestCase): def setUp(self): from bokeh.models.widgets.layouts import Layout self.layoutCls = Layout def test_expected_props(self): expected_properties = set(['width', 'height']) actual_properties = get_prop_set(self.layoutCls) self.assertTrue(expected_properties.issubset(actual_properties)) def test_props_defaults(self): layout = self.layoutCls() self.assertEqual(layout.width, None) self.assertEqual(layout.height, None) if __name__ == "__main__": unittest.main()
en
0.944783
# all this does is get a list of every property implemented by the object that is not present in the base classes of said object # note it won't detect overridden properties!
2.268935
2
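The get_prop_set helper above can be exercised without bokeh installed. The sketch below restates the same logic and applies it to hypothetical toy classes, showing that it reports only names defined directly on the class and, as the original comment warns, does not detect overridden properties.
import inspect

def get_prop_set(class_object):
    # Same logic as the helper in the record above: names on the class
    # itself, minus everything visible on its base classes.
    base_classes = list(inspect.getmro(class_object))
    base_classes.remove(class_object)
    base_properties = []
    for base_class in base_classes:
        base_properties.extend(dir(base_class))
    return set(dir(class_object)).difference(set(base_properties))

class Widget:              # hypothetical base class
    width = None
    height = None

class Panel(Widget):       # hypothetical subclass
    title = None
    child = None
    width = 100            # overrides Widget.width, so it is *not* reported

props = get_prop_set(Panel)
assert {'title', 'child'}.issubset(props)
assert 'width' not in props and 'height' not in props
print(sorted(props))       # ['child', 'title']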
xmlParser.py
isseikz/StarTracker
0
6631548
<reponame>isseikz/StarTracker #!/usr/bin/python # -*- coding: utf-8 -*- import xml.etree.ElementTree as ET import urllib.parse def urlFromFile(filepath, debug=0): tree = ET.parse(filepath) root = tree.getroot() return addrFrom(root, debug) def urlFromXml(xml_string, debug=0): root = ET.fromstring(xml_string) return addrFrom(root, debug) def addrFrom(root, debug=0): if debug>0: for child in root: print(f'tag: {child.tag}, attribute: {child.attrib}') for cchild in child: print(f' tag: {cchild.tag}, attribute: {cchild.attrib}') for ccchild in cchild: print(f' tag: {ccchild.tag}, attribute: {ccchild.attrib}') for cccchild in ccchild: print(f' tag: {cccchild.tag}, attribute: {cccchild.attrib}') for ccccchild in cccchild: print(f' tag: {ccccchild.tag}, attribute: {ccccchild.attrib}') pass pass pass pass pass deviceInfo = root.find("{urn:schemas-upnp-org:device-1-0}device").find("{urn:schemas-sony-com:av}X_ScalarWebAPI_DeviceInfo") uri = urllib.parse.unquote(deviceInfo.find('{urn:schemas-sony-com:av}X_ScalarWebAPI_ImagingDevice').find('{urn:schemas-sony-com:av}X_ScalarWebAPI_LiveView_URL').text, 'utf-8') for service in deviceInfo.find('{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceList').findall('{urn:schemas-sony-com:av}X_ScalarWebAPI_Service'): print(service.find('{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceType').text) if service.find('{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceType').text == 'camera': camera = urllib.parse.unquote(service.find('{urn:schemas-sony-com:av}X_ScalarWebAPI_ActionList_URL').text).split('/') cameraHost = camera[2] cameraUrl = '/' + camera[3] + '/camera' print(f'control point: {camera}') if debug >0: print(f'parsed: {uri}') splitted = uri.split('/') host = uri.split('/')[2] url = '/'.join(uri.split('/')[3:len(splitted)]) if debug > 0: print(f'host: {host}, url: {url}') return uri, host, url, cameraHost, cameraUrl if __name__ == '__main__': urlFromFile('./Qx10Example.xml', 1)
#!/usr/bin/python # -*- coding: utf-8 -*- import xml.etree.ElementTree as ET import urllib.parse def urlFromFile(filepath, debug=0): tree = ET.parse(filepath) root = tree.getroot() return addrFrom(root, debug) def urlFromXml(xml_string, debug=0): root = ET.fromstring(xml_string) return addrFrom(root, debug) def addrFrom(root, debug=0): if debug>0: for child in root: print(f'tag: {child.tag}, attribute: {child.attrib}') for cchild in child: print(f' tag: {cchild.tag}, attribute: {cchild.attrib}') for ccchild in cchild: print(f' tag: {ccchild.tag}, attribute: {ccchild.attrib}') for cccchild in ccchild: print(f' tag: {cccchild.tag}, attribute: {cccchild.attrib}') for ccccchild in cccchild: print(f' tag: {ccccchild.tag}, attribute: {ccccchild.attrib}') pass pass pass pass pass deviceInfo = root.find("{urn:schemas-upnp-org:device-1-0}device").find("{urn:schemas-sony-com:av}X_ScalarWebAPI_DeviceInfo") uri = urllib.parse.unquote(deviceInfo.find('{urn:schemas-sony-com:av}X_ScalarWebAPI_ImagingDevice').find('{urn:schemas-sony-com:av}X_ScalarWebAPI_LiveView_URL').text, 'utf-8') for service in deviceInfo.find('{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceList').findall('{urn:schemas-sony-com:av}X_ScalarWebAPI_Service'): print(service.find('{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceType').text) if service.find('{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceType').text == 'camera': camera = urllib.parse.unquote(service.find('{urn:schemas-sony-com:av}X_ScalarWebAPI_ActionList_URL').text).split('/') cameraHost = camera[2] cameraUrl = '/' + camera[3] + '/camera' print(f'control point: {camera}') if debug >0: print(f'parsed: {uri}') splitted = uri.split('/') host = uri.split('/')[2] url = '/'.join(uri.split('/')[3:len(splitted)]) if debug > 0: print(f'host: {host}, url: {url}') return uri, host, url, cameraHost, cameraUrl if __name__ == '__main__': urlFromFile('./Qx10Example.xml', 1)
en
0.44423
#!/usr/bin/python # -*- coding: utf-8 -*-
2.428971
2
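A sketch of feeding urlFromXml/addrFrom a Sony ScalarWebAPI device description. The XML below is a made-up minimal example (the host 192.168.122.1:8080 and paths are placeholders) shaped to contain only the elements the parser looks up, and it assumes the xmlParser module from the record above is importable.
from xmlParser import urlFromXml  # the module in the record above

SAMPLE = """
<root xmlns="urn:schemas-upnp-org:device-1-0" xmlns:av="urn:schemas-sony-com:av">
  <device>
    <av:X_ScalarWebAPI_DeviceInfo>
      <av:X_ScalarWebAPI_ImagingDevice>
        <av:X_ScalarWebAPI_LiveView_URL>http://192.168.122.1:8080/liveview/liveviewstream</av:X_ScalarWebAPI_LiveView_URL>
      </av:X_ScalarWebAPI_ImagingDevice>
      <av:X_ScalarWebAPI_ServiceList>
        <av:X_ScalarWebAPI_Service>
          <av:X_ScalarWebAPI_ServiceType>camera</av:X_ScalarWebAPI_ServiceType>
          <av:X_ScalarWebAPI_ActionList_URL>http://192.168.122.1:8080/sony</av:X_ScalarWebAPI_ActionList_URL>
        </av:X_ScalarWebAPI_Service>
      </av:X_ScalarWebAPI_ServiceList>
    </av:X_ScalarWebAPI_DeviceInfo>
  </device>
</root>
"""

uri, host, url, camera_host, camera_url = urlFromXml(SAMPLE)
print(host)         # 192.168.122.1:8080
print(url)          # liveview/liveviewstream
print(camera_host)  # 192.168.122.1:8080
print(camera_url)   # /sony/camera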
rest_notifications/tests/test_notifications.py
yeti/rest_notifications
1
6631549
<filename>rest_notifications/tests/test_notifications.py<gh_stars>1-10 from django.conf import settings from django.contrib.auth import get_user_model from django.contrib.contenttypes.models import ContentType from django.core import mail from django.core.urlresolvers import reverse from mock import MagicMock from manticore_django.manticore_django.utils import get_class from rest_core.rest_core.test import ManticomTestCase from rest_notifications.rest_notifications.models import create_notification, Notification, NotificationSetting from rest_notifications.rest_notifications.utils import send_email_notification, send_push_notification, PushwooshClient from rest_social.rest_social.models import Comment from rest_social.rest_social.utils import get_social_model from rest_user.rest_user.test.factories import UserFactory __author__ = 'baylee' User = get_user_model() SocialModel = get_social_model() SocialFactory = get_class(settings.SOCIAL_MODEL_FACTORY) class NotificationsTestCase(ManticomTestCase): def setUp(self): super(NotificationsTestCase, self).setUp() self.social_obj = SocialFactory() self.receiver = UserFactory() self.reporter = UserFactory() PushwooshClient.invoke = MagicMock(return_value={"status_code": 200}) def test_create_pushwoosh_token(self): url = reverse("pushwoosh_token") data = { "token": "ABC123", "language": "en", "hwid": "XYZ456" } self.assertManticomPOSTResponse(url, "$pushwooshTokenRequest", "$pushwooshTokenResponse", data, self.receiver) # Non-authenticated users can't create a token self.assertManticomPOSTResponse(url, "$pushwooshTokenRequest", "$pushwooshTokenResponse", data, None, unauthorized=True) # Can't create token if write-only language and hwid data is missing bad_data = { "token": "ABC123" } self.add_credentials(self.receiver) response = self.client.post(url, bad_data, format="json") self.assertHttpBadRequest(response) def test_email_notification_sent(self): message = "<h1>You have a notification!</h1>" send_email_notification(self.receiver, message) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, settings.EMAIL_NOTIFICATION_SUBJECT) self.assertEqual(mail.outbox[0].body, "You have a notification!") self.assertEqual(mail.outbox[0].alternatives, [(message, "text/html")]) def test_push_notification_sent(self): message = "<h1>You have a notification!</h1>" response = send_push_notification(self.receiver, message) self.assertEqual(response["status_code"], 200) def test_create_notification(self): notification_count = Notification.objects.count() # If receiver is the same as the reporter, a notification is not created create_notification(self.receiver, self.receiver, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) self.assertEqual(notification_count, Notification.objects.count()) # If the receiver and reporter are different, a notification is created create_notification(self.receiver, self.reporter, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) self.assertEqual(notification_count + 1, Notification.objects.count()) def test_correct_notification_type_sent(self): setting = NotificationSetting.objects.get(notification_type=settings.NOTIFICATION_TYPES[0][0], user=self.receiver) # An email and a push are sent if allow_email and allow_push are True create_notification(self.receiver, self.reporter, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) self.assertEqual(len(mail.outbox), 1) self.assertTrue(len(PushwooshClient.invoke.mock_calls), 1) # No new email is sent if allow_email is False setting.allow_email = False 
setting.save() create_notification(self.receiver, self.reporter, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) self.assertEqual(len(mail.outbox), 1) self.assertTrue(len(PushwooshClient.invoke.mock_calls), 2) # If allow_push is False and allow_email True, an email is sent and a push isn't setting.allow_email = True setting.allow_push = False setting.save() create_notification(self.receiver, self.reporter, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) self.assertEqual(len(mail.outbox), 2) self.assertTrue(len(PushwooshClient.invoke.mock_calls), 2) def test_can_only_see_own_notifications(self): create_notification(self.receiver, self.reporter, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) create_notification(self.reporter, self.receiver, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) url = reverse("notifications") response = self.assertManticomGETResponse(url, None, "$notificationResponse", self.receiver) self.assertEqual(response.data["count"], self.receiver.notifications_received.count()) def test_comment_creates_notification(self): url = reverse("comments-list") content_type = ContentType.objects.get_for_model(SocialModel) data = { "content_type": content_type.pk, "object_id": self.social_obj.pk, "description": "Yeti are cool" } self.assertManticomPOSTResponse(url, "$commentRequest", "$commentResponse", data, self.reporter) notification_count = Notification.objects.filter(user=self.social_obj.user, reporter=self.reporter, content_type=ContentType.objects.get_for_model(SocialModel), notification_type=Notification.TYPES.comment).count() self.assertEquals(notification_count, 1) def test_follow_creates_notification(self): url = reverse("follows-list") content_type = ContentType.objects.get_for_model(User) data = { "content_type": content_type.pk, "object_id": self.receiver.pk, } self.assertManticomPOSTResponse(url, "$followRequest", "$followResponse", data, self.reporter) notification_count = Notification.objects.filter(user=self.receiver, reporter=self.reporter, content_type=ContentType.objects.get_for_model(User), notification_type=Notification.TYPES.follow).count() self.assertEquals(notification_count, 1) def test_share_creates_notification(self): url = reverse("shares-list") content_type = ContentType.objects.get_for_model(SocialModel) data = { "content_type": content_type.pk, "object_id": self.social_obj.pk, "shared_with": [self.receiver.pk] } self.assertManticomPOSTResponse(url, "$shareRequest", "$shareResponse", data, self.reporter) notification_count = Notification.objects.filter(user=self.receiver, reporter=self.reporter, content_type=ContentType.objects.get_for_model(SocialModel), notification_type=Notification.TYPES.share).count() self.assertEquals(notification_count, 1) def test_like_creates_notification(self): url = reverse("likes-list") content_type = ContentType.objects.get_for_model(SocialModel) data = { "content_type": content_type.pk, "object_id": self.social_obj.pk, } self.assertManticomPOSTResponse(url, "$likeRequest", "$likeResponse", data, self.reporter) notification_count = Notification.objects.filter(user=self.social_obj.user, reporter=self.reporter, content_type=ContentType.objects.get_for_model(SocialModel), notification_type=Notification.TYPES.like).count() self.assertEquals(notification_count, 1) def comment_mention_creates_notification(self): """ User receives a notification when their username is @mentioned, even if they are not the owner of the post """ url = reverse("comments-list") content_type = 
ContentType.objects.get_for_model(SocialModel) data = { "content_type": content_type.pk, "object_id": SocialFactory().pk, "description": "@{} look at my cool comment!".format(self.social_obj.user.username) } self.assertManticomPOSTResponse(url, "$commentRequest", "$commentResponse", data, self.reporter) notification_count = Notification.objects.filter(user=self.social_obj.user, reporter=self.reporter, content_type=ContentType.objects.get_for_model(Comment), notification_type=Notification.TYPES.mention).count() self.assertEquals(notification_count, 1)
<filename>rest_notifications/tests/test_notifications.py<gh_stars>1-10 from django.conf import settings from django.contrib.auth import get_user_model from django.contrib.contenttypes.models import ContentType from django.core import mail from django.core.urlresolvers import reverse from mock import MagicMock from manticore_django.manticore_django.utils import get_class from rest_core.rest_core.test import ManticomTestCase from rest_notifications.rest_notifications.models import create_notification, Notification, NotificationSetting from rest_notifications.rest_notifications.utils import send_email_notification, send_push_notification, PushwooshClient from rest_social.rest_social.models import Comment from rest_social.rest_social.utils import get_social_model from rest_user.rest_user.test.factories import UserFactory __author__ = 'baylee' User = get_user_model() SocialModel = get_social_model() SocialFactory = get_class(settings.SOCIAL_MODEL_FACTORY) class NotificationsTestCase(ManticomTestCase): def setUp(self): super(NotificationsTestCase, self).setUp() self.social_obj = SocialFactory() self.receiver = UserFactory() self.reporter = UserFactory() PushwooshClient.invoke = MagicMock(return_value={"status_code": 200}) def test_create_pushwoosh_token(self): url = reverse("pushwoosh_token") data = { "token": "ABC123", "language": "en", "hwid": "XYZ456" } self.assertManticomPOSTResponse(url, "$pushwooshTokenRequest", "$pushwooshTokenResponse", data, self.receiver) # Non-authenticated users can't create a token self.assertManticomPOSTResponse(url, "$pushwooshTokenRequest", "$pushwooshTokenResponse", data, None, unauthorized=True) # Can't create token if write-only language and hwid data is missing bad_data = { "token": "ABC123" } self.add_credentials(self.receiver) response = self.client.post(url, bad_data, format="json") self.assertHttpBadRequest(response) def test_email_notification_sent(self): message = "<h1>You have a notification!</h1>" send_email_notification(self.receiver, message) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, settings.EMAIL_NOTIFICATION_SUBJECT) self.assertEqual(mail.outbox[0].body, "You have a notification!") self.assertEqual(mail.outbox[0].alternatives, [(message, "text/html")]) def test_push_notification_sent(self): message = "<h1>You have a notification!</h1>" response = send_push_notification(self.receiver, message) self.assertEqual(response["status_code"], 200) def test_create_notification(self): notification_count = Notification.objects.count() # If receiver is the same as the reporter, a notification is not created create_notification(self.receiver, self.receiver, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) self.assertEqual(notification_count, Notification.objects.count()) # If the receiver and reporter are different, a notification is created create_notification(self.receiver, self.reporter, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) self.assertEqual(notification_count + 1, Notification.objects.count()) def test_correct_notification_type_sent(self): setting = NotificationSetting.objects.get(notification_type=settings.NOTIFICATION_TYPES[0][0], user=self.receiver) # An email and a push are sent if allow_email and allow_push are True create_notification(self.receiver, self.reporter, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) self.assertEqual(len(mail.outbox), 1) self.assertTrue(len(PushwooshClient.invoke.mock_calls), 1) # No new email is sent if allow_email is False setting.allow_email = False 
setting.save() create_notification(self.receiver, self.reporter, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) self.assertEqual(len(mail.outbox), 1) self.assertTrue(len(PushwooshClient.invoke.mock_calls), 2) # If allow_push is False and allow_email True, an email is sent and a push isn't setting.allow_email = True setting.allow_push = False setting.save() create_notification(self.receiver, self.reporter, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) self.assertEqual(len(mail.outbox), 2) self.assertTrue(len(PushwooshClient.invoke.mock_calls), 2) def test_can_only_see_own_notifications(self): create_notification(self.receiver, self.reporter, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) create_notification(self.reporter, self.receiver, self.social_obj, settings.NOTIFICATION_TYPES[0][0]) url = reverse("notifications") response = self.assertManticomGETResponse(url, None, "$notificationResponse", self.receiver) self.assertEqual(response.data["count"], self.receiver.notifications_received.count()) def test_comment_creates_notification(self): url = reverse("comments-list") content_type = ContentType.objects.get_for_model(SocialModel) data = { "content_type": content_type.pk, "object_id": self.social_obj.pk, "description": "Yeti are cool" } self.assertManticomPOSTResponse(url, "$commentRequest", "$commentResponse", data, self.reporter) notification_count = Notification.objects.filter(user=self.social_obj.user, reporter=self.reporter, content_type=ContentType.objects.get_for_model(SocialModel), notification_type=Notification.TYPES.comment).count() self.assertEquals(notification_count, 1) def test_follow_creates_notification(self): url = reverse("follows-list") content_type = ContentType.objects.get_for_model(User) data = { "content_type": content_type.pk, "object_id": self.receiver.pk, } self.assertManticomPOSTResponse(url, "$followRequest", "$followResponse", data, self.reporter) notification_count = Notification.objects.filter(user=self.receiver, reporter=self.reporter, content_type=ContentType.objects.get_for_model(User), notification_type=Notification.TYPES.follow).count() self.assertEquals(notification_count, 1) def test_share_creates_notification(self): url = reverse("shares-list") content_type = ContentType.objects.get_for_model(SocialModel) data = { "content_type": content_type.pk, "object_id": self.social_obj.pk, "shared_with": [self.receiver.pk] } self.assertManticomPOSTResponse(url, "$shareRequest", "$shareResponse", data, self.reporter) notification_count = Notification.objects.filter(user=self.receiver, reporter=self.reporter, content_type=ContentType.objects.get_for_model(SocialModel), notification_type=Notification.TYPES.share).count() self.assertEquals(notification_count, 1) def test_like_creates_notification(self): url = reverse("likes-list") content_type = ContentType.objects.get_for_model(SocialModel) data = { "content_type": content_type.pk, "object_id": self.social_obj.pk, } self.assertManticomPOSTResponse(url, "$likeRequest", "$likeResponse", data, self.reporter) notification_count = Notification.objects.filter(user=self.social_obj.user, reporter=self.reporter, content_type=ContentType.objects.get_for_model(SocialModel), notification_type=Notification.TYPES.like).count() self.assertEquals(notification_count, 1) def comment_mention_creates_notification(self): """ User receives a notification when their username is @mentioned, even if they are not the owner of the post """ url = reverse("comments-list") content_type = 
ContentType.objects.get_for_model(SocialModel) data = { "content_type": content_type.pk, "object_id": SocialFactory().pk, "description": "@{} look at my cool comment!".format(self.social_obj.user.username) } self.assertManticomPOSTResponse(url, "$commentRequest", "$commentResponse", data, self.reporter) notification_count = Notification.objects.filter(user=self.social_obj.user, reporter=self.reporter, content_type=ContentType.objects.get_for_model(Comment), notification_type=Notification.TYPES.mention).count() self.assertEquals(notification_count, 1)
en
0.945189
# Non-authenticated users can't create a token # Can't create token if write-only language and hwid data is missing # If receiver is the same as the reporter, a notification is not created # If the receiver and reporter are different, a notification is created # An email and a push are sent if allow_email and allow_push are True # No new email is sent if allow_email is False # If allow_push is False and allow_email True, an email is sent and a push isn't User receives a notification when their username is @mentioned, even if they are not the owner of the post
2.024799
2
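The setUp above replaces PushwooshClient.invoke with a MagicMock so the suite never hits the Pushwoosh API. The standalone sketch below shows the same class-level patching pattern with a hypothetical FakePushClient and no Django dependencies; in a larger suite, unittest.mock.patch.object would be preferable so the original method is restored after each test.
from unittest.mock import MagicMock

class FakePushClient:  # hypothetical stand-in for PushwooshClient
    def invoke(self, payload):
        raise RuntimeError("would perform a real network call")

# Patch at class level, as in setUp above, so every instance returns the canned response
FakePushClient.invoke = MagicMock(return_value={"status_code": 200})

client = FakePushClient()
response = client.invoke({"content": "You have a notification!"})
assert response["status_code"] == 200
assert FakePushClient.invoke.call_count == 1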
lib/core/psp.py
TotalVariation/Flattenet
3
6631550
from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F from .conv import conv1x1 class PyramidPooling(nn.Module): """ Reference: Zhao, Hengshuang, et al. *"Pyramid scene parsing network."* """ def __init__(self, in_channels, pool_sizes, norm_layer=nn.BatchNorm2d): super(PyramidPooling, self).__init__() branches = [] out_channels = int(in_channels/4) for s in pool_sizes: branches.append( nn.Sequential(OrderedDict([ ('pool', nn.AdaptiveAvgPool2d(s)), ('conv', conv1x1(in_channels, out_channels)), ('norm', norm_layer(out_channels)), ('relu', nn.ReLU(True)), ])) ) self.branches = nn.ModuleList(branches) # bilinear interpolation options self._up_kwargs = {'mode': 'bilinear', 'align_corners': False} def forward(self, x): _, _, h, w = x.size() feat = [] for b in self.branches: out = F.interpolate(b(x), size=(h, w), **self._up_kwargs) feat.append(out) return torch.cat(feat, 1)
from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F from .conv import conv1x1 class PyramidPooling(nn.Module): """ Reference: Zhao, Hengshuang, et al. *"Pyramid scene parsing network."* """ def __init__(self, in_channels, pool_sizes, norm_layer=nn.BatchNorm2d): super(PyramidPooling, self).__init__() branches = [] out_channels = int(in_channels/4) for s in pool_sizes: branches.append( nn.Sequential(OrderedDict([ ('pool', nn.AdaptiveAvgPool2d(s)), ('conv', conv1x1(in_channels, out_channels)), ('norm', norm_layer(out_channels)), ('relu', nn.ReLU(True)), ])) ) self.branches = nn.ModuleList(branches) # bilinear interpolation options self._up_kwargs = {'mode': 'bilinear', 'align_corners': False} def forward(self, x): _, _, h, w = x.size() feat = [] for b in self.branches: out = F.interpolate(b(x), size=(h, w), **self._up_kwargs) feat.append(out) return torch.cat(feat, 1)
en
0.484141
Reference: Zhao, Hengshuang, et al. *"Pyramid scene parsing network."* # bilinear interpolation options
2.515069
3
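A usage sketch for the PyramidPooling module above; the import path is assumed from the record's file path (lib/core/psp.py), and the channel count and pool sizes are arbitrary example values. Note that, as written, the module returns only the concatenated pooled branches and does not concatenate the input feature map itself.
import torch
from lib.core.psp import PyramidPooling  # assumed import path from the record above

# 4 pool sizes -> 4 branches of in_channels // 4 = 64 channels each
psp = PyramidPooling(in_channels=256, pool_sizes=(1, 2, 3, 6))
psp.eval()

x = torch.randn(2, 256, 32, 32)  # (batch, channels, height, width)
with torch.no_grad():
    y = psp(x)
print(y.shape)  # torch.Size([2, 256, 32, 32]): 4 * 64 pooled-and-upsampled channels concatenated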