Columns:

Unnamed: 0      int64    values 0 to 2.44k
repo            string   lengths 32 to 81
hash            string   length 40
diff            string   lengths 113 to 1.17k
old_path        string   lengths 5 to 84
rewrite         string   lengths 34 to 79
initial_state   string   lengths 75 to 980
final_state     string   lengths 76 to 980
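A minimal sketch of how records with this schema might be consumed, assuming a hypothetical CSV export named bug_fixes.csv (the dataset's actual distribution format is not shown in this dump):

import pandas as pd

# Hypothetical file name; the real export format is an assumption.
df = pd.read_csv("bug_fixes.csv")

# Each row pairs a one-line bug fix (diff, rewrite op, old_path)
# with the buggy (initial_state) and fixed (final_state) snippets.
arg_swaps = df[df["rewrite"].str.startswith("ArgSwap")]
replace_texts = df[df["rewrite"].str.startswith("ReplaceText")]
print(len(df), "records:", len(arg_swaps), "ArgSwap,", len(replace_texts), "ReplaceText")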
1,400
repo: https://:@github.com/phelimb/cbg.git
hash: d35f6bbd778114d6350e81ed1953fd33a5e4148d
diff:
@@ -21,7 +21,7 @@ def run(parser, args, conn_config):
         if i % 100000 == 0:
             mc.set_kmers(kmers, colour)
             kmers = []
-        mc.set_kmers(kmers, i)
+        mc.set_kmers(kmers, colour)
     # kmers = inf.read().splitlines()
old_path: remcdbg/cmds/insert.py
rewrite: ReplaceText(target='colour' @(24,28)->(24,29))
initial_state: def run(parser, args, conn_config): if i % 100000 == 0: mc.set_kmers(kmers, colour) kmers = [] mc.set_kmers(kmers, i) # kmers = inf.read().splitlines()
final_state: def run(parser, args, conn_config): if i % 100000 == 0: mc.set_kmers(kmers, colour) kmers = [] mc.set_kmers(kmers, colour) # kmers = inf.read().splitlines()
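The rewrite field encodes each fix as an operation over the buggy source, e.g. ReplaceText(target='colour' @(24,28)->(24,29)) splices the target string over a (line, column) span. A minimal sketch of applying such an op, assuming 1-indexed lines and 0-indexed columns (a convention inferred from the records, not documented in this dump):

def apply_replace_text(source: str, target: str,
                       line: int, col_start: int, col_end: int) -> str:
    # Splice `target` over the [col_start, col_end) span of the given
    # line. 1-indexed lines and 0-indexed columns are assumptions.
    lines = source.splitlines(keepends=True)
    old = lines[line - 1]
    lines[line - 1] = old[:col_start] + target + old[col_end:]
    return "".join(lines)

# For record 1,400 above, replacing the one-character span holding `i`
# with 'colour' yields the fixed call mc.set_kmers(kmers, colour).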
1,401
repo: https://:@github.com/phelimb/cbg.git
hash: 8bdef6f8a055e739860a17113a4fb565aec4542e
diff:
@@ -74,7 +74,7 @@ class AtlasSeq(object):
     def search(self, seq: hug.types.text=None, fasta_file: hug.types.text=None,
                threshold: hug.types.float_number=1.0):
         """Returns samples that contain the searched sequence. Use -f to search for sequence from fasta"""
-        if not seq or fasta_file:
+        if not seq or not fasta_file:
             return "-s or -f must be provided"
         return search(seq=seq, fasta_file=fasta_file, threshold=threshold, conn_config=CONN_CONFIG)
old_path: atlasseq/__main__.py
rewrite: ReplaceText(target='not ' @(77,22)->(77,22))
initial_state: class AtlasSeq(object): def search(self, seq: hug.types.text=None, fasta_file: hug.types.text=None, threshold: hug.types.float_number=1.0): """Returns samples that contain the searched sequence. Use -f to search for sequence from fasta""" if not seq or fasta_file: return "-s or -f must be provided" return search(seq=seq, fasta_file=fasta_file, threshold=threshold, conn_config=CONN_CONFIG)
final_state: class AtlasSeq(object): def search(self, seq: hug.types.text=None, fasta_file: hug.types.text=None, threshold: hug.types.float_number=1.0): """Returns samples that contain the searched sequence. Use -f to search for sequence from fasta""" if not seq or not fasta_file: return "-s or -f must be provided" return search(seq=seq, fasta_file=fasta_file, threshold=threshold, conn_config=CONN_CONFIG)
1,402
repo: https://:@github.com/phelimb/cbg.git
hash: 6aeae99e79fe215eac5e4fa1defcb77882760891
diff:
@@ -24,5 +24,5 @@ def build(bloomfilter_filepaths, samples, graph):
     bloomfilters = []
     for f in bloomfilter_filepaths:
         bloomfilters.append(load_bloomfilter(f))
-    graph.build(bloomfilter_filepaths, samples)
+    graph.build(bloomfilters, samples)
     return {'result': 'success'}
old_path: bfg/cmds/build.py
rewrite: ReplaceText(target='bloomfilters' @(27,16)->(27,37))
initial_state: def build(bloomfilter_filepaths, samples, graph): bloomfilters = [] for f in bloomfilter_filepaths: bloomfilters.append(load_bloomfilter(f)) graph.build(bloomfilter_filepaths, samples) return {'result': 'success'}
final_state: def build(bloomfilter_filepaths, samples, graph): bloomfilters = [] for f in bloomfilter_filepaths: bloomfilters.append(load_bloomfilter(f)) graph.build(bloomfilters, samples) return {'result': 'success'}
1,403
repo: https://:@github.com/sebpiq/pychedelic.git
hash: 8bdd4ad6adae0b03e5e84e4667caaf7001fdb4f3
diff:
@@ -28,7 +28,7 @@ class Buffer(object):
         else:
             raise StopIteration

     def pull(self, block_size, overlap=0, pad=False):
-        if overlap and overlap >= block_size:
+        if overlap and overlap > block_size:
             raise ValueError('overlap cannot be more than block_size')

     # First, get as much blocks of data as needed.
old_path: pychedelic/core/buffering.py
rewrite: ReplaceText(target='>' @(31,31)->(31,33))
initial_state: class Buffer(object): else: raise StopIteration def pull(self, block_size, overlap=0, pad=False): if overlap and overlap >= block_size: raise ValueError('overlap cannot be more than block_size') # First, get as much blocks of data as needed.
final_state: class Buffer(object): else: raise StopIteration def pull(self, block_size, overlap=0, pad=False): if overlap and overlap > block_size: raise ValueError('overlap cannot be more than block_size') # First, get as much blocks of data as needed.
1,404
repo: https://:@github.com/SilMon/NucDetect.git
hash: 422787856116a7ba2236825d558f51e59988945c
diff:
@@ -65,7 +65,7 @@ class Detector:
         :param multi_analysis: Needed for multiprocess-analysis
         :return: The analysis results as dict
         """
-        if multi_analysis:
+        if ml_analysis:
             self.analyser = FCN()
         start = time.time()
         logging = logging if self.logging is None else self.logging
old_path: core/Detector.py
rewrite: ReplaceText(target='ml_analysis' @(68,11)->(68,25))
initial_state: class Detector: :param multi_analysis: Needed for multiprocess-analysis :return: The analysis results as dict """ if multi_analysis: self.analyser = FCN() start = time.time() logging = logging if self.logging is None else self.logging
final_state: class Detector: :param multi_analysis: Needed for multiprocess-analysis :return: The analysis results as dict """ if ml_analysis: self.analyser = FCN() start = time.time() logging = logging if self.logging is None else self.logging
1,405
repo: https://:@github.com/lafrech/qbirthday.git
hash: 6c3b545ef66a656d55df7c6b297cadae0b97c491
diff:
@@ -40,7 +40,7 @@ class DataBase(object):
         # new entries can be saved
         self.CAN_SAVE = can_save
         # additional config options for database connection or fukebane(s)
-        self.HAS_CONFIG = can_save
+        self.HAS_CONFIG = has_config
         # the widget for additional config
         self.widget = widget
old_path: src/gbirthday/databases/__init__.py
rewrite: ReplaceText(target='has_config' @(43,26)->(43,34))
initial_state: class DataBase(object): # new entries can be saved self.CAN_SAVE = can_save # additional config options for database connection or fukebane(s) self.HAS_CONFIG = can_save # the widget for additional config self.widget = widget
final_state: class DataBase(object): # new entries can be saved self.CAN_SAVE = can_save # additional config options for database connection or fukebane(s) self.HAS_CONFIG = has_config # the widget for additional config self.widget = widget
1,406
repo: https://:@github.com/c00w/btcnet_info.git
hash: 2501fff74e5c798da1bf15ebf40006198d1f1198
diff:
@@ -46,7 +46,7 @@ class Site(baseobject.Base_Object):
             value = self.wrapper.handle(dict(self.config.items(item)))
             if value:
                 setattr(self, item, value)
-                self.fields.add(value)
+                self.fields.add(item)

     def __repr__(self):
         return '<Difficulty Site %s, %s>' % (self.name, str(self.coins))
old_path: difficulty_sites.py
rewrite: ReplaceText(target='item' @(49,32)->(49,37))
initial_state: class Site(baseobject.Base_Object): value = self.wrapper.handle(dict(self.config.items(item))) if value: setattr(self, item, value) self.fields.add(value) def __repr__(self): return '<Difficulty Site %s, %s>' % (self.name, str(self.coins))
final_state: class Site(baseobject.Base_Object): value = self.wrapper.handle(dict(self.config.items(item))) if value: setattr(self, item, value) self.fields.add(item) def __repr__(self): return '<Difficulty Site %s, %s>' % (self.name, str(self.coins))
1,407
repo: https://:@github.com/nitros12/ics.py.git
hash: 9e76a3eef5e0d1d1e3d1a2b0fd48d45e9d3e2c5e
diff:
@@ -76,7 +76,7 @@ class ContentLine:
         params = {}
         for paramstr in params_strings:
             if '=' not in paramstr:
-                raise ParseError("No '=' in line '{}'".format(line))
+                raise ParseError("No '=' in line '{}'".format(paramstr))
             pname, pvals = paramstr.split('=', 1)
             params[pname] = pvals.split(',')
         return cls(name, params, value)
old_path: ics/parse.py
rewrite: ReplaceText(target='paramstr' @(79,62)->(79,66))
initial_state: class ContentLine: params = {} for paramstr in params_strings: if '=' not in paramstr: raise ParseError("No '=' in line '{}'".format(line)) pname, pvals = paramstr.split('=', 1) params[pname] = pvals.split(',') return cls(name, params, value)
final_state: class ContentLine: params = {} for paramstr in params_strings: if '=' not in paramstr: raise ParseError("No '=' in line '{}'".format(paramstr)) pname, pvals = paramstr.split('=', 1) params[pname] = pvals.split(',') return cls(name, params, value)
1,408
repo: https://:@github.com/shirtsgroup/cg_openmm.git
hash: 40e8d834fd0fb437b40b888c844c9668739999f2
diff:
@@ -284,7 +284,7 @@ def build_mm_simulation(topology,system,positions,temperature=300.0 * unit.kelvi
 #        print("to confirm their validity for these model settings,")
 #        print("before performing a full simulation.")
         time_step_list = [(10.0 * (0.5 ** i)) * unit.femtosecond for i in range(0,14)]
-        simulation_time_step,force_cutoff = get_simulation_time_step(topology,system,positions,temperature,time_step_list,total_simulation_time)
+        simulation_time_step,force_cutoff = get_simulation_time_step(topology,system,positions,temperature,total_simulation_time,time_step_list)
         friction = 0.0
         integrator = LangevinIntegrator(temperature._value,friction,simulation_time_step.in_units_of(unit.picosecond)._value)
old_path: src/build/cg_build.py
rewrite: ArgSwap(idxs=4<->5 @(287,46)->(287,70))
initial_state: def build_mm_simulation(topology,system,positions,temperature=300.0 * unit.kelvi # print("to confirm their validity for these model settings,") # print("before performing a full simulation.") time_step_list = [(10.0 * (0.5 ** i)) * unit.femtosecond for i in range(0,14)] simulation_time_step,force_cutoff = get_simulation_time_step(topology,system,positions,temperature,time_step_list,total_simulation_time) friction = 0.0 integrator = LangevinIntegrator(temperature._value,friction,simulation_time_step.in_units_of(unit.picosecond)._value)
final_state: def build_mm_simulation(topology,system,positions,temperature=300.0 * unit.kelvi # print("to confirm their validity for these model settings,") # print("before performing a full simulation.") time_step_list = [(10.0 * (0.5 ** i)) * unit.femtosecond for i in range(0,14)] simulation_time_step,force_cutoff = get_simulation_time_step(topology,system,positions,temperature,total_simulation_time,time_step_list) friction = 0.0 integrator = LangevinIntegrator(temperature._value,friction,simulation_time_step.in_units_of(unit.picosecond)._value)
1,409
repo: https://:@github.com/shirtsgroup/cg_openmm.git
hash: 47b5ee9ca688b3513f64fafab62f088b91bef6e5
diff:
@@ -252,7 +252,7 @@ def test_expectations_fraction_contacts_pdb(tmpdir):
     # Test free energy fitting / derivative calculation:
     ddeltaF_out, d2deltaF_out, spline_tck = get_free_energy_derivative(
         deltaF_values,
-        temperature_list,
+        full_T_list,
         plotfile=f"{output_directory}/ddeltaF_dT.pdf",
     )
old_path: cg_openmm/tests/test_native_contacts.py
rewrite: ReplaceText(target='full_T_list' @(255,8)->(255,24))
initial_state: def test_expectations_fraction_contacts_pdb(tmpdir): # Test free energy fitting / derivative calculation: ddeltaF_out, d2deltaF_out, spline_tck = get_free_energy_derivative( deltaF_values, temperature_list, plotfile=f"{output_directory}/ddeltaF_dT.pdf", )
final_state: def test_expectations_fraction_contacts_pdb(tmpdir): # Test free energy fitting / derivative calculation: ddeltaF_out, d2deltaF_out, spline_tck = get_free_energy_derivative( deltaF_values, full_T_list, plotfile=f"{output_directory}/ddeltaF_dT.pdf", )
1,410
repo: https://:@github.com/wplct/yzs-work.git
hash: b7c79866bf15c8a7a30b242f80944e8853331048
diff:
@@ -35,7 +35,7 @@ class ResourceCodeManage:
         """
         resource_code = self.map.get(code)
         if not resource_code:
-            if resource_code != 0:
+            if code != 0:
                 warnings.warn('未知错误码', DeprecationWarning)
             return ""
         return resource_code.get_message()
old_path: yzs/tastypie_extend/response_code.py
rewrite: ReplaceText(target='code' @(38,15)->(38,28))
initial_state: class ResourceCodeManage: """ resource_code = self.map.get(code) if not resource_code: if resource_code != 0: warnings.warn('未知错误码', DeprecationWarning) return "" return resource_code.get_message()
final_state: class ResourceCodeManage: """ resource_code = self.map.get(code) if not resource_code: if code != 0: warnings.warn('未知错误码', DeprecationWarning) return "" return resource_code.get_message()
1,411
repo: https://:@github.com/wplct/yzs-work.git
hash: 1d84b0acfacc7c85a3bad11c5c1e4435663125b7
diff:
@@ -12,7 +12,7 @@ logger = logging.getLogger('system')


 def upload_aliyun_oss(folder):
-    if hasattr(settings, 'ALIYUN_OSS'):
+    if not hasattr(settings, 'ALIYUN_OSS'):
         raise Exception('未配置oss')
     AccessKeyId = settings.ALIYUN_OSS["AccessKeyId"]
     AccessKeySecret = settings.ALIYUN_OSS["AccessKeySecret"]
old_path: yzs/django_extend/image_upload.py
rewrite: ReplaceText(target='not ' @(15,7)->(15,7))
initial_state: logger = logging.getLogger('system') def upload_aliyun_oss(folder): if hasattr(settings, 'ALIYUN_OSS'): raise Exception('未配置oss') AccessKeyId = settings.ALIYUN_OSS["AccessKeyId"] AccessKeySecret = settings.ALIYUN_OSS["AccessKeySecret"]
final_state: logger = logging.getLogger('system') def upload_aliyun_oss(folder): if not hasattr(settings, 'ALIYUN_OSS'): raise Exception('未配置oss') AccessKeyId = settings.ALIYUN_OSS["AccessKeyId"] AccessKeySecret = settings.ALIYUN_OSS["AccessKeySecret"]
1,412
repo: https://:@github.com/tohojo/netperf-wrapper.git
hash: 9d88395014ba142084b83f8f02fc545976dcdcb9
diff:
@@ -162,7 +162,7 @@ class TimeseriesAggregator(Aggregator):
         # size
         first_times = [i[0][0] for i in measurements.values() if i and i[0]]
         last_times = [i[-1][0] for i in measurements.values() if i and i[-1]]
-        if not (first_times or last_times):
+        if not (first_times and last_times):
             raise RuntimeError(u"No data to aggregate. Run with -l and check log file to investigate.")
         t_0 = min(first_times)
         t_max = max(last_times)
old_path: aggregators.py
rewrite: ReplaceText(target='and' @(165,28)->(165,30))
initial_state: class TimeseriesAggregator(Aggregator): # size first_times = [i[0][0] for i in measurements.values() if i and i[0]] last_times = [i[-1][0] for i in measurements.values() if i and i[-1]] if not (first_times or last_times): raise RuntimeError(u"No data to aggregate. Run with -l and check log file to investigate.") t_0 = min(first_times) t_max = max(last_times)
final_state: class TimeseriesAggregator(Aggregator): # size first_times = [i[0][0] for i in measurements.values() if i and i[0]] last_times = [i[-1][0] for i in measurements.values() if i and i[-1]] if not (first_times and last_times): raise RuntimeError(u"No data to aggregate. Run with -l and check log file to investigate.") t_0 = min(first_times) t_max = max(last_times)
1,413
repo: https://:@github.com/tohojo/netperf-wrapper.git
hash: 39ec5a73b1c9a09e9e01881e89b53e807c1d2832
diff:
@@ -422,7 +422,7 @@ class ResultWidget(get_ui_class("resultwidget.ui")):
         return self.settings.ZERO_Y

     def disable_log(self, val=None):
-        if val is not None and val != self.settings.LOG_SCALE:
+        if val is not None and val == self.settings.LOG_SCALE:
             self.settings.LOG_SCALE = not val
             self.update()
         return not self.settings.LOG_SCALE
old_path: netperf_wrapper/gui.py
rewrite: ReplaceText(target='==' @(425,35)->(425,37))
initial_state: class ResultWidget(get_ui_class("resultwidget.ui")): return self.settings.ZERO_Y def disable_log(self, val=None): if val is not None and val != self.settings.LOG_SCALE: self.settings.LOG_SCALE = not val self.update() return not self.settings.LOG_SCALE
final_state: class ResultWidget(get_ui_class("resultwidget.ui")): return self.settings.ZERO_Y def disable_log(self, val=None): if val is not None and val == self.settings.LOG_SCALE: self.settings.LOG_SCALE = not val self.update() return not self.settings.LOG_SCALE
1,414
repo: https://:@github.com/tohojo/netperf-wrapper.git
hash: 79e727a68773475c9b0d903102f4bf5eb9593466
diff:
@@ -281,7 +281,7 @@ class BatchRunner(object):
             settings.load_test(informational=settings.BATCH_DRY)
             settings.DATA_FILENAME = self.gen_filename(settings, b, argset, rep)

-            yield batch, settings
+            yield b, settings

     def get_argsets(self, batch):
         argsets = []
old_path: flent/batch.py
rewrite: ReplaceText(target='b' @(284,18)->(284,23))
initial_state: class BatchRunner(object): settings.load_test(informational=settings.BATCH_DRY) settings.DATA_FILENAME = self.gen_filename(settings, b, argset, rep) yield batch, settings def get_argsets(self, batch): argsets = []
final_state: class BatchRunner(object): settings.load_test(informational=settings.BATCH_DRY) settings.DATA_FILENAME = self.gen_filename(settings, b, argset, rep) yield b, settings def get_argsets(self, batch): argsets = []
1,415
repo: https://:@github.com/tohojo/netperf-wrapper.git
hash: 20f7794d4f06646cb739372e5fa9d55489a7ea9d
diff:
@@ -133,7 +133,7 @@ def diff_parts(strings, sep):
     a separator and pruning parts that are identical for all strings"""

     parts = [s.split(sep) for s in strings]
-    np = [p for p in zip(*parts) if len(set(p)) > 1]
+    np = [p for p in zip(*parts) if len(set(p)) >= 1]

     return [sep.join(p) for p in zip(*np)]
old_path: flent/util.py
rewrite: ReplaceText(target='>=' @(136,48)->(136,49))
initial_state: def diff_parts(strings, sep): a separator and pruning parts that are identical for all strings""" parts = [s.split(sep) for s in strings] np = [p for p in zip(*parts) if len(set(p)) > 1] return [sep.join(p) for p in zip(*np)]
final_state: def diff_parts(strings, sep): a separator and pruning parts that are identical for all strings""" parts = [s.split(sep) for s in strings] np = [p for p in zip(*parts) if len(set(p)) >= 1] return [sep.join(p) for p in zip(*np)]
1,416
repo: https://:@github.com/tohojo/netperf-wrapper.git
hash: 07c49ab905f90817ee4ade86784241a257dc15bd
diff:
@@ -1310,7 +1310,7 @@ class Plotter(ArgParam):
             if self.absolute_time:
                 start += results.t0

-            if end < 0:
+            if end <= 0:
                 end += results.meta("TOTAL_LENGTH")

             min_idx = data[0].searchsorted(start, side='right')
old_path: flent/plotters.py
rewrite: ReplaceText(target='<=' @(1313,19)->(1313,20))
initial_state: class Plotter(ArgParam): if self.absolute_time: start += results.t0 if end < 0: end += results.meta("TOTAL_LENGTH") min_idx = data[0].searchsorted(start, side='right')
final_state: class Plotter(ArgParam): if self.absolute_time: start += results.t0 if end <= 0: end += results.meta("TOTAL_LENGTH") min_idx = data[0].searchsorted(start, side='right')
1,417
repo: https://:@github.com/ShagaleevAlexey/openapi-core.git
hash: 56be4b10eb6dfa1020d451626a2f58f836f1729c
diff:
@@ -654,4 +654,4 @@ class TestPetstore(object):
         response_result = response_validator.validate(request, response)

         assert response_result.errors == []
-        assert response_result.data == data
+        assert response_result.data == data_json
old_path: tests/integration/test_petstore.py
rewrite: ReplaceText(target='data_json' @(657,39)->(657,43))
initial_state: class TestPetstore(object): response_result = response_validator.validate(request, response) assert response_result.errors == [] assert response_result.data == data
final_state: class TestPetstore(object): response_result = response_validator.validate(request, response) assert response_result.errors == [] assert response_result.data == data_json
1,418
repo: https://:@github.com/markreidvfx/pyavb.git
hash: 8c822578aa3a9eef8da895125d6911e998d3b933
diff:
@@ -331,7 +331,7 @@ class MSMLocator(core.AVBObject):
                 self.mob_id = mob_id
             elif tag == 0x03:
                 read_assert_tag(f, 76)
-                self.last_known_volume_utf8 = read_string(length, 'utf-8')
+                self.last_known_volume_utf8 = read_string(f, 'utf-8')
             else:
                 raise ValueError("%s: unknown ext tag 0x%02X %d" % (str(self.class_id), tag,tag))
old_path: avb/misc.py
rewrite: ReplaceText(target='f' @(334,58)->(334,64))
initial_state: class MSMLocator(core.AVBObject): self.mob_id = mob_id elif tag == 0x03: read_assert_tag(f, 76) self.last_known_volume_utf8 = read_string(length, 'utf-8') else: raise ValueError("%s: unknown ext tag 0x%02X %d" % (str(self.class_id), tag,tag))
final_state: class MSMLocator(core.AVBObject): self.mob_id = mob_id elif tag == 0x03: read_assert_tag(f, 76) self.last_known_volume_utf8 = read_string(f, 'utf-8') else: raise ValueError("%s: unknown ext tag 0x%02X %d" % (str(self.class_id), tag,tag))
1,419
repo: https://:@github.com/franklingu/comp-match.git
hash: 5fda1eac69ced79b9f489039e060840131f44184
diff:
@@ -65,7 +65,7 @@ class CompanyUnderline(object):
     def setup_country(self):
         """Set country based on known information
         """
-        if not hasattr(self, 'exchange') and self.exchange is None:
+        if not hasattr(self, 'exchange') or self.exchange is None:
             return
         exch_country = find_country_for_exchange(self.exchange)
         if hasattr(self, 'country') and self.country:
old_path: src/matchers/base.py
rewrite: ReplaceText(target='or' @(68,41)->(68,44))
initial_state: class CompanyUnderline(object): def setup_country(self): """Set country based on known information """ if not hasattr(self, 'exchange') and self.exchange is None: return exch_country = find_country_for_exchange(self.exchange) if hasattr(self, 'country') and self.country:
final_state: class CompanyUnderline(object): def setup_country(self): """Set country based on known information """ if not hasattr(self, 'exchange') or self.exchange is None: return exch_country = find_country_for_exchange(self.exchange) if hasattr(self, 'country') and self.country:
1,420
repo: https://:@github.com/drtconway/zotmer.git
hash: e1742c21684efe296bbf81a90abbfb45e6927d89
diff:
@@ -36,7 +36,7 @@ def main(argv):
     inp = opts['<input>']
     out = opts['<output>']
     (m, _) = probe(inp)
-    if opts['-D'] is not None:
+    if opts['-D'] is None:
         if opts['-S'] is not None:
             S = long(opts['-S'])
             random.seed(S)
old_path: zotmer/commands/sample.py
rewrite: ReplaceText(target=' is ' @(39,17)->(39,25))
initial_state: def main(argv): inp = opts['<input>'] out = opts['<output>'] (m, _) = probe(inp) if opts['-D'] is not None: if opts['-S'] is not None: S = long(opts['-S']) random.seed(S)
final_state: def main(argv): inp = opts['<input>'] out = opts['<output>'] (m, _) = probe(inp) if opts['-D'] is None: if opts['-S'] is not None: S = long(opts['-S']) random.seed(S)
1,421
repo: https://:@github.com/programatt/drf-extensions.git
hash: 19ade585f7ce22c2018cdc0c73f5499f7ceaf877
diff:
@@ -135,7 +135,7 @@ class ExtendedActionLinkRouterMixin(object):
             dynamic_routes_instances.append(Route(
                 url=replace_methodname(route.url, endpoint),
                 mapping=dict((httpmethod, methodname) for httpmethod in httpmethods),
-                name=replace_methodname(route.name, methodname),
+                name=replace_methodname(route.name, endpoint),
                 initkwargs=initkwargs,
             ))
         return dynamic_routes_instances
old_path: rest_framework_extensions/routers.py
rewrite: ReplaceText(target='endpoint' @(138,52)->(138,62))
initial_state: class ExtendedActionLinkRouterMixin(object): dynamic_routes_instances.append(Route( url=replace_methodname(route.url, endpoint), mapping=dict((httpmethod, methodname) for httpmethod in httpmethods), name=replace_methodname(route.name, methodname), initkwargs=initkwargs, )) return dynamic_routes_instances
final_state: class ExtendedActionLinkRouterMixin(object): dynamic_routes_instances.append(Route( url=replace_methodname(route.url, endpoint), mapping=dict((httpmethod, methodname) for httpmethod in httpmethods), name=replace_methodname(route.name, endpoint), initkwargs=initkwargs, )) return dynamic_routes_instances
1,422
repo: https://:@github.com/level12/keg-login.git
hash: 30308ddd1b45a74b89c280636ef9208fede53ae4
diff:
@@ -67,7 +67,7 @@ class PasswordResetTokenGenerator:
             return False

         # Check the timestamp is within limit
-        if (self._num_days(self._today()) - ts) > self.timeout_days:
+        if (self._num_days(self._today()) - ts) >= self.timeout_days:
             return False

         return True
old_path: keg_login/tokens.py
rewrite: ReplaceText(target='>=' @(70,48)->(70,49))
initial_state: class PasswordResetTokenGenerator: return False # Check the timestamp is within limit if (self._num_days(self._today()) - ts) > self.timeout_days: return False return True
final_state: class PasswordResetTokenGenerator: return False # Check the timestamp is within limit if (self._num_days(self._today()) - ts) >= self.timeout_days: return False return True
1,423
repo: https://:@github.com/xtqxk/owl.git
hash: 3c8c2ad1dde6ffc87307e6e72fff42d42771bbde
diff:
@@ -54,7 +54,7 @@ def main_tornado():
     application = tornado.web.Application([(r"/", MainHandler)])
     http_server = tornado.httpserver.HTTPServer(application)
     http_server.listen(options.port)
-    loop.start()
+    xcfg.start()


 if __name__ == "__main__":
old_path: demo/tornado_and_aiohttp.py
rewrite: ReplaceText(target='xcfg' @(57,4)->(57,8))
initial_state: def main_tornado(): application = tornado.web.Application([(r"/", MainHandler)]) http_server = tornado.httpserver.HTTPServer(application) http_server.listen(options.port) loop.start() if __name__ == "__main__":
final_state: def main_tornado(): application = tornado.web.Application([(r"/", MainHandler)]) http_server = tornado.httpserver.HTTPServer(application) http_server.listen(options.port) xcfg.start() if __name__ == "__main__":
1,424
repo: https://:@github.com/bburan/abr.git
hash: 24ce8fdca6aa62c713476e7a8ff984adcb98c6e4
diff:
@@ -291,7 +291,7 @@ class SerialWaveformPresenter(WaveformPresenter):
         self.load(model)

     def load_next(self):
-        if self.current_model > len(self.unprocessed):
+        if self.current_model >= len(self.unprocessed):
             return
         self.current_model += 1
         filename, frequency = self.unprocessed[self.current_model]
old_path: abr/presenter.py
rewrite: ReplaceText(target='>=' @(294,30)->(294,31))
initial_state: class SerialWaveformPresenter(WaveformPresenter): self.load(model) def load_next(self): if self.current_model > len(self.unprocessed): return self.current_model += 1 filename, frequency = self.unprocessed[self.current_model]
final_state: class SerialWaveformPresenter(WaveformPresenter): self.load(model) def load_next(self): if self.current_model >= len(self.unprocessed): return self.current_model += 1 filename, frequency = self.unprocessed[self.current_model]
1,425
repo: https://:@github.com/caiovictormc/mqtt-sentinel.git
hash: e582bfc4161cda70200ac4c84ec6d166d2b64386
diff:
@@ -31,7 +31,7 @@ class WatcherWorker:
         raise NotImplementedError()

     def is_available(self):
-        return len(self.subscribed_topics) <= self.max_topics
+        return len(self.subscribed_topics) < self.max_topics

     def _subscribe(self, topic):
         self.subscribed_topics.append(str(topic))
old_path: sentinel/watcher/resources.py
rewrite: ReplaceText(target='<' @(34,43)->(34,45))
initial_state: class WatcherWorker: raise NotImplementedError() def is_available(self): return len(self.subscribed_topics) <= self.max_topics def _subscribe(self, topic): self.subscribed_topics.append(str(topic))
final_state: class WatcherWorker: raise NotImplementedError() def is_available(self): return len(self.subscribed_topics) < self.max_topics def _subscribe(self, topic): self.subscribed_topics.append(str(topic))
1,426
repo: https://:@github.com/nitely/http-lazy-headers.git
hash: 1dd901dcf50242d6beb031195c7039ef03d68252
diff:
@@ -55,7 +55,7 @@ def is_quoted_cookie_octets(txt):
     if len(txt) <= 2:
         return False

-    if (not txt.startswith('"') and
+    if (not txt.startswith('"') or
             not txt.endswith('"')):
         return False
old_path: http_lazy_headers/shared/common/cookies.py
rewrite: ReplaceText(target='or' @(58,32)->(58,35))
initial_state: def is_quoted_cookie_octets(txt): if len(txt) <= 2: return False if (not txt.startswith('"') and not txt.endswith('"')): return False
final_state: def is_quoted_cookie_octets(txt): if len(txt) <= 2: return False if (not txt.startswith('"') or not txt.endswith('"')): return False
1,427
repo: https://:@github.com/AndreMiras/EtherollApp.git
hash: dba25b52e26f01e84195aaf0f06598120fde6186
diff:
@@ -18,7 +18,7 @@ class CoincurveRecipe(PythonRecipe):
         env['LDSHARED'] = env['CC'] + ' -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions'
         libsecp256k1 = self.get_recipe('libsecp256k1', self.ctx)
         libsecp256k1_dir = libsecp256k1.get_build_dir(arch.arch)
-        env['CFLAGS'] = ' -I' + os.path.join(libsecp256k1_dir, 'include')
+        env['CFLAGS'] += ' -I' + os.path.join(libsecp256k1_dir, 'include')
         # required additional library and path for Crystax
         if self.ctx.ndk == 'crystax':
             # only keeps major.minor (discards patch)
old_path: src/python-for-android/recipes/coincurve/__init__.py
rewrite: ReplaceText(target='+=' @(21,22)->(21,23))
initial_state: class CoincurveRecipe(PythonRecipe): env['LDSHARED'] = env['CC'] + ' -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions' libsecp256k1 = self.get_recipe('libsecp256k1', self.ctx) libsecp256k1_dir = libsecp256k1.get_build_dir(arch.arch) env['CFLAGS'] = ' -I' + os.path.join(libsecp256k1_dir, 'include') # required additional library and path for Crystax if self.ctx.ndk == 'crystax': # only keeps major.minor (discards patch)
final_state: class CoincurveRecipe(PythonRecipe): env['LDSHARED'] = env['CC'] + ' -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions' libsecp256k1 = self.get_recipe('libsecp256k1', self.ctx) libsecp256k1_dir = libsecp256k1.get_build_dir(arch.arch) env['CFLAGS'] += ' -I' + os.path.join(libsecp256k1_dir, 'include') # required additional library and path for Crystax if self.ctx.ndk == 'crystax': # only keeps major.minor (discards patch)
1,428
repo: https://:@github.com/Kenny2github/brainfuck-fuck.git
hash: e7370575f02852774b61cbf6d3d6251f30c4f14c
diff:
@@ -14,7 +14,7 @@ def main(args=None):
     if '--stats' in args:
         args.remove('--stats')
         args.append('--stats')
-    if len(args) > 1:
+    if len(args) > 0:
         if args[0] == '-c':
             prog = args[1].strip('"')
         else:
old_path: brainfuck_fuck/bf.py
rewrite: ReplaceText(target='0' @(17,19)->(17,20))
initial_state: def main(args=None): if '--stats' in args: args.remove('--stats') args.append('--stats') if len(args) > 1: if args[0] == '-c': prog = args[1].strip('"') else:
final_state: def main(args=None): if '--stats' in args: args.remove('--stats') args.append('--stats') if len(args) > 0: if args[0] == '-c': prog = args[1].strip('"') else:
1,429
repo: https://:@github.com/gpiantoni/boavus.git
hash: 1dc1245dead4d4c120ace0254b7bd6b9b56c1ad6
diff:
@@ -35,6 +35,6 @@ def project_electrodes(electrodes_file, freesurfer_path):
             material = one_chans['material']
             f.write(f'{_chan.label}\t{xyz}\t{elec_type}\t{size}\t{material}\n')

-    old_json = replace_underscore(Path(f.filename), 'coordframe.json')
+    old_json = replace_underscore(Path(electrodes_file.filename), 'coordframe.json')
     new_json = replace_underscore(tsv_electrodes, 'coordframe.json')
     copyfile(old_json, new_json)  # TODO: add info about transformation
old_path: boavus/ieeg/project_electrodes.py
rewrite: ReplaceText(target='electrodes_file' @(38,39)->(38,40))
initial_state: def project_electrodes(electrodes_file, freesurfer_path): material = one_chans['material'] f.write(f'{_chan.label}\t{xyz}\t{elec_type}\t{size}\t{material}\n') old_json = replace_underscore(Path(f.filename), 'coordframe.json') new_json = replace_underscore(tsv_electrodes, 'coordframe.json') copyfile(old_json, new_json) # TODO: add info about transformation
final_state: def project_electrodes(electrodes_file, freesurfer_path): material = one_chans['material'] f.write(f'{_chan.label}\t{xyz}\t{elec_type}\t{size}\t{material}\n') old_json = replace_underscore(Path(electrodes_file.filename), 'coordframe.json') new_json = replace_underscore(tsv_electrodes, 'coordframe.json') copyfile(old_json, new_json) # TODO: add info about transformation
1,430
repo: https://:@github.com/gpiantoni/boavus.git
hash: 6d383d0e6961f8c55db7d5a33aaf8b933d3a52de
diff:
@@ -33,7 +33,7 @@ def main(output_dir):
             p.starmap(save_frequency, args)
     else:
         for arg in args:
-            save_frequency(*args)
+            save_frequency(*arg)


 def save_frequency(ieeg_file, cond):
old_path: boavus/ieeg/psd.py
rewrite: ReplaceText(target='arg' @(36,28)->(36,32))
initial_state: def main(output_dir): p.starmap(save_frequency, args) else: for arg in args: save_frequency(*args) def save_frequency(ieeg_file, cond):
final_state: def main(output_dir): p.starmap(save_frequency, args) else: for arg in args: save_frequency(*arg) def save_frequency(ieeg_file, cond):
1,431
repo: https://:@github.com/gpiantoni/boavus.git
hash: 427af8efb68c77d24e4a1b88e14307817559dae0
diff:
@@ -140,4 +140,4 @@ def make_segments(dat):
         out.axis['time'][i] = dat.axis['time'][0]
         out.axis['chan'][i] = dat.axis['chan'][0]

-    return dat
+    return out
old_path: boavus/ieeg/preprocessing.py
rewrite: ReplaceText(target='out' @(143,11)->(143,14))
initial_state: def make_segments(dat): out.axis['time'][i] = dat.axis['time'][0] out.axis['chan'][i] = dat.axis['chan'][0] return dat
final_state: def make_segments(dat): out.axis['time'][i] = dat.axis['time'][0] out.axis['chan'][i] = dat.axis['chan'][0] return out
1,432
repo: https://:@github.com/UseAllFive/django-google-cloud-storage.git
hash: 6a63ffe6f06b4781099586ccff32b77877c93ae8
diff:
@@ -94,7 +94,7 @@ class GoogleCloudStorage(Storage):
         return self.created_time(name)

     def url(self, name):
-        if os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine'):
+        if not os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine'):
             # we need this in order to display images, links to files, etc from the local appengine server
             filename = "/gs"+self.location+"/"+name
             key = create_gs_key(filename)
old_path: django_google_cloud_storage/__init__.py
rewrite: ReplaceText(target='not ' @(97,11)->(97,11))
initial_state: class GoogleCloudStorage(Storage): return self.created_time(name) def url(self, name): if os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine'): # we need this in order to display images, links to files, etc from the local appengine server filename = "/gs"+self.location+"/"+name key = create_gs_key(filename)
final_state: class GoogleCloudStorage(Storage): return self.created_time(name) def url(self, name): if not os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine'): # we need this in order to display images, links to files, etc from the local appengine server filename = "/gs"+self.location+"/"+name key = create_gs_key(filename)
1,433
repo: https://:@github.com/ConsumerAffairs/django-experiments.git
hash: 9f7d04c505a1fb1fd70eca209ec1ab285731b5bd
diff:
@@ -35,7 +35,7 @@ def clear(key, participant_identifier):
         # Remove the direct entry
         cache_key = COUNTER_CACHE_KEY % key
         pipe = r.pipeline()
-        freq, _ = pipe.hget(key, participant_identifier).hdel(cache_key, participant_identifier).execute()
+        freq, _ = pipe.hget(cache_key, participant_identifier).hdel(cache_key, participant_identifier).execute()

         # Remove from the histogram
         freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
old_path: experiments/counters.py
rewrite: ReplaceText(target='cache_key' @(38,28)->(38,31))
initial_state: def clear(key, participant_identifier): # Remove the direct entry cache_key = COUNTER_CACHE_KEY % key pipe = r.pipeline() freq, _ = pipe.hget(key, participant_identifier).hdel(cache_key, participant_identifier).execute() # Remove from the histogram freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
final_state: def clear(key, participant_identifier): # Remove the direct entry cache_key = COUNTER_CACHE_KEY % key pipe = r.pipeline() freq, _ = pipe.hget(cache_key, participant_identifier).hdel(cache_key, participant_identifier).execute() # Remove from the histogram freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
1,434
repo: https://:@github.com/broiledmeat/pydgeot.git
hash: 54f264d9a0c8613e6c4c30819554777f0c785200
diff:
@@ -74,7 +74,7 @@ class Generator:
                               for s in self.app.sources.get_dependencies(c, reverse=True, recursive=True)])

             # Add source and context dependencies to the changeset.
-            changes.generate |= source_deps | context_deps
+            dep_changes.generate |= source_deps | context_deps

             # Prepare dependent changes that weren't in the original changes list
             for path in (dep_changes.generate - changes.generate):
old_path: pydgeot/generator.py
rewrite: ReplaceText(target='dep_changes' @(77,12)->(77,19))
initial_state: class Generator: for s in self.app.sources.get_dependencies(c, reverse=True, recursive=True)]) # Add source and context dependencies to the changeset. changes.generate |= source_deps | context_deps # Prepare dependent changes that weren't in the original changes list for path in (dep_changes.generate - changes.generate):
final_state: class Generator: for s in self.app.sources.get_dependencies(c, reverse=True, recursive=True)]) # Add source and context dependencies to the changeset. dep_changes.generate |= source_deps | context_deps # Prepare dependent changes that weren't in the original changes list for path in (dep_changes.generate - changes.generate):
1,435
repo: https://:@github.com/chameleoncloud/jupyterlab-zenodo.git
hash: 14517a0c6f765c0f9470fdae3de98fb82d4d36de
diff:
@@ -189,7 +189,7 @@ class ZenodoUploadHandler(ZenodoBaseHandler):
             print("doi: "+str(doi))
             self.set_status(201)
             self.write(json.dumps(info))
-            store_record(doi, filename, directory_to_zip, access_token)
+            store_record(doi, path_to_file, directory_to_zip, access_token)
             #self.redirect("http://127.0.0.1:7000/portal/upload/"+doi)
             self.finish()
         else:
old_path: jupyterlab_zenodo/upload.py
rewrite: ReplaceText(target='path_to_file' @(192,30)->(192,38))
initial_state: class ZenodoUploadHandler(ZenodoBaseHandler): print("doi: "+str(doi)) self.set_status(201) self.write(json.dumps(info)) store_record(doi, filename, directory_to_zip, access_token) #self.redirect("http://127.0.0.1:7000/portal/upload/"+doi) self.finish() else:
final_state: class ZenodoUploadHandler(ZenodoBaseHandler): print("doi: "+str(doi)) self.set_status(201) self.write(json.dumps(info)) store_record(doi, path_to_file, directory_to_zip, access_token) #self.redirect("http://127.0.0.1:7000/portal/upload/"+doi) self.finish() else:
1,436
repo: https://:@github.com/overdev/raylib-py.git
hash: db84739a19f8719c1f58cf6946741e29f1ef0262
diff:
@@ -2126,7 +2126,7 @@ _rl.InitWindow.argtypes = [Int, Int, CharPtr]
 _rl.InitWindow.restype = None
 def init_window(width: int, height: int, title: AnyStr) -> None:
     """Initialize window and OpenGL context"""
-    return _rl.InitWindow(_int(width), _int(width), _str_in(title))
+    return _rl.InitWindow(_int(width), _int(height), _str_in(title))


 _rl.CloseWindow.argtypes = _NOARGS
old_path: raylibpy/__init__.py
rewrite: ReplaceText(target='height' @(2129,44)->(2129,49))
initial_state: _rl.InitWindow.argtypes = [Int, Int, CharPtr] _rl.InitWindow.restype = None def init_window(width: int, height: int, title: AnyStr) -> None: """Initialize window and OpenGL context""" return _rl.InitWindow(_int(width), _int(width), _str_in(title)) _rl.CloseWindow.argtypes = _NOARGS
final_state: _rl.InitWindow.argtypes = [Int, Int, CharPtr] _rl.InitWindow.restype = None def init_window(width: int, height: int, title: AnyStr) -> None: """Initialize window and OpenGL context""" return _rl.InitWindow(_int(width), _int(height), _str_in(title)) _rl.CloseWindow.argtypes = _NOARGS
1,437
repo: https://:@github.com/NCI-GDC/bam_readgroup_to_gdc_json.git
hash: 3e7833e8ec3195f638bbdd3e7a56db429259db73
diff:
@@ -237,7 +237,7 @@ def extract_readgroup_json(bam_path, logger):
     bam_name, bam_ext = os.path.splitext(bam_file)
     readgroups_json_file = bam_name+'.json'
     with open (bam_path) as f:
-        samfile = pysam.AlignmentFile(bam_path, 'rb', check_header=True, check_sq=False)
+        samfile = pysam.AlignmentFile(f, 'rb', check_header=True, check_sq=False)
         readgroup_dict_list = get_readgroup_dict_list(samfile)
     with open(readgroups_json_file, 'w') as f:
         json.dump(out_readgroup_dict_list, f, indent=4)
old_path: bam_readgroup_to_gdc_json/extract_readgroup.py
rewrite: ReplaceText(target='f' @(240,38)->(240,46))
initial_state: def extract_readgroup_json(bam_path, logger): bam_name, bam_ext = os.path.splitext(bam_file) readgroups_json_file = bam_name+'.json' with open (bam_path) as f: samfile = pysam.AlignmentFile(bam_path, 'rb', check_header=True, check_sq=False) readgroup_dict_list = get_readgroup_dict_list(samfile) with open(readgroups_json_file, 'w') as f: json.dump(out_readgroup_dict_list, f, indent=4)
final_state: def extract_readgroup_json(bam_path, logger): bam_name, bam_ext = os.path.splitext(bam_file) readgroups_json_file = bam_name+'.json' with open (bam_path) as f: samfile = pysam.AlignmentFile(f, 'rb', check_header=True, check_sq=False) readgroup_dict_list = get_readgroup_dict_list(samfile) with open(readgroups_json_file, 'w') as f: json.dump(out_readgroup_dict_list, f, indent=4)
1,438
repo: https://:@github.com/NCI-GDC/bam_readgroup_to_gdc_json.git
hash: e4573f93ab837ef2cd81c5276d089af9ba788575
diff:
@@ -239,7 +239,7 @@ def extract_readgroup_json(bam_path, logger):
         if not bam_readgroup_dict_list:
             logger.error('There are no readgroups in BAM: {}'.format(samfile.filename))
             raise NoReadGroupError
-        readgroup_dict_list = get_readgroup_dict_list(samfile, logger)
+        readgroup_dict_list = get_readgroup_dict_list(bam_readgroup_dict_list, logger)
         with open(readgroups_json_file, 'w') as f:
             json.dump(readgroup_dict_list, f, indent=4)
         return readgroups_json_file
old_path: bam_readgroup_to_gdc_json/extract_readgroup.py
rewrite: ReplaceText(target='bam_readgroup_dict_list' @(242,54)->(242,61))
initial_state: def extract_readgroup_json(bam_path, logger): if not bam_readgroup_dict_list: logger.error('There are no readgroups in BAM: {}'.format(samfile.filename)) raise NoReadGroupError readgroup_dict_list = get_readgroup_dict_list(samfile, logger) with open(readgroups_json_file, 'w') as f: json.dump(readgroup_dict_list, f, indent=4) return readgroups_json_file
final_state: def extract_readgroup_json(bam_path, logger): if not bam_readgroup_dict_list: logger.error('There are no readgroups in BAM: {}'.format(samfile.filename)) raise NoReadGroupError readgroup_dict_list = get_readgroup_dict_list(bam_readgroup_dict_list, logger) with open(readgroups_json_file, 'w') as f: json.dump(readgroup_dict_list, f, indent=4) return readgroups_json_file
1,439
repo: https://:@github.com/Tynukua/mono.git
hash: 58368594340912bca030c0b559ce4e3bac205960
diff:
@@ -11,7 +11,7 @@ def snakeize_dict(dict_):
     answer = {}
     for key in dict_:
         nkey = snakeize_s(key)
-        answer[key] = dict_[key]
+        answer[nkey] = dict_[key]
     return answer


 class MonoCard:
old_path: mono/types.py
rewrite: ReplaceText(target='nkey' @(14,15)->(14,18))
initial_state: def snakeize_dict(dict_): answer = {} for key in dict_: nkey = snakeize_s(key) answer[key] = dict_[key] return answer class MonoCard:
final_state: def snakeize_dict(dict_): answer = {} for key in dict_: nkey = snakeize_s(key) answer[nkey] = dict_[key] return answer class MonoCard:
1,440
repo: https://:@github.com/Helios-Protocol/py-helios-node.git
hash: 24c35bc490eb8737da1d5a9ec90ad3be3ea5eecd
diff:
@@ -24,7 +24,7 @@ def setup_trinity_logging(level):
     logger.setLevel(logging.DEBUG)
     logger.addHandler(handler)

-    listener = handlers.QueueListener(log_queue, logger)
+    listener = handlers.QueueListener(log_queue, handler)

    return logger, log_queue, listener
old_path: trinity/utils/logging.py
rewrite: ReplaceText(target='handler' @(27,49)->(27,55))
initial_state: def setup_trinity_logging(level): logger.setLevel(logging.DEBUG) logger.addHandler(handler) listener = handlers.QueueListener(log_queue, logger) return logger, log_queue, listener
final_state: def setup_trinity_logging(level): logger.setLevel(logging.DEBUG) logger.addHandler(handler) listener = handlers.QueueListener(log_queue, handler) return logger, log_queue, listener
1,441
repo: https://:@github.com/Helios-Protocol/py-helios-node.git
hash: 8f99b5b6628e75a80ca695750fb313ecb796bb49
diff:
@@ -273,7 +273,7 @@ class BaseTransactionExecutor(metaclass=ABCMeta):
         valid_transaction = self.validate_transaction(transaction)
         message = self.build_evm_message(valid_transaction)
         computation = self.build_computation(message, valid_transaction)
-        finalized_computation = self.finalize_computation(computation, valid_transaction)
+        finalized_computation = self.finalize_computation(valid_transaction, computation)
         return finalized_computation

     @abstractmethod
old_path: evm/vm/state.py
rewrite: ArgSwap(idxs=0<->1 @(276,32)->(276,57))
initial_state: class BaseTransactionExecutor(metaclass=ABCMeta): valid_transaction = self.validate_transaction(transaction) message = self.build_evm_message(valid_transaction) computation = self.build_computation(message, valid_transaction) finalized_computation = self.finalize_computation(computation, valid_transaction) return finalized_computation @abstractmethod
final_state: class BaseTransactionExecutor(metaclass=ABCMeta): valid_transaction = self.validate_transaction(transaction) message = self.build_evm_message(valid_transaction) computation = self.build_computation(message, valid_transaction) finalized_computation = self.finalize_computation(valid_transaction, computation) return finalized_computation @abstractmethod
1,442
repo: https://:@github.com/vectorhacker/pygeteventstore.git
hash: c311377e282413e5b2e5763eed5931e61d713aa1
diff:
@@ -44,7 +44,7 @@ class Reader(object):
         self._url = path

         if self._index < 0:
-            if self._feed_page is not feedparser.FeedParserDict:
+            if self._feed_page is feedparser.FeedParserDict:
                 for link in self._feed_page.links:
                     if link.rel == 'previous':
                         self._url = link.href
old_path: geteventstore/read_stream.py
rewrite: ReplaceText(target=' is ' @(47,30)->(47,38))
initial_state: class Reader(object): self._url = path if self._index < 0: if self._feed_page is not feedparser.FeedParserDict: for link in self._feed_page.links: if link.rel == 'previous': self._url = link.href
final_state: class Reader(object): self._url = path if self._index < 0: if self._feed_page is feedparser.FeedParserDict: for link in self._feed_page.links: if link.rel == 'previous': self._url = link.href
1,443
repo: https://:@github.com/javicacheiro/configuration-registry.git
hash: c49d2d6ad4ce1edb22023d6ea97973b8d0f045b7
diff:
@@ -745,4 +745,4 @@ def dn_from(id):
     Basically the ID string is equivalent to a DN but without certain
     characters that can cause problems like '/'
     """
-    return id.replace('.', DOT).replace(SLASH, '/')
+    return id.replace(DOT, '.').replace(SLASH, '/')
old_path: registry.py
rewrite: ArgSwap(idxs=0<->1 @(748,11)->(748,21))
initial_state: def dn_from(id): Basically the ID string is equivalent to a DN but without certain characters that can cause problems like '/' """ return id.replace('.', DOT).replace(SLASH, '/')
final_state: def dn_from(id): Basically the ID string is equivalent to a DN but without certain characters that can cause problems like '/' """ return id.replace(DOT, '.').replace(SLASH, '/')
1,444
repo: https://:@github.com/andrefreitas/schwa.git
hash: 4a16e3bb49f2afe9c4749c2d8ab68a4f05f66761
diff:
@@ -38,7 +38,7 @@ class JavaParser(AbstractParser):
                 if current_class:
                     current_class[1] = last_closing_bracket_number
                 if current_method:
-                    current_method[1] = last_closing_bracket_number
+                    current_method[1] = penultimate_closing_bracket_number
                     components.append(current_method)
                 current_class = [line_counter, 0, search.group(2)]
                 continue
old_path: schwa/parsing/java_parser.py
rewrite: ReplaceText(target='penultimate_closing_bracket_number' @(41,40)->(41,67))
initial_state: class JavaParser(AbstractParser): if current_class: current_class[1] = last_closing_bracket_number if current_method: current_method[1] = last_closing_bracket_number components.append(current_method) current_class = [line_counter, 0, search.group(2)] continue
final_state: class JavaParser(AbstractParser): if current_class: current_class[1] = last_closing_bracket_number if current_method: current_method[1] = penultimate_closing_bracket_number components.append(current_method) current_class = [line_counter, 0, search.group(2)] continue
1,445
repo: https://:@github.com/pyconuk/conferencescheduler-cli.git
hash: 74d3dbfc5c23ac119d2a22f259363f0912245e0b
diff:
@@ -83,7 +83,7 @@ def build(algorithm, objective, diff, input_dir, solution_dir, build_dir):
     if diff:
         schedule = solution_to_schedule(solution, events, slots)
-        event_diff = event_schedule_difference(schedule, original_schedule)
+        event_diff = event_schedule_difference(original_schedule, schedule)
         logger.debug(f'\nevent_diff:')
         for item in event_diff:
             logger.debug(f'{item.event.name} has moved from {item.old_slot.venue} at {item.old_slot.starts_at} to {item.new_slot.venue} at {item.new_slot.starts_at}')
old_path: src/scheduler/cli.py
rewrite: ArgSwap(idxs=0<->1 @(86,21)->(86,46))
initial_state: def build(algorithm, objective, diff, input_dir, solution_dir, build_dir): if diff: schedule = solution_to_schedule(solution, events, slots) event_diff = event_schedule_difference(schedule, original_schedule) logger.debug(f'\nevent_diff:') for item in event_diff: logger.debug(f'{item.event.name} has moved from {item.old_slot.venue} at {item.old_slot.starts_at} to {item.new_slot.venue} at {item.new_slot.starts_at}')
final_state: def build(algorithm, objective, diff, input_dir, solution_dir, build_dir): if diff: schedule = solution_to_schedule(solution, events, slots) event_diff = event_schedule_difference(original_schedule, schedule) logger.debug(f'\nevent_diff:') for item in event_diff: logger.debug(f'{item.event.name} has moved from {item.old_slot.venue} at {item.old_slot.starts_at} to {item.new_slot.venue} at {item.new_slot.starts_at}')
1,446
repo: https://:@github.com/kangasbros/django-bitcoin.git
hash: 0efe50d2a1ad192492ae360834e1403600cceb69
diff:
@@ -46,7 +46,7 @@ class RatingManager(object):
             object_id = self.instance.id,
             key = self.field.key,
         )
-        if not user:
+        if user:
             kwargs['user'] = user
         else:
             kwargs['user__isnull'] = True
old_path: djangoratings/__init__.py
rewrite: ReplaceText(target='' @(49,11)->(49,15))
initial_state: class RatingManager(object): object_id = self.instance.id, key = self.field.key, ) if not user: kwargs['user'] = user else: kwargs['user__isnull'] = True
final_state: class RatingManager(object): object_id = self.instance.id, key = self.field.key, ) if user: kwargs['user'] = user else: kwargs['user__isnull'] = True
1,447
repo: https://:@github.com/faerbit/grade_change_emailer.git
hash: e7c3f657704adf5a2e76cd22f29523f19a8f9113
diff:
@@ -79,7 +79,7 @@ class GradeChangeEmailer:

         if old_html_table != html_table:
             mail_text = "<head> <meta charset='utf-8'></head><body>"
-            mail_text = "Es gab Änderungen in deinen Noten:\n"
+            mail_text += "Es gab Änderungen in deinen Noten:\n"
             mail_text += html_table
             mail_text += "</body>"
             self.send_mail(mail_text)
old_path: main.py
rewrite: ReplaceText(target='+=' @(82,22)->(82,23))
initial_state: class GradeChangeEmailer: if old_html_table != html_table: mail_text = "<head> <meta charset='utf-8'></head><body>" mail_text = "Es gab Änderungen in deinen Noten:\n" mail_text += html_table mail_text += "</body>" self.send_mail(mail_text)
final_state: class GradeChangeEmailer: if old_html_table != html_table: mail_text = "<head> <meta charset='utf-8'></head><body>" mail_text += "Es gab Änderungen in deinen Noten:\n" mail_text += html_table mail_text += "</body>" self.send_mail(mail_text)
1,448
repo: https://:@github.com/dcramer/nose-quickunit.git
hash: 777b3ba3e0a7fd5948fff6f88583d61c1b6c2f2f
diff:
@@ -72,7 +72,7 @@ class QuickUnitPlugin(Plugin):
         self.verbosity = options.verbosity
         if options.quickunit_prefix:
             self.prefixes = options.quickunit_prefix
-            if len(self.prefixes) == 0:
+            if len(self.prefixes) == 1:
                 self.prefixes = self.prefixes[0].split('\n')
         else:
             self.prefixes = ["tests/"]
old_path: quickunit/plugin.py
rewrite: ReplaceText(target='1' @(75,37)->(75,38))
initial_state: class QuickUnitPlugin(Plugin): self.verbosity = options.verbosity if options.quickunit_prefix: self.prefixes = options.quickunit_prefix if len(self.prefixes) == 0: self.prefixes = self.prefixes[0].split('\n') else: self.prefixes = ["tests/"]
final_state: class QuickUnitPlugin(Plugin): self.verbosity = options.verbosity if options.quickunit_prefix: self.prefixes = options.quickunit_prefix if len(self.prefixes) == 1: self.prefixes = self.prefixes[0].split('\n') else: self.prefixes = ["tests/"]
1,449
repo: https://:@gitlab.com/atviriduomenys/spinta.git
hash: 97920623601fc3be0e8623366b29e0cf1125903f
diff:
@@ -972,7 +972,7 @@ class QueryBuilder:
             if _is_dtype(prop, (String, DateTime, Date)):
                 field = jsonb.astext
             else:
-                field = sa.cast(field, JSONB)
+                field = sa.cast(jsonb, JSONB)
         else:
             field = self.table.main.c[prop.name]
old_path: spinta/backends/postgresql/__init__.py
rewrite: ReplaceText(target='jsonb' @(975,32)->(975,37))
initial_state: class QueryBuilder: if _is_dtype(prop, (String, DateTime, Date)): field = jsonb.astext else: field = sa.cast(field, JSONB) else: field = self.table.main.c[prop.name]
final_state: class QueryBuilder: if _is_dtype(prop, (String, DateTime, Date)): field = jsonb.astext else: field = sa.cast(jsonb, JSONB) else: field = self.table.main.c[prop.name]
1,450
repo: https://:@github.com/eduardostarling/restio.git
hash: 417c8fd882a8e922f831ca4ed8999607d3a09867
diff:
@@ -155,7 +155,7 @@ class TestModel:
         model_b._internal_id = model_a._internal_id

-        assert model_a == model_b
+        assert model_a != model_b

     @pytest.mark.parametrize(
         "model, expected_fields, expected_dependency_fields",
old_path: tests/unit/test_model.py
rewrite: ReplaceText(target='!=' @(158,23)->(158,25))
initial_state: class TestModel: model_b._internal_id = model_a._internal_id assert model_a == model_b @pytest.mark.parametrize( "model, expected_fields, expected_dependency_fields",
final_state: class TestModel: model_b._internal_id = model_a._internal_id assert model_a != model_b @pytest.mark.parametrize( "model, expected_fields, expected_dependency_fields",
1,451
repo: https://:@github.com/dbehrlich/PsychRNN.git
hash: aa2e83a6667bb398dc2b65dfac17a17b3c6766b5
diff:
@@ -54,7 +54,7 @@ class FlipFlop(Task):
         for i, (input_start, echo_start) in enumerate(zip(input_times, echo_times)):

             if input_start <= t < input_start + echo_start:
-                x_t = 1.0
+                x_t += 1.0
                 mask_t = np.zeros(self.N_out)

             elif echo_start <= t < echo_start + echo_duration:
old_path: psychrnn/tasks/flip_flop.py
rewrite: ReplaceText(target='+=' @(57,20)->(57,21))
initial_state: class FlipFlop(Task): for i, (input_start, echo_start) in enumerate(zip(input_times, echo_times)): if input_start <= t < input_start + echo_start: x_t = 1.0 mask_t = np.zeros(self.N_out) elif echo_start <= t < echo_start + echo_duration:
final_state: class FlipFlop(Task): for i, (input_start, echo_start) in enumerate(zip(input_times, echo_times)): if input_start <= t < input_start + echo_start: x_t += 1.0 mask_t = np.zeros(self.N_out) elif echo_start <= t < echo_start + echo_duration:
1,452
repo: https://:@github.com/funkybob/knights-templater.git
hash: 8d4ea7037e74574d98dc376b7413e36b3fd6aa8e
diff:
@@ -29,7 +29,7 @@ def load_template(name, paths=None, raw=False):
             with open(full_name, encoding='utf-8') as fin:
                 src = fin.read()
-            return kompile(src, raw=raw, filename=name)
+            return kompile(src, raw=raw, filename=full_name)
         except FileNotFoundError:
             pass
     else:
old_path: knights/loader.py
rewrite: ReplaceText(target='full_name' @(32,50)->(32,54))
initial_state: def load_template(name, paths=None, raw=False): with open(full_name, encoding='utf-8') as fin: src = fin.read() return kompile(src, raw=raw, filename=name) except FileNotFoundError: pass else:
final_state: def load_template(name, paths=None, raw=False): with open(full_name, encoding='utf-8') as fin: src = fin.read() return kompile(src, raw=raw, filename=full_name) except FileNotFoundError: pass else:
1,453
repo: https://:@github.com/pgeurin/predpy.git
hash: dc4b2c89e719c6a94f3a4e0ab632aec2a5990c35
diff:
@@ -488,7 +488,7 @@ def compare_predictions(df, y_var_name, percent_data=None,
         timeit(plot_rocs, models, df_X, y)
         plt.show()
     print(f'MAKE SUBSAMPLE TIME: {time() - starttotal}')
-    return names, results, models, pipeline, df_X
+    return names, results, fit_models, pipeline, df_X

 def bootstrap_train_premade(model, X, y, bootstraps=1000, **kwargs):
     """Train a (linear) model on multiple bootstrap samples of some data and
old_path: autoregression/autoregression.py
rewrite: ReplaceText(target='fit_models' @(491,27)->(491,33))
initial_state: def compare_predictions(df, y_var_name, percent_data=None, timeit(plot_rocs, models, df_X, y) plt.show() print(f'MAKE SUBSAMPLE TIME: {time() - starttotal}') return names, results, models, pipeline, df_X def bootstrap_train_premade(model, X, y, bootstraps=1000, **kwargs): """Train a (linear) model on multiple bootstrap samples of some data and
final_state: def compare_predictions(df, y_var_name, percent_data=None, timeit(plot_rocs, models, df_X, y) plt.show() print(f'MAKE SUBSAMPLE TIME: {time() - starttotal}') return names, results, fit_models, pipeline, df_X def bootstrap_train_premade(model, X, y, bootstraps=1000, **kwargs): """Train a (linear) model on multiple bootstrap samples of some data and
1,454
repo: https://:@github.com/datastax/cstar_perf.git
hash: 775971343c59a597c45041ae9937d8d41fbff31c
diff:
@@ -150,7 +150,7 @@ def nodetool(cmd):
                             stderr=subprocess.STDOUT, shell=True)
     output = proc.communicate()
-    if output.returncode != 0:
+    if proc.returncode != 0:
         raise NodetoolException(output)
     return output[0]
old_path: tool/cstar_perf/tool/benchmark.py
rewrite: ReplaceText(target='proc' @(153,7)->(153,13))
initial_state: def nodetool(cmd): stderr=subprocess.STDOUT, shell=True) output = proc.communicate() if output.returncode != 0: raise NodetoolException(output) return output[0]
final_state: def nodetool(cmd): stderr=subprocess.STDOUT, shell=True) output = proc.communicate() if proc.returncode != 0: raise NodetoolException(output) return output[0]
1,455
repo: https://:@github.com/azazel75/metapensiero.signal.git
hash: 58a8b6bfb2993e3137b59da1d83da816a67437ed
diff:
@@ -373,7 +373,7 @@ class Signal:
            if instance is None:
                fnotify = self._fnotify
            else:
-                fnotify = types.MethodType(instance, self._fnotify)
+                fnotify = types.MethodType(self._fnotify, instance)
            validator = self._fvalidation
            if validator is not None and instance is not None:
                validator = types.MethodType(validator, instance)
old_path: src/metapensiero/signal/core.py
rewrite: ArgSwap(idxs=0<->1 @(376,26)->(376,42))
initial_state: class Signal: if instance is None: fnotify = self._fnotify else: fnotify = types.MethodType(instance, self._fnotify) validator = self._fvalidation if validator is not None and instance is not None: validator = types.MethodType(validator, instance)
final_state: class Signal: if instance is None: fnotify = self._fnotify else: fnotify = types.MethodType(self._fnotify, instance) validator = self._fvalidation if validator is not None and instance is not None: validator = types.MethodType(validator, instance)
1,456
repo: https://:@github.com/datasnakes/rinse.git
hash: fe669e8c2ea26510a9509f954272544b95b50ddb
diff:
@@ -129,7 +129,7 @@ class LInstallR(InstallR):
         version_name = "R-%s" % version
         if Path(self.bin_path / "R").exists():
             remove(str(self.bin_path / "R"))
-        symlink(str(self.lib_path / version_name / "bin" / "R"), str(self.bin_path / "R"))
+        symlink(str(self.bin_path / "R"), str(self.lib_path / version_name / "bin" / "R"))

     def clear_tmp_dir(self):
         # Set up the temporary directory for installation
old_path: rinse/core.py
rewrite: ArgSwap(idxs=0<->1 @(132,8)->(132,15))
initial_state: class LInstallR(InstallR): version_name = "R-%s" % version if Path(self.bin_path / "R").exists(): remove(str(self.bin_path / "R")) symlink(str(self.lib_path / version_name / "bin" / "R"), str(self.bin_path / "R")) def clear_tmp_dir(self): # Set up the temporary directory for installation
final_state: class LInstallR(InstallR): version_name = "R-%s" % version if Path(self.bin_path / "R").exists(): remove(str(self.bin_path / "R")) symlink(str(self.bin_path / "R"), str(self.lib_path / version_name / "bin" / "R")) def clear_tmp_dir(self): # Set up the temporary directory for installation
1,457
repo: https://:@github.com/Ircam-Web/mezzanine-organization.git
hash: 5d66d26e44c82ff7f5c1fd02b8bd368ba1179650
diff:
@@ -255,7 +255,7 @@ def order_links(links):
                     minor = link
             except TypeError:
                 pass
-        ordered_links.append(link)
+        ordered_links.append(minor)
         links_list.remove(minor)
     return ordered_links
old_path: organization/core/templatetags/organization_tags.py
rewrite: ReplaceText(target='minor' @(258,29)->(258,33))
initial_state: def order_links(links): minor = link except TypeError: pass ordered_links.append(link) links_list.remove(minor) return ordered_links
final_state: def order_links(links): minor = link except TypeError: pass ordered_links.append(minor) links_list.remove(minor) return ordered_links
1,458
repo: https://:@github.com/Ircam-Web/mezzanine-organization.git
hash: ae8a7252690d9c7801549d30b8734e3e9d76af36
diff:
@@ -192,7 +192,7 @@ class TopicFilterForm(forms.Form):
         topics = ProjectTopic.objects.all()
         topics_list = []

-        for topic in topics_list:
+        for topic in topics:
             if topic.projects.count():
                 topics_list.append((topic.id, topic.name))
         return topics_list
old_path: organization/projects/forms.py
rewrite: ReplaceText(target='topics' @(195,21)->(195,32))
initial_state: class TopicFilterForm(forms.Form): topics = ProjectTopic.objects.all() topics_list = [] for topic in topics_list: if topic.projects.count(): topics_list.append((topic.id, topic.name)) return topics_list
final_state: class TopicFilterForm(forms.Form): topics = ProjectTopic.objects.all() topics_list = [] for topic in topics: if topic.projects.count(): topics_list.append((topic.id, topic.name)) return topics_list
1,459
https://:@github.com/Jumpscale/core9.git
5fc90cb3a2a909103cc735b979a55253cca15f68
@@ -1426,7 +1426,7 @@ class SystemFS:
         for root, dirs, files in os.walk(startDir):
             for name in files:
                 if fnmatch.fnmatch(name, fileregex):
-                    result.append(os.path.join(root, fileregex))
+                    result.append(os.path.join(root, name))
         return result

     def grep(self, fileregex, lineregex):
JumpScale9/fs/SystemFS.py
ReplaceText(target='name' @(1429,53)->(1429,62))
class SystemFS: for root, dirs, files in os.walk(startDir): for name in files: if fnmatch.fnmatch(name, fileregex): result.append(os.path.join(root, fileregex)) return result def grep(self, fileregex, lineregex):
class SystemFS: for root, dirs, files in os.walk(startDir): for name in files: if fnmatch.fnmatch(name, fileregex): result.append(os.path.join(root, name)) return result def grep(self, fileregex, lineregex):
1,460
https://:@github.com/woctezuma/download-steam-reviews.git
b34328a97fe7a21d2fdb944fd3f3e1f16084dc27
@@ -216,7 +216,7 @@ def download_reviews_for_app_id(app_id,
     else:
         collection_keyword = 'first posted'

     print('Collecting reviews {} after {}'.format(collection_keyword,
-                                                  timestamp_threshold))
+                                                  date_threshold))

     review_dict = load_review_dict(app_id)
steamreviews/download_reviews.py
ReplaceText(target='date_threshold' @(219,58)->(219,77))
def download_reviews_for_app_id(app_id, else: collection_keyword = 'first posted' print('Collecting reviews {} after {}'.format(collection_keyword, timestamp_threshold)) review_dict = load_review_dict(app_id)
def download_reviews_for_app_id(app_id, else: collection_keyword = 'first posted' print('Collecting reviews {} after {}'.format(collection_keyword, date_threshold)) review_dict = load_review_dict(app_id)
1,461
https://:@github.com/merlin-neurotech/WizardHat.git
6c4b0cb72f062a0e59034f9a1483c6e207695f99
@@ -153,7 +153,7 @@ class PacketHandler(BasePacketHandler):
             # convert from packet to sample ID
             sample_id = (packet_id - 1) * 2 + delta_id + 1
             # 19bit packets hold deltas between two samples
-            self._last_eeg_data += np.array(deltas[delta_id])
+            self._last_eeg_data -= np.array(deltas[delta_id])
             self._update_counts_and_enqueue("EEG", sample_id)

     def _parse_compressed_19bit(self, packet_id, packet):
ble2lsl/devices/ganglion/ganglion.py
ReplaceText(target='-=' @(156,32)->(156,34))
class PacketHandler(BasePacketHandler): # convert from packet to sample ID sample_id = (packet_id - 1) * 2 + delta_id + 1 # 19bit packets hold deltas between two samples self._last_eeg_data += np.array(deltas[delta_id]) self._update_counts_and_enqueue("EEG", sample_id) def _parse_compressed_19bit(self, packet_id, packet):
class PacketHandler(BasePacketHandler): # convert from packet to sample ID sample_id = (packet_id - 1) * 2 + delta_id + 1 # 19bit packets hold deltas between two samples self._last_eeg_data -= np.array(deltas[delta_id]) self._update_counts_and_enqueue("EEG", sample_id) def _parse_compressed_19bit(self, packet_id, packet):
1,462
https://:@github.com/PiecePaperCode/pyogame2.git
ba010e0c9b6c884195448c98c1ef493fba075624
@@ -616,7 +616,7 @@ class OGame2(object):
         fleets_list = []
         response = self.session.get('https://s{}-{}.ogame.gameforge.com/game/index.php?page=ingame&component=movement'
                                     .format(self.server_number, self.server_language))
-        if response.status_code == 302:
+        if response.status_code != 302:
             fleets = response.text.split('<div id="fleet')
             del fleets[0]
             for fleet in fleets:
pyogame2/__init__.py
ReplaceText(target='!=' @(619,32)->(619,34))
class OGame2(object): fleets_list = [] response = self.session.get('https://s{}-{}.ogame.gameforge.com/game/index.php?page=ingame&component=movement' .format(self.server_number, self.server_language)) if response.status_code == 302: fleets = response.text.split('<div id="fleet') del fleets[0] for fleet in fleets:
class OGame2(object): fleets_list = [] response = self.session.get('https://s{}-{}.ogame.gameforge.com/game/index.php?page=ingame&component=movement' .format(self.server_number, self.server_language)) if response.status_code != 302: fleets = response.text.split('<div id="fleet') del fleets[0] for fleet in fleets:
1,463
https://:@github.com/pstch/django-crucrudile.git
1eaef5f8ee85d2444a3993b73e03d60ace013b4e
@@ -21,7 +21,7 @@ class FilteredListView(ListView):

         filter_dict = {}

-        if not value in self.filter_keys:
+        if not key in self.filter_keys:
             raise ImproperlyConfigured(
                 "%s is not present in filter_keys (%s)" % (key, self.filter_keys)
             )
django_pstch_helpers/views/filtered.py
ReplaceText(target='key' @(24,15)->(24,20))
class FilteredListView(ListView): filter_dict = {} if not value in self.filter_keys: raise ImproperlyConfigured( "%s is not present in filter_keys (%s)" % (key, self.filter_keys) )
class FilteredListView(ListView): filter_dict = {} if not key in self.filter_keys: raise ImproperlyConfigured( "%s is not present in filter_keys (%s)" % (key, self.filter_keys) )
1,464
https://:@github.com/pstch/django-crucrudile.git
399d14d8b52601ce12858af06a0e93f6bf2ade33
@@ -337,7 +337,7 @@ class AutoPatternsMixin(object):
             """View URL name (unprefixed, this is the name we give to url())"""
             return cls.get_url_name(view)

-        if view not in view.get_views():
+        if view not in cls.get_views():
             raise ImproperlyConfigured(
                 "Tried to get the URL patterns for a view (%s)"
                 " that is not defined by get_views" % view
django_crucrudile/models/mixins.py
ReplaceText(target='cls' @(340,23)->(340,27))
class AutoPatternsMixin(object): """View URL name (unprefixed, this is the name we give to url())""" return cls.get_url_name(view) if view not in view.get_views(): raise ImproperlyConfigured( "Tried to get the URL patterns for a view (%s)" " that is not defined by get_views" % view
class AutoPatternsMixin(object): """View URL name (unprefixed, this is the name we give to url())""" return cls.get_url_name(view) if view not in cls.get_views(): raise ImproperlyConfigured( "Tried to get the URL patterns for a view (%s)" " that is not defined by get_views" % view
1,465
https://:@github.com/pstch/django-crucrudile.git
fb0a3af53e4c57ea50164c28ecd872ae379e74b4
@@ -119,7 +119,7 @@ def make_model_mixin(view_class,

     setattr(ModelMixin,
             'get_%s_url_name' % view_class.get_underscored_action_name(),
-            _get_url)
+            _get_url_name)

     if extra_funcs:
         for func_name, func in extra_funcs.items():
django_crucrudile/models/mixins.py
ReplaceText(target='_get_url_name' @(122,12)->(122,20))
def make_model_mixin(view_class, setattr(ModelMixin, 'get_%s_url_name' % view_class.get_underscored_action_name(), _get_url) if extra_funcs: for func_name, func in extra_funcs.items():
def make_model_mixin(view_class, setattr(ModelMixin, 'get_%s_url_name' % view_class.get_underscored_action_name(), _get_url_name) if extra_funcs: for func_name, func in extra_funcs.items():
1,466
https://:@github.com/gawseed/threat-feed-tools.git
da1b21ecf69131800d1d782e169de3f1a8535572
@@ -28,7 +28,7 @@ class FsdbThreatFeed():
         for (count,entry) in enumerate(self._tfh):
             array.append(entry)
             dictionary[entry[index_column]] = entry # note, may erase older ones; build array?
-            if max_records and count >= max_records:
+            if max_records and count > max_records:
                 break

         return (array, dictionary)
gawseed/threatfeed/feeds/fsdb.py
ReplaceText(target='>' @(31,37)->(31,39))
class FsdbThreatFeed(): for (count,entry) in enumerate(self._tfh): array.append(entry) dictionary[entry[index_column]] = entry # note, may erase older ones; build array? if max_records and count >= max_records: break return (array, dictionary)
class FsdbThreatFeed(): for (count,entry) in enumerate(self._tfh): array.append(entry) dictionary[entry[index_column]] = entry # note, may erase older ones; build array? if max_records and count > max_records: break return (array, dictionary)
1,467
https://:@github.com/gawseed/threat-feed-tools.git
354e753fde2635223ce189079a16b4b6a6ef8b36
@@ -62,7 +62,7 @@ class KafkaDataSource(DataSource):
         if self._end_time:
             self.verbose("searching forward from:")
             self.verbose(decoded_row)
-            count += 0
+            count = 0
             while True:
                 count += 1
                 decoded_time = decoded_row[self._time_column]
gawseed/threatfeed/datasources/kafka.py
ReplaceText(target='=' @(65,18)->(65,20))
class KafkaDataSource(DataSource): if self._end_time: self.verbose("searching forward from:") self.verbose(decoded_row) count += 0 while True: count += 1 decoded_time = decoded_row[self._time_column]
class KafkaDataSource(DataSource): if self._end_time: self.verbose("searching forward from:") self.verbose(decoded_row) count = 0 while True: count += 1 decoded_time = decoded_row[self._time_column]
1,468
https://:@github.com/gawseed/threat-feed-tools.git
41e2731d9ccb0df59446cc0b0aa0f23ffe945a51
@@ -51,4 +51,4 @@ class ConnectionCounter(Config):

         results = {'connections': conns,
                    'ports': ports}
-        return (self._output_key, conns)
+        return (self._output_key, results)
gawseed/threatfeed/enrichments/connectionCounter.py
ReplaceText(target='results' @(54,34)->(54,39))
class ConnectionCounter(Config): results = {'connections': conns, 'ports': ports} return (self._output_key, conns)
class ConnectionCounter(Config): results = {'connections': conns, 'ports': ports} return (self._output_key, results)
1,469
https://:@github.com/jlazear/pyoscope.git
8d5c3639de70f788c3cfcde695678031f6a8acb7
@@ -409,7 +409,7 @@ class PyOscopeStatic(object):
             elif isinstance(y, Iterable):
                 newy = y
                 yname = 'y_{j}'.format(j=j)
-            elif isinstance(x, NoneType):
+            elif isinstance(y, NoneType):
                 yname = None
                 temp = self.data.columns[0]
                 newy = range(len(self.data[temp]))
pyoscope.py
ReplaceText(target='y' @(412,28)->(412,29))
class PyOscopeStatic(object): elif isinstance(y, Iterable): newy = y yname = 'y_{j}'.format(j=j) elif isinstance(x, NoneType): yname = None temp = self.data.columns[0] newy = range(len(self.data[temp]))
class PyOscopeStatic(object): elif isinstance(y, Iterable): newy = y yname = 'y_{j}'.format(j=j) elif isinstance(y, NoneType): yname = None temp = self.data.columns[0] newy = range(len(self.data[temp]))
1,470
https://:@gitlab.com/LISTERINE/dj_arp_storm.git
06a3ee8610a39c0768a2ef98afdb930a1aa0c1cf
@@ -87,7 +87,7 @@ def get_timeout(conf):
     :rtype: int, float
     """
     timeout = conf.getint('general', 'timeout')
-    if timeout > 0:
+    if timeout < 0:
         return float('inf')
     return timeout
dj_arp_storm/dj_arp_storm.py
ReplaceText(target='<' @(90,15)->(90,16))
def get_timeout(conf): :rtype: int, float """ timeout = conf.getint('general', 'timeout') if timeout > 0: return float('inf') return timeout
def get_timeout(conf): :rtype: int, float """ timeout = conf.getint('general', 'timeout') if timeout < 0: return float('inf') return timeout
1,471
https://:@github.com/mozilla/CANOSP-2019.git
cada35d7f44d921471da056f272f17c44705bb43
@@ -100,7 +100,7 @@ def server_update(

         # calculate the number of clients used in this round
         m = max(int(client_num * C), 1)
         # random set of m client's index
-        S = np.array(random.sample(range(client_num), client_num))
+        S = np.array(random.sample(range(client_num), m))

         num_samples = []
simulation_util.py
ReplaceText(target='m' @(103,54)->(103,64))
def server_update( # calculate the number of clients used in this round m = max(int(client_num * C), 1) # random set of m client's index S = np.array(random.sample(range(client_num), client_num)) num_samples = []
def server_update( # calculate the number of clients used in this round m = max(int(client_num * C), 1) # random set of m client's index S = np.array(random.sample(range(client_num), m)) num_samples = []
1,472
https://:@github.com/pattern-inc/cynetworkx.git
7ad6ebdb79672222fbd37571b9414d80a2eb31d1
@@ -679,7 +679,7 @@ def eigenvector_centrality(G,max_iter=100,tol=1.0e-6,nstart=None):
         for n in x: x[n]*=s
         # check convergence
         err=sum([abs(x[n]-xlast[n]) for n in x])
-        if err < n*tol:
+        if err < nnodes*tol:
             return x

     raise NetworkXError("eigenvector_centrality(): power iteration failed to converge in %d iterations."%(i+1))
networkx/algorithms/centrality.py
ReplaceText(target='nnodes' @(682,17)->(682,18))
def eigenvector_centrality(G,max_iter=100,tol=1.0e-6,nstart=None): for n in x: x[n]*=s # check convergence err=sum([abs(x[n]-xlast[n]) for n in x]) if err < n*tol: return x raise NetworkXError("eigenvector_centrality(): power iteration failed to converge in %d iterations."%(i+1))
def eigenvector_centrality(G,max_iter=100,tol=1.0e-6,nstart=None): for n in x: x[n]*=s # check convergence err=sum([abs(x[n]-xlast[n]) for n in x]) if err < nnodes*tol: return x raise NetworkXError("eigenvector_centrality(): power iteration failed to converge in %d iterations."%(i+1))
1,473
https://:@github.com/pattern-inc/cynetworkx.git
e9c5b23d0348c6587955208c0c806fd129ba4a1c
@@ -49,7 +49,7 @@ def _initial_tree_solution(G, r, demand = 'demand', capacity = 'capacity',
     maxWeight = 0
     hugeWeight = 1 + n * maxWeight

-    labelGenerator = _gen_node_label(G)
+    labelGenerator = _gen_node_label(H)

     for v, d in G.nodes(data = True)[1:]:
         vDemand = d.get(demand, 0)
networkx/algorithms/flow/mincost.py
ReplaceText(target='H' @(52,37)->(52,38))
def _initial_tree_solution(G, r, demand = 'demand', capacity = 'capacity', maxWeight = 0 hugeWeight = 1 + n * maxWeight labelGenerator = _gen_node_label(G) for v, d in G.nodes(data = True)[1:]: vDemand = d.get(demand, 0)
def _initial_tree_solution(G, r, demand = 'demand', capacity = 'capacity', maxWeight = 0 hugeWeight = 1 + n * maxWeight labelGenerator = _gen_node_label(H) for v, d in G.nodes(data = True)[1:]: vDemand = d.get(demand, 0)
1,474
https://:@github.com/pattern-inc/cynetworkx.git
9a9b57ed411305c6f8c3a52d424014f17ed2ac5f
@@ -85,7 +85,7 @@ def topological_sort(G,nbunch=None):
     if nbunch is None:
         nbunch = G.nodes_iter()

-    for v in G: # process all vertices in G
+    for v in nbunch: # process all vertices in G
         if v in explored:
             continue
         fringe=[v]   # nodes yet to look at
networkx/algorithms/dag.py
ReplaceText(target='nbunch' @(88,13)->(88,14))
def topological_sort(G,nbunch=None): if nbunch is None: nbunch = G.nodes_iter() for v in G: # process all vertices in G if v in explored: continue fringe=[v] # nodes yet to look at
def topological_sort(G,nbunch=None): if nbunch is None: nbunch = G.nodes_iter() for v in nbunch: # process all vertices in G if v in explored: continue fringe=[v] # nodes yet to look at
1,475
https://:@github.com/pattern-inc/cynetworkx.git
f22c0f25b393ad63c2a0273dea3bd095bcb4d63f
@@ -67,7 +67,7 @@ def all_simple_paths(G, source, target, cutoff=None):
     if source not in G:
         raise nx.NetworkXError('source node %s not in graph'%source)
     if target not in G:
-        raise nx.NetworkXError('target node %s not in graph'%source)
+        raise nx.NetworkXError('target node %s not in graph'%target)
     if cutoff is None:
         cutoff = len(G)-1
     if G.is_multigraph():
networkx/algorithms/simple_paths.py
ReplaceText(target='target' @(70,61)->(70,67))
def all_simple_paths(G, source, target, cutoff=None): if source not in G: raise nx.NetworkXError('source node %s not in graph'%source) if target not in G: raise nx.NetworkXError('target node %s not in graph'%source) if cutoff is None: cutoff = len(G)-1 if G.is_multigraph():
def all_simple_paths(G, source, target, cutoff=None): if source not in G: raise nx.NetworkXError('source node %s not in graph'%source) if target not in G: raise nx.NetworkXError('target node %s not in graph'%target) if cutoff is None: cutoff = len(G)-1 if G.is_multigraph():
1,476
https://:@github.com/pattern-inc/cynetworkx.git
6ac082b2bdc9d91db7c5c9d84080787f7a98a953
@@ -201,7 +201,7 @@ def simple_cycles(G):
                 if thisnode in closed:
                     _unblock(thisnode,blocked,B)
                 else:
-                    for nbr in G[thisnode]:
+                    for nbr in subG[thisnode]:
                         if thisnode not in B[nbr]:
                             B[nbr].add(thisnode)
                 stack.pop()
networkx/algorithms/cycles.py
ReplaceText(target='subG' @(204,31)->(204,32))
def simple_cycles(G): if thisnode in closed: _unblock(thisnode,blocked,B) else: for nbr in G[thisnode]: if thisnode not in B[nbr]: B[nbr].add(thisnode) stack.pop()
def simple_cycles(G): if thisnode in closed: _unblock(thisnode,blocked,B) else: for nbr in subG[thisnode]: if thisnode not in B[nbr]: B[nbr].add(thisnode) stack.pop()
1,477
https://:@github.com/pattern-inc/cynetworkx.git
31a656f9dd9ca015b4cdfc41c60e27995fb76f30
@@ -89,7 +89,7 @@ def current_flow_closeness_centrality(G, normalized=True, weight='weight',
     # this could be done without a copy if we really wanted to
     H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
     betweenness = dict.fromkeys(H, 0.0)  # b[v]=0 for v in H
-    n = G.number_of_nodes()
+    n = H.number_of_nodes()
     L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
                                 dtype=dtype, format='csc')
     C2 = solvername[solver](L, width=1, dtype=dtype)  # initialize solver
networkx/algorithms/centrality/current_flow_closeness.py
ReplaceText(target='H' @(92,8)->(92,9))
def current_flow_closeness_centrality(G, normalized=True, weight='weight', # this could be done without a copy if we really wanted to H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H n = G.number_of_nodes() L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight, dtype=dtype, format='csc') C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver
def current_flow_closeness_centrality(G, normalized=True, weight='weight', # this could be done without a copy if we really wanted to H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H n = H.number_of_nodes() L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight, dtype=dtype, format='csc') C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver
1,478
https://:@github.com/pattern-inc/cynetworkx.git
1f1ee6761d04f572bc9afd892a6b43a77dad1150
@@ -38,6 +38,6 @@ class TestDispersion(object):

         disp = nx.dispersion(G)
         for d in disp:
             for dd in d:
-                assert d >= 0
+                assert dd >= 0
networkx/algorithms/centrality/tests/test_dispersion.py
ReplaceText(target='dd' @(41,23)->(41,24))
class TestDispersion(object): disp = nx.dispersion(G) for d in disp: for dd in d: assert d >= 0
class TestDispersion(object): disp = nx.dispersion(G) for d in disp: for dd in d: assert dd >= 0
1,479
https://:@github.com/pattern-inc/cynetworkx.git
6f01f084cbf7fd71f3e2e670a2d25c0358d54cd1
@@ -89,7 +89,7 @@ def shortest_augmenting_path_impl(G, s, t, capacity, two_phase):
     path = [s]
     u = s
     d = n if not two_phase else int(min(m ** 0.5, 2 * n ** (2. / 3)))
-    done = R.node[s]['height'] < d
+    done = R.node[s]['height'] >= d
     while not done:
         height = R.node[u]['height']
         curr_edge = R.node[u]['curr_edge']
networkx/algorithms/flow/shortest_augmenting_path.py
ReplaceText(target='>=' @(92,31)->(92,32))
def shortest_augmenting_path_impl(G, s, t, capacity, two_phase): path = [s] u = s d = n if not two_phase else int(min(m ** 0.5, 2 * n ** (2. / 3))) done = R.node[s]['height'] < d while not done: height = R.node[u]['height'] curr_edge = R.node[u]['curr_edge']
def shortest_augmenting_path_impl(G, s, t, capacity, two_phase): path = [s] u = s d = n if not two_phase else int(min(m ** 0.5, 2 * n ** (2. / 3))) done = R.node[s]['height'] >= d while not done: height = R.node[u]['height'] curr_edge = R.node[u]['curr_edge']
1,480
https://:@github.com/pattern-inc/cynetworkx.git
6b2dbffd2132b4d7034f59d8a9b089f8c9ca848f
@@ -158,5 +158,5 @@ def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs):
                 else:
                     ky = target_data.pop(key, None)
                 graph.add_edge(source, target, key=ky)
-                graph[source][target][ky].update(target_data)
+                graph[source][target][ky].update(tdata)
     return graph
networkx/readwrite/json_graph/adjacency.py
ReplaceText(target='tdata' @(161,49)->(161,60))
def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs): else: ky = target_data.pop(key, None) graph.add_edge(source, target, key=ky) graph[source][target][ky].update(target_data) return graph
def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs): else: ky = target_data.pop(key, None) graph.add_edge(source, target, key=ky) graph[source][target][ky].update(tdata) return graph
1,481
https://:@github.com/pattern-inc/cynetworkx.git
5864982dd97baf21aac4e743de5fbbb103860017
@@ -48,7 +48,7 @@ def cytoscape_data(G, attrs=None):
         n = {"data" : j.copy()}
         n["data"]["id"] = str(i)
         n["data"]["value"] = i
-        n["data"]["name"] = n.get(name) or str(i)
+        n["data"]["name"] = j.get(name) or str(i)
         nodes.append(n)

     for e in G.edges():
networkx/readwrite/json_graph/cytoscape.py
ReplaceText(target='j' @(51,28)->(51,29))
def cytoscape_data(G, attrs=None): n = {"data" : j.copy()} n["data"]["id"] = str(i) n["data"]["value"] = i n["data"]["name"] = n.get(name) or str(i) nodes.append(n) for e in G.edges():
def cytoscape_data(G, attrs=None): n = {"data" : j.copy()} n["data"]["id"] = str(i) n["data"]["value"] = i n["data"]["name"] = j.get(name) or str(i) nodes.append(n) for e in G.edges():
1,482
https://:@github.com/pattern-inc/cynetworkx.git
fbc901aa2d30c8dfbe145af747097e92d9b5e5c9
@@ -594,5 +594,5 @@ def _add_nodes_with_bipartite_label(G, lena, lenb):
     G.add_nodes_from(range(0,lena+lenb))
     b=dict(zip(range(0,lena),[0]*lena))
     b.update(dict(zip(range(lena,lena+lenb),[1]*lenb)))
-    nx.set_node_attributes(G,'bipartite',b)
+    nx.set_node_attributes(G, b, 'bipartite')
     return G
networkx/algorithms/bipartite/generators.py
ArgSwap(idxs=1<->2 @(597,4)->(597,26))
def _add_nodes_with_bipartite_label(G, lena, lenb): G.add_nodes_from(range(0,lena+lenb)) b=dict(zip(range(0,lena),[0]*lena)) b.update(dict(zip(range(lena,lena+lenb),[1]*lenb))) nx.set_node_attributes(G,'bipartite',b) return G
def _add_nodes_with_bipartite_label(G, lena, lenb): G.add_nodes_from(range(0,lena+lenb)) b=dict(zip(range(0,lena),[0]*lena)) b.update(dict(zip(range(lena,lena+lenb),[1]*lenb))) nx.set_node_attributes(G, b, 'bipartite') return G
1,483
https://:@github.com/pattern-inc/cynetworkx.git
fbc901aa2d30c8dfbe145af747097e92d9b5e5c9
@@ -440,7 +440,7 @@ def condensation(G, scc=None):
     C.add_edges_from((mapping[u], mapping[v]) for u, v in G.edges()
                      if mapping[u] != mapping[v])
     # Add a list of members (ie original nodes) to each node (ie scc) in C.
-    nx.set_node_attributes(C, 'members', members)
+    nx.set_node_attributes(C, members, 'members')
     # Add mapping dict as graph attribute
     C.graph['mapping'] = mapping
     return C
networkx/algorithms/components/strongly_connected.py
ArgSwap(idxs=1<->2 @(443,4)->(443,26))
def condensation(G, scc=None): C.add_edges_from((mapping[u], mapping[v]) for u, v in G.edges() if mapping[u] != mapping[v]) # Add a list of members (ie original nodes) to each node (ie scc) in C. nx.set_node_attributes(C, 'members', members) # Add mapping dict as graph attribute C.graph['mapping'] = mapping return C
def condensation(G, scc=None): C.add_edges_from((mapping[u], mapping[v]) for u, v in G.edges() if mapping[u] != mapping[v]) # Add a list of members (ie original nodes) to each node (ie scc) in C. nx.set_node_attributes(C, members, 'members') # Add mapping dict as graph attribute C.graph['mapping'] = mapping return C
1,484
https://:@github.com/pattern-inc/cynetworkx.git
fbc901aa2d30c8dfbe145af747097e92d9b5e5c9
@@ -502,8 +502,8 @@ class TestCutoff:

     def test_complete_graph_cutoff(self):
         G = nx.complete_graph(5)
-        nx.set_edge_attributes(G, 'capacity',
-                               dict(((u, v), 1) for u, v in G.edges()))
+        nx.set_edge_attributes(G, dict(((u, v), 1) for u, v in G.edges()),
+                               'capacity')
         for flow_func in [shortest_augmenting_path, edmonds_karp]:
             for cutoff in [3, 2, 1]:
                 result = nx.maximum_flow_value(G, 0, 4, flow_func=flow_func,
networkx/algorithms/flow/tests/test_maxflow.py
ArgSwap(idxs=1<->2 @(505,8)->(505,30))
class TestCutoff: def test_complete_graph_cutoff(self): G = nx.complete_graph(5) nx.set_edge_attributes(G, 'capacity', dict(((u, v), 1) for u, v in G.edges())) for flow_func in [shortest_augmenting_path, edmonds_karp]: for cutoff in [3, 2, 1]: result = nx.maximum_flow_value(G, 0, 4, flow_func=flow_func,
class TestCutoff: def test_complete_graph_cutoff(self): G = nx.complete_graph(5) nx.set_edge_attributes(G, dict(((u, v), 1) for u, v in G.edges()), 'capacity') for flow_func in [shortest_augmenting_path, edmonds_karp]: for cutoff in [3, 2, 1]: result = nx.maximum_flow_value(G, 0, 4, flow_func=flow_func,
1,485
https://:@github.com/pattern-inc/cynetworkx.git
fbc901aa2d30c8dfbe145af747097e92d9b5e5c9
@@ -85,7 +85,7 @@ class TestMaxflowLargeGraph:
     def test_complete_graph(self):
         N = 50
         G = nx.complete_graph(N)
-        nx.set_edge_attributes(G, 'capacity', 5)
+        nx.set_edge_attributes(G, 5, 'capacity')
         R = build_residual_network(G, 'capacity')
         kwargs = dict(residual=R)
networkx/algorithms/flow/tests/test_maxflow_large_graph.py
ArgSwap(idxs=1<->2 @(88,8)->(88,30))
class TestMaxflowLargeGraph: def test_complete_graph(self): N = 50 G = nx.complete_graph(N) nx.set_edge_attributes(G, 'capacity', 5) R = build_residual_network(G, 'capacity') kwargs = dict(residual=R)
class TestMaxflowLargeGraph: def test_complete_graph(self): N = 50 G = nx.complete_graph(N) nx.set_edge_attributes(G, 5, 'capacity') R = build_residual_network(G, 'capacity') kwargs = dict(residual=R)
1,486
https://:@github.com/kundajelab/keras-genomics.git
6b6c1a9d123f91193e5311ccd22d169a6c9d0733
@@ -4,5 +4,5 @@ from keras import backend as K

 def ambig_binary_crossentropy(y_true,y_pred):
     non_ambig = K.cast((y_true > -0.5),'float32')
-    return K.mean(K.binary_crossentropy(y_pred, y_true)
+    return K.mean(K.binary_crossentropy(y_true, y_pred)
                   *non_ambig, axis=-1)
keras_genomics/losses.py
ArgSwap(idxs=0<->1 @(7,22)->(7,43))
from keras import backend as K def ambig_binary_crossentropy(y_true,y_pred): non_ambig = K.cast((y_true > -0.5),'float32') return K.mean(K.binary_crossentropy(y_pred, y_true) *non_ambig, axis=-1)
from keras import backend as K def ambig_binary_crossentropy(y_true,y_pred): non_ambig = K.cast((y_true > -0.5),'float32') return K.mean(K.binary_crossentropy(y_true, y_pred) *non_ambig, axis=-1)
1,487
https://:@github.com/Nekmo/nekumo-cloud.git
f111818974631fad02fa0146f04638783b3da8df
@@ -41,7 +41,7 @@ class NekumoManagement(object):
         args = self.parser.parse_args(argv[1:])
         self.nekumo.gateways = list(self.parse_gateways(args))
         self.nekumo.ifaces = list(self.parse_ifaces(args))
-        if 'NEKUMO_DEBUG_IFACE' in os.environ:
+        if 'NEKUMO_DEBUG_IFACE' not in os.environ:
             loop = asyncio.get_event_loop()
             loop.run_forever()
nekumo/core/management.py
ReplaceText(target=' not in ' @(44,31)->(44,35))
class NekumoManagement(object): args = self.parser.parse_args(argv[1:]) self.nekumo.gateways = list(self.parse_gateways(args)) self.nekumo.ifaces = list(self.parse_ifaces(args)) if 'NEKUMO_DEBUG_IFACE' in os.environ: loop = asyncio.get_event_loop() loop.run_forever()
class NekumoManagement(object): args = self.parser.parse_args(argv[1:]) self.nekumo.gateways = list(self.parse_gateways(args)) self.nekumo.ifaces = list(self.parse_ifaces(args)) if 'NEKUMO_DEBUG_IFACE' not in os.environ: loop = asyncio.get_event_loop() loop.run_forever()
1,488
https://:@github.com/stonebig/baresql.git
edeb2171759dc758dfa557404982c34e2fe385f2
@@ -225,7 +225,7 @@ class baresql(object):
                 level = 0
                 status="normal"
                 self.cte_dico = {}
-            elif token == "TK_OTHER" and not cte_inline:
+            elif token == "TK_OTHER" and cte_inline:
                 if tk_value.lower() == "from":
                     from_lvl[level] = True
                 elif from_lvl[level]:
baresql/baresql.py
ReplaceText(target='' @(228,41)->(228,45))
class baresql(object): level = 0 status="normal" self.cte_dico = {} elif token == "TK_OTHER" and not cte_inline: if tk_value.lower() == "from": from_lvl[level] = True elif from_lvl[level]:
class baresql(object): level = 0 status="normal" self.cte_dico = {} elif token == "TK_OTHER" and cte_inline: if tk_value.lower() == "from": from_lvl[level] = True elif from_lvl[level]:
1,489
https://:@github.com/stonebig/baresql.git
12ac74b6d6f75f723938608ef1b3202e81f8d7d5
@@ -437,7 +437,7 @@ class baresql(object):

         for table_ref in tables:
             table_sql = table_ref+"$$"
-            df = env[table_ref]
+            df = names_env[table_ref]
             df = self._ensure_data_frame(df, table_ref)
             #destroy previous Python temp table before importing the new one
             pre_q = "DROP TABLE IF EXISTS %s" % table_sql
baresql/baresql.py
ReplaceText(target='names_env' @(440,17)->(440,20))
class baresql(object): for table_ref in tables: table_sql = table_ref+"$$" df = env[table_ref] df = self._ensure_data_frame(df, table_ref) #destroy previous Python temp table before importing the new one pre_q = "DROP TABLE IF EXISTS %s" % table_sql
class baresql(object): for table_ref in tables: table_sql = table_ref+"$$" df = names_env[table_ref] df = self._ensure_data_frame(df, table_ref) #destroy previous Python temp table before importing the new one pre_q = "DROP TABLE IF EXISTS %s" % table_sql
1,490
https://:@github.com/takluyver/astsearch.git
68b9fe6322acccd76ff8353726e5cb9010064d9b
@@ -31,7 +31,7 @@ class ASTPatternFinder(object):
             with open(file, 'rb') as f:
                 tree = ast.parse(f.read())
         else:
-            tree = ast.parse(f.read())
+            tree = ast.parse(file.read())
         yield from self.scan_ast(tree)

     def filter_subdirs(self, dirnames):
astsearch.py
ReplaceText(target='file' @(34,29)->(34,30))
class ASTPatternFinder(object): with open(file, 'rb') as f: tree = ast.parse(f.read()) else: tree = ast.parse(f.read()) yield from self.scan_ast(tree) def filter_subdirs(self, dirnames):
class ASTPatternFinder(object): with open(file, 'rb') as f: tree = ast.parse(f.read()) else: tree = ast.parse(file.read()) yield from self.scan_ast(tree) def filter_subdirs(self, dirnames):
1,491
https://:@github.com/rsanchezgarc/micrograph_cleaner_em.git
df470ae67588e8a1c7ba04ce14b517552e0ad788
@@ -41,7 +41,7 @@ def cleanOneMic(micFname, boxSize, deepLearningModel=DEFAULT_MODEL_PATH, inputCo
   global MASK_PREDICTOR_HANDLER
   with LOCK:
     if MASK_PREDICTOR_HANDLER is None:
-      MASK_PREDICTOR_HANDLER= MaskPredictor(deepLearningModel, boxSize, gpus)
+      MASK_PREDICTOR_HANDLER= MaskPredictor(boxSize, deepLearningModel, gpus)

     maskPredictor= MASK_PREDICTOR_HANDLER
micrograph_cleaner_em/cleanOneMic.py
ArgSwap(idxs=0<->1 @(44,30)->(44,43))
def cleanOneMic(micFname, boxSize, deepLearningModel=DEFAULT_MODEL_PATH, inputCo global MASK_PREDICTOR_HANDLER with LOCK: if MASK_PREDICTOR_HANDLER is None: MASK_PREDICTOR_HANDLER= MaskPredictor(deepLearningModel, boxSize, gpus) maskPredictor= MASK_PREDICTOR_HANDLER
def cleanOneMic(micFname, boxSize, deepLearningModel=DEFAULT_MODEL_PATH, inputCo global MASK_PREDICTOR_HANDLER with LOCK: if MASK_PREDICTOR_HANDLER is None: MASK_PREDICTOR_HANDLER= MaskPredictor(boxSize, deepLearningModel, gpus) maskPredictor= MASK_PREDICTOR_HANDLER
1,492
https://:@github.com/rsanchezgarc/micrograph_cleaner_em.git
df470ae67588e8a1c7ba04ce14b517552e0ad788
@@ -19,7 +19,7 @@ class TestMaskPredictor(TestCase):
     with mrcfile.open(micFname, permissive=True) as f:
       mic = f.data.copy()

-    with MaskPredictor(deepLearningModelFname, boxSize, gpus=[0]) as mp:
+    with MaskPredictor(boxSize, deepLearningModelFname, gpus=[0]) as mp:
       mask = mp.predictMask(mic)

     self.assertTrue(mask.shape==mic.shape, "Error, mask shape is not the same that mic shape")
micrograph_cleaner_em/tests/test_maskPredictor.py
ArgSwap(idxs=0<->1 @(22,9)->(22,22))
class TestMaskPredictor(TestCase): with mrcfile.open(micFname, permissive=True) as f: mic = f.data.copy() with MaskPredictor(deepLearningModelFname, boxSize, gpus=[0]) as mp: mask = mp.predictMask(mic) self.assertTrue(mask.shape==mic.shape, "Error, mask shape is not the same that mic shape")
class TestMaskPredictor(TestCase): with mrcfile.open(micFname, permissive=True) as f: mic = f.data.copy() with MaskPredictor(boxSize, deepLearningModelFname, gpus=[0]) as mp: mask = mp.predictMask(mic) self.assertTrue(mask.shape==mic.shape, "Error, mask shape is not the same that mic shape")
1,493
https://:@github.com/maebert/snoo.git
d2d822c48b4acc5046b4ca28a63153a0d0576615
@@ -116,7 +116,7 @@ class Client:
             arrow.get(self.session["last_updated"]).shift(
                 seconds=int(self.config["update_interval"])
             )
-            < arrow.utcnow()
+            > arrow.utcnow()
         ):
             return self.session
snoo/client.py
ReplaceText(target='>' @(119,16)->(119,17))
class Client: arrow.get(self.session["last_updated"]).shift( seconds=int(self.config["update_interval"]) ) < arrow.utcnow() ): return self.session
class Client: arrow.get(self.session["last_updated"]).shift( seconds=int(self.config["update_interval"]) ) > arrow.utcnow() ): return self.session
1,494
https://:@github.com/maebert/snoo.git
4f8657a2132e8c1dfa84b1a49fb043610b70c934
@@ -234,7 +234,7 @@ class Client:
                 method="get",
                 params={"startTime": day.format("MM/DD/YYYY hh:mm:ss")},
             )
-            result.append(Day._from_data(start_time, data))
+            result.append(Day._from_data(day, data))
         return result

     def export_sessions(self, start_time, end_time):
snoo/client.py
ReplaceText(target='day' @(237,41)->(237,51))
class Client: method="get", params={"startTime": day.format("MM/DD/YYYY hh:mm:ss")}, ) result.append(Day._from_data(start_time, data)) return result def export_sessions(self, start_time, end_time):
class Client: method="get", params={"startTime": day.format("MM/DD/YYYY hh:mm:ss")}, ) result.append(Day._from_data(day, data)) return result def export_sessions(self, start_time, end_time):
1,495
https://:@github.com/Microsoft/Recommenders.git
57d99ce3be798dd2d48a169fbeaaec76a6d9637e
@@ -58,7 +58,7 @@ def _merge_rating_true_pred(

     # Select the columns needed for evaluations
     rating_true = rating_true[[col_user, col_item, col_rating]]
-    rating_pred = rating_true[[col_user, col_item, col_prediction]]
+    rating_pred = rating_pred[[col_user, col_item, col_prediction]]

     if col_rating == col_prediction:
         rating_true_pred = pd.merge(
reco_utils/evaluation/python_evaluation.py
ReplaceText(target='rating_pred' @(61,18)->(61,29))
def _merge_rating_true_pred( # Select the columns needed for evaluations rating_true = rating_true[[col_user, col_item, col_rating]] rating_pred = rating_true[[col_user, col_item, col_prediction]] if col_rating == col_prediction: rating_true_pred = pd.merge(
def _merge_rating_true_pred( # Select the columns needed for evaluations rating_true = rating_true[[col_user, col_item, col_rating]] rating_pred = rating_pred[[col_user, col_item, col_prediction]] if col_rating == col_prediction: rating_true_pred = pd.merge(
1,496
https://:@github.com/Microsoft/Recommenders.git
367ca689d059d7569beb7b3cf153720fed323e30
@@ -160,7 +160,7 @@ def split_pandas_data_with_ratios(data, ratios, seed=1234, resample=False):
     splits = np.split(data, [round(x * len(data)) for x in split_index])

     # Add split index (this makes splitting by group more efficient).
-    for i in range(len(split_index)):
+    for i in range(len(ratios)):
         splits[i]['split_index'] = i

     return splits
reco_utils/dataset/split_utils.py
ReplaceText(target='ratios' @(163,23)->(163,34))
def split_pandas_data_with_ratios(data, ratios, seed=1234, resample=False): splits = np.split(data, [round(x * len(data)) for x in split_index]) # Add split index (this makes splitting by group more efficient). for i in range(len(split_index)): splits[i]['split_index'] = i return splits
def split_pandas_data_with_ratios(data, ratios, seed=1234, resample=False): splits = np.split(data, [round(x * len(data)) for x in split_index]) # Add split index (this makes splitting by group more efficient). for i in range(len(ratios)): splits[i]['split_index'] = i return splits
1,497
https://:@github.com/jkreklow/radproc.git
b319770b94b707a4cdaa335911e857a311f3f92c
@@ -374,7 +374,7 @@ def export_dfrows_to_gdb(dataDF, idRaster, outGDBPath, GDBName, statistics=""):
             outRaster = "R_%i" % (index.year)
         elif dataDF.index.is_month_end.all() == True or dataDF.index.is_month_start.all() == True:
             outRaster = "R_%i%02i" % (index.year, index.month)
-        elif dataDF.index.hour.all() == 0:
+        elif dataDF.index.hour.all() != 0:
             outRaster = "R_%i%02i%02i" % (index.year, index.month, index.day)
         else:
             outRaster = "R_%i%02i%02i_%02i%02i" % (index.year, index.month, index.day, index.hour, index.minute)
build/lib/radproc/arcgis.py
ReplaceText(target='!=' @(377,41)->(377,43))
def export_dfrows_to_gdb(dataDF, idRaster, outGDBPath, GDBName, statistics=""): outRaster = "R_%i" % (index.year) elif dataDF.index.is_month_end.all() == True or dataDF.index.is_month_start.all() == True: outRaster = "R_%i%02i" % (index.year, index.month) elif dataDF.index.hour.all() == 0: outRaster = "R_%i%02i%02i" % (index.year, index.month, index.day) else: outRaster = "R_%i%02i%02i_%02i%02i" % (index.year, index.month, index.day, index.hour, index.minute)
def export_dfrows_to_gdb(dataDF, idRaster, outGDBPath, GDBName, statistics=""): outRaster = "R_%i" % (index.year) elif dataDF.index.is_month_end.all() == True or dataDF.index.is_month_start.all() == True: outRaster = "R_%i%02i" % (index.year, index.month) elif dataDF.index.hour.all() != 0: outRaster = "R_%i%02i%02i" % (index.year, index.month, index.day) else: outRaster = "R_%i%02i%02i_%02i%02i" % (index.year, index.month, index.day, index.hour, index.minute)
1,498
https://:@github.com/graham/python_xid.git
232f0ae87a6feee945a1b9e09bf99eefbf30c7a5
@@ -146,7 +146,7 @@ class Xid(object):
     def from_string(cls, s):
         # type: (str) -> Xid
         val = base32hex.b32decode(s.upper())
-        value_check = [0 < x < 255 for x in val]
+        value_check = [0 <= x < 255 for x in val]

         if not all(value_check):
             raise InvalidXid(s)
xid.py
ReplaceText(target='<=' @(149,25)->(149,26))
class Xid(object): def from_string(cls, s): # type: (str) -> Xid val = base32hex.b32decode(s.upper()) value_check = [0 < x < 255 for x in val] if not all(value_check): raise InvalidXid(s)
class Xid(object): def from_string(cls, s): # type: (str) -> Xid val = base32hex.b32decode(s.upper()) value_check = [0 <= x < 255 for x in val] if not all(value_check): raise InvalidXid(s)
1,499
https://:@github.com/tboser/cwltool.git
5d633799f2eae4a5e0fedcae72381213ae9aba30
@@ -117,7 +117,7 @@ class Builder(object):
                     if "secondaryFiles" in binding:
                         if "secondaryFiles" not in datum:
                             datum["secondaryFiles"] = []
-                        for sf in aslist(schema["secondaryFiles"]):
+                        for sf in aslist(binding["secondaryFiles"]):
                            if isinstance(sf, dict):
                                sfpath = expression.do_eval(sf, self.job, self.requirements, self.docpath, datum["path"])
                            else:
reference/cwltool/draft2tool.py
ReplaceText(target='binding' @(120,41)->(120,47))
class Builder(object): if "secondaryFiles" in binding: if "secondaryFiles" not in datum: datum["secondaryFiles"] = [] for sf in aslist(schema["secondaryFiles"]): if isinstance(sf, dict): sfpath = expression.do_eval(sf, self.job, self.requirements, self.docpath, datum["path"]) else:
class Builder(object): if "secondaryFiles" in binding: if "secondaryFiles" not in datum: datum["secondaryFiles"] = [] for sf in aslist(binding["secondaryFiles"]): if isinstance(sf, dict): sfpath = expression.do_eval(sf, self.job, self.requirements, self.docpath, datum["path"]) else: