bugged
stringlengths
4
228k
fixed
stringlengths
0
96.3M
__index_level_0__
int64
0
481k
def calibrate (links, latency, bandwidth, sizes, timings): assert len(sizes) == len(timings) if len(sizes) < 2: return None S_XY = cov(sizes, timings) S_X2 = variance(sizes) a = S_XY / S_X2 b = avg(timings) - a * avg(sizes) return (b * 1e-6) / (latency * links), 1e6 / (a * bandwidth)
def calibrate (links, latency, bandwidth, sizes, timings): assert len(sizes) == len(timings) if len(sizes) < 2: return None S_XY = cov(sizes, timings) S_X2 = variance(sizes) a = S_XY / S_X2 b = avg(timings) - a * avg(sizes) return (b * 1e-6) / (latency * links), 1e6 / (a * bandwidth)
480,700
def calibrate (links, latency, bandwidth, sizes, timings): assert len(sizes) == len(timings) if len(sizes) < 2: return None S_XY = cov(sizes, timings) S_X2 = variance(sizes) a = S_XY / S_X2 b = avg(timings) - a * avg(sizes) return (b * 1e-6) / (latency * links), 1e6 / (a * bandwidth)
def calibrate (links, latency, bandwidth, sizes, timings): assert len(sizes) == len(timings) if len(sizes) < 2: return None S_XY = cov(sizes, timings) S_X2 = variance(sizes) a = S_XY / S_X2 b = avg(timings) - a * avg(sizes) return (b * 1e-6) / (latency * links), 1e6 / (a * bandwidth)
480,701
def run(self): """Build tarballs and create additional files.""" if os.path.isfile("ChangeLog"): os.remove("ChangeLog") os.system("tools/generate-change-log > ChangeLog") assert os.path.isfile("ChangeLog") assert open("ChangeLog", "r").read().strip() distutils.command.sdist.sdist.run(self) basename = "nfoview-%s" % self.__version tarballs = os.listdir(self.dist_dir) os.chdir(self.dist_dir) # Compare tarball contents with working copy. temp_dir = tempfile.gettempdir() test_dir = os.path.join(temp_dir, basename) tobj = tarfile.open(tarballs[-1], "r") for member in tobj.getmembers(): tobj.extract(member, temp_dir) log.info("comparing tarball (tmp) with working copy (../..)") os.system('diff -qr -x ".*" -x "*.pyc" ../.. %s' % test_dir) response = raw_input("Are all files in the tarball [Y/n]? ") if response.lower() == "n": raise SystemExit("Must edit MANIFEST.in.") shutil.rmtree(test_dir) # Create extra distribution files. log.info("calculating md5sums") os.system("md5sum * > %s.md5sum" % basename) log.info("creating '%s.changes'" % basename) source = os.path.join("..", "..", "ChangeLog") shutil.copyfile(source, "%s.changes" % basename) log.info("creating '%s.news'" % basename) source = os.path.join("..", "..", "NEWS") shutil.copyfile(source, "%s.news" % basename) for tarball in tarballs: log.info("signing '%s'" % tarball) os.system("gpg --detach %s" % tarball)
def run(self): """Build tarballs and create additional files.""" if os.path.isfile("ChangeLog"): os.remove("ChangeLog") os.system("tools/generate-change-log > ChangeLog") assert os.path.isfile("ChangeLog") assert open("ChangeLog", "r").read().strip() distutils.command.sdist.sdist.run(self) basename = "nfoview-%s" % self.__version tarballs = os.listdir(self.dist_dir) os.chdir(self.dist_dir) # Compare tarball contents with working copy. temp_dir = tempfile.gettempdir() test_dir = os.path.join(temp_dir, basename) tobj = tarfile.open(tarballs[-1], "r") for member in tobj.getmembers(): tobj.extract(member, temp_dir) log.info("comparing tarball (tmp) with working copy (../..)") os.system('diff -qr -x ".*" -x "*.pyc" ../.. %s' % test_dir) response = raw_input("Are all files in the tarball [Y/n]? ") if response.lower() == "n": raise SystemExit("Must edit MANIFEST.in.") shutil.rmtree(test_dir) # Create extra distribution files. log.info("calculating md5sums") os.system("md5sum * > %s.md5sum" % basename) log.info("creating '%s.changes'" % basename) source = os.path.join("..", "..", "ChangeLog") shutil.copyfile(source, "%s.changes" % basename) log.info("creating '%s.news'" % basename) source = os.path.join("..", "..", "NEWS") shutil.copyfile(source, "%s.news" % basename) for tarball in tarballs: log.info("signing '%s'" % tarball) os.system("gpg --detach %s" % tarball)
480,702
def _read_file(self, path, encoding=None): """Read and return the text of the NFO file.
def _read_file(self, path, encoding=None): """Read and return the text of the NFO file.
480,703
def run_command_or_exit(command): """Run command in shell and raise SystemExit if it fails.""" if subprocess.call(command) != 0: log.error("command %s failed" % repr(command)) raise SystemExit(1)
def run_command_or_exit(command): """Run command in shell and raise SystemExit if it fails.""" if os.system(command) != 0: log.error("command %s failed" % repr(command)) raise SystemExit(1)
480,704
def run_command_or_warn(command): """Run command in shell and raise SystemExit if it fails.""" if subprocess.call(command) != 0: log.warn("command %s failed" % repr(command))
def run_command_or_warn(command): """Run command in shell and raise SystemExit if it fails.""" if os.system(command) != 0: log.warn("command %s failed" % repr(command))
480,705
def run(self): """Install everything and update the desktop file database.""" install.run(self) get_command_obj = self.distribution.get_command_obj root = get_command_obj("install").root data_dir = get_command_obj("install_data").install_dir # Assume we're actually installing if --root was not given. if (root is not None) or (data_dir is None): return directory = os.path.join(data_dir, "share", "applications") log.info("updating desktop database in '%s'" % directory) run_command_or_warn(("update-desktop-database", directory))
def run(self): """Install everything and update the desktop file database.""" install.run(self) get_command_obj = self.distribution.get_command_obj root = get_command_obj("install").root data_dir = get_command_obj("install_data").install_dir # Assume we're actually installing if --root was not given. if (root is not None) or (data_dir is None): return directory = os.path.join(data_dir, "share", "applications") log.info("updating desktop database in '%s'" % directory) run_command_or_warn(("update-desktop-database", directory))
480,706
def calc_cross_points(self,cross=None): sizes = self.sizes() hasc = self.houses[0] nnode = self.planets[10] h = 0 hn = self.which_house(nnode) while hn > h: #if h == 0 and hn == 1: if hn - h == 1 and hn < self.which_house((nnode - 30) % 360): break h = (h+1)%12 hasc = self.houses[h] nnode = (nnode - 30) % 360 hn = self.which_house(nnode) dist = nnode - hasc
defcalc_cross_points(self,cross=None):sizes=self.sizes()hasc=self.houses[0]nnode=self.planets[10]h=0hn=self.which_house(nnode)whilehn>h:#ifh==0andhn==1:ifhn-h==1andhn<self.which_house((nnode-30)%360):breakh=(h+1)%12hasc=self.houses[h]nnode=(nnode-30)%360hn=self.which_house(nnode)dist=nnode-hasc
480,707
def tap(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None, tap_mode=None, direction=None): self.original.tap( x=x, y=y, z=self.z2(z), self.z2(zretract), depth, standoff, dwell_bottom, pitch, stoppos, spin_in, spin_out, tap_mode, direction)
def tap(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None, tap_mode=None, direction=None): self.original.tap( x=x, y=y, z=self.z2(z), self.z2(zretract), depth, standoff, dwell_bottom, pitch, stoppos, spin_in, spin_out, tap_mode, direction)
480,708
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
480,709
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
480,710
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
480,711
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
480,712
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
480,713
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0
480,714
def add_roll_on(k, roll_on_k, direction, roll_radius, offset_extra, roll_on): if direction == "on": return if roll_on == None: return num_spans = kurve.num_spans(k) if num_spans == 0: return if roll_on == 'auto': sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction if direction == 'right': off_vx = vy off_vy = -vx else: off_vx = -vy off_vy = vx rollstartx = sx + off_vx * roll_radius - vx * roll_radius rollstarty = sy + off_vy * roll_radius - vy * roll_radius else: rollstartx, rollstarty = roll_on sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if sx == rollstartx and sy == rollstarty: return vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir # because the tangential_arc was used in reverse # add a start roll on point kurve.add_point(roll_on_k, 0, rollstartx, rollstarty, 0, 0) # add the roll on arc kurve.add_point(roll_on_k, rdir, sx, sy, rcx, rcy) # add the start of the original kurve sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) kurve.add_point(roll_on_k, sp, ex, ey, cx, cy)
def add_roll_on(k, roll_on_k, direction, roll_radius, offset_extra, roll_on): if direction == "on": roll_on = None num_spans = kurve.num_spans(k) if num_spans == 0: return if roll_on == 'auto': sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction if direction == 'right': off_vx = vy off_vy = -vx else: off_vx = -vy off_vy = vx rollstartx = sx + off_vx * roll_radius - vx * roll_radius rollstarty = sy + off_vy * roll_radius - vy * roll_radius else: rollstartx, rollstarty = roll_on sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if sx == rollstartx and sy == rollstarty: return vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir # because the tangential_arc was used in reverse # add a start roll on point kurve.add_point(roll_on_k, 0, rollstartx, rollstarty, 0, 0) # add the roll on arc kurve.add_point(roll_on_k, rdir, sx, sy, rcx, rcy) # add the start of the original kurve sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) kurve.add_point(roll_on_k, sp, ex, ey, cx, cy)
480,715
def add_roll_on(k, roll_on_k, direction, roll_radius, offset_extra, roll_on): if direction == "on": return if roll_on == None: return num_spans = kurve.num_spans(k) if num_spans == 0: return if roll_on == 'auto': sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction if direction == 'right': off_vx = vy off_vy = -vx else: off_vx = -vy off_vy = vx rollstartx = sx + off_vx * roll_radius - vx * roll_radius rollstarty = sy + off_vy * roll_radius - vy * roll_radius else: rollstartx, rollstarty = roll_on sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if sx == rollstartx and sy == rollstarty: return vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir # because the tangential_arc was used in reverse # add a start roll on point kurve.add_point(roll_on_k, 0, rollstartx, rollstarty, 0, 0) # add the roll on arc kurve.add_point(roll_on_k, rdir, sx, sy, rcx, rcy) # add the start of the original kurve sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) kurve.add_point(roll_on_k, sp, ex, ey, cx, cy)
def add_roll_on(k, roll_on_k, direction, roll_radius, offset_extra, roll_on): if direction == "on": return if roll_on == None: return num_spans = kurve.num_spans(k) if num_spans == 0: return sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if roll_on == None: rollstartx = sx rollstarty = sy elif roll_on == 'auto': vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction if direction == 'right': off_vx = vy off_vy = -vx else: off_vx = -vy off_vy = vx rollstartx = sx + off_vx * roll_radius - vx * roll_radius rollstarty = sy + off_vy * roll_radius - vy * roll_radius else: rollstartx, rollstarty = roll_on sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if sx == rollstartx and sy == rollstarty: return vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir # because the tangential_arc was used in reverse # add a start roll on point kurve.add_point(roll_on_k, 0, rollstartx, rollstarty, 0, 0) # add the roll on arc kurve.add_point(roll_on_k, rdir, sx, sy, rcx, rcy) # add the start of the original kurve sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) kurve.add_point(roll_on_k, sp, ex, ey, cx, cy)
480,716
def add_roll_on(k, roll_on_k, direction, roll_radius, offset_extra, roll_on): if direction == "on": return if roll_on == None: return num_spans = kurve.num_spans(k) if num_spans == 0: return if roll_on == 'auto': sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction if direction == 'right': off_vx = vy off_vy = -vx else: off_vx = -vy off_vy = vx rollstartx = sx + off_vx * roll_radius - vx * roll_radius rollstarty = sy + off_vy * roll_radius - vy * roll_radius else: rollstartx, rollstarty = roll_on sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if sx == rollstartx and sy == rollstarty: return vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir # because the tangential_arc was used in reverse # add a start roll on point kurve.add_point(roll_on_k, 0, rollstartx, rollstarty, 0, 0) # add the roll on arc kurve.add_point(roll_on_k, rdir, sx, sy, rcx, rcy) # add the start of the original kurve sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) kurve.add_point(roll_on_k, sp, ex, ey, cx, cy)
def add_roll_on(k, roll_on_k, direction, roll_radius, offset_extra, roll_on): if direction == "on": return if roll_on == None: return num_spans = kurve.num_spans(k) if num_spans == 0: return if roll_on == 'auto': sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction if direction == 'right': off_vx = vy off_vy = -vx else: off_vx = -vy off_vy = vx rollstartx = sx + off_vx * roll_radius - vx * roll_radius rollstarty = sy + off_vy * roll_radius - vy * roll_radius else: rollstartx, rollstarty = roll_on sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if sx == rollstartx and sy == rollstarty: return vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir # because the tangential_arc was used in reverse # add a start roll on point kurve.add_point(roll_on_k, 0, rollstartx, rollstarty, 0, 0) # add the roll on arc kurve.add_point(roll_on_k, rdir, sx, sy, rcx, rcy) # add the start of the original kurve sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) kurve.add_point(roll_on_k, sp, ex, ey, cx, cy)
480,717
def cutcone(x_cen, y_cen, z_cen, top_r, bottom_r, depth, step_over): if top_r >= bottom_r: step_count = math.pi * top_r * 2 / step_over else: step_count = math.pi * bottom_r * 2 / step_over loop_count = 0 while (loop_count < 360): top_x = math.sin(loop_count * math.pi / 180) * top_r top_y = math.cos(loop_count * math.pi / 180) * top_r bottom_x = math.sin(loop_count * math.pi / 180) * bottom_r
def cutcone(x_cen, y_cen, z_cen, top_r, bottom_r, depth, step_over): if top_r >= bottom_r: step_count = math.pi * top_r * 2 / step_over else: step_count = math.pi * bottom_r * 2 / step_over loop_count = 0 while (loop_count < 360): top_x = math.sin(loop_count * math.pi / 180) * top_r top_y = math.cos(loop_count * math.pi / 180) * top_r bottom_x = math.sin(loop_count * math.pi / 180) * bottom_r
480,718
def cutcone(x_cen, y_cen, z_cen, top_r, bottom_r, depth, step_over): if top_r >= bottom_r: step_count = math.pi * top_r * 2 / step_over else: step_count = math.pi * bottom_r * 2 / step_over loop_count = 0 while (loop_count < 360): top_x = math.sin(loop_count * math.pi / 180) * top_r top_y = math.cos(loop_count * math.pi / 180) * top_r bottom_x = math.sin(loop_count * math.pi / 180) * bottom_r
def cutcone(x_cen, y_cen, z_cen, top_r, bottom_r, depth, step_over): if top_r >= bottom_r: step_count = math.pi * top_r * 2 / step_over else: step_count = math.pi * bottom_r * 2 / step_over loop_count = 0 while (loop_count < 360): top_x = math.sin(loop_count * math.pi / 180) * top_r top_y = math.cos(loop_count * math.pi / 180) * top_r bottom_x = math.sin(loop_count * math.pi / 180) * bottom_r
480,719
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,720
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,721
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,722
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,723
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,724
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,725
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,726
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,727
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,728
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,729
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down):
480,730
def drill(self, x=None, y=None, z=None, depth=None, standoff=None, dwell=None, peck_depth=None, retract_mode=None, spindle_mode=None):
def drill(self, x=None, y=None, z=None, depth=None, standoff=None, dwell=None, peck_depth=None, retract_mode=None, spindle_mode=None):
480,731
def cut_curve(curve, need_rapid, p, rapid_down_to_height, final_depth): prev_p = p first = True for vertex in curve.getVertices(): if need_rapid and first: # rapid across rapid(vertex.p.x, vertex.p.y) ##rapid down rapid(z = rapid_down_to_height) #feed down feed(z = final_depth) first = False else: dc = vertex.c - prev_p if vertex.type == 1: arc_ccw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) elif vertex.type == -1: arc_cw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) else: feed(vertex.p.x, vertex.p.y) prev_p = vertex.p return prev_p
def cut_curve(curve, need_rapid, p, rapid_down_to_height, current_start_depth, final_depth): prev_p = p first = True for vertex in curve.getVertices(): if need_rapid and first: # rapid across rapid(vertex.p.x, vertex.p.y) ##rapid down rapid(z = rapid_down_to_height) #feed down feed(z = final_depth) first = False else: dc = vertex.c - prev_p if vertex.type == 1: arc_ccw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) elif vertex.type == -1: arc_cw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) else: feed(vertex.p.x, vertex.p.y) prev_p = vertex.p return prev_p
480,732
def cut_curve(curve, need_rapid, p, rapid_down_to_height, final_depth): prev_p = p first = True for vertex in curve.getVertices(): if need_rapid and first: # rapid across rapid(vertex.p.x, vertex.p.y) ##rapid down rapid(z = rapid_down_to_height) #feed down feed(z = final_depth) first = False else: dc = vertex.c - prev_p if vertex.type == 1: arc_ccw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) elif vertex.type == -1: arc_cw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) else: feed(vertex.p.x, vertex.p.y) prev_p = vertex.p return prev_p
def cut_curve(curve, need_rapid, p, rapid_down_to_height, final_depth): prev_p = p first = True for vertex in curve.getVertices(): if need_rapid and first: # rapid across rapid(vertex.p.x, vertex.p.y) ##rapid down rapid(z = current_start_depth + rapid_down_to_height) #feed down feed(z = final_depth) first = False else: dc = vertex.c - prev_p if vertex.type == 1: arc_ccw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) elif vertex.type == -1: arc_cw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) else: feed(vertex.p.x, vertex.p.y) prev_p = vertex.p return prev_p
480,733
def cut_curvelist(curve_list, rapid_down_to_height, depth, clearance_height, keep_tool_down_if_poss): p = area.Point(0, 0) first = True for curve in curve_list: need_rapid = True if first == False: s = curve.FirstVertex().p if keep_tool_down_if_poss == True: # see if we can feed across if feed_possible(p, s): need_rapid = False elif s.x == p.x and s.y == p.y: need_rapid = False if need_rapid: rapid(z = clearance_height) p = cut_curve(curve, need_rapid, p, rapid_down_to_height, depth) first = False rapid(z = clearance_height)
def cut_curvelist(curve_list, rapid_down_to_height, current_start_depth, depth, clearance_height, keep_tool_down_if_poss): p = area.Point(0, 0) first = True for curve in curve_list: need_rapid = True if first == False: s = curve.FirstVertex().p if keep_tool_down_if_poss == True: # see if we can feed across if feed_possible(p, s): need_rapid = False elif s.x == p.x and s.y == p.y: need_rapid = False if need_rapid: rapid(z = clearance_height) p = cut_curve(curve, need_rapid, p, rapid_down_to_height, depth) first = False rapid(z = clearance_height)
480,734
def cut_curvelist(curve_list, rapid_down_to_height, depth, clearance_height, keep_tool_down_if_poss): p = area.Point(0, 0) first = True for curve in curve_list: need_rapid = True if first == False: s = curve.FirstVertex().p if keep_tool_down_if_poss == True: # see if we can feed across if feed_possible(p, s): need_rapid = False elif s.x == p.x and s.y == p.y: need_rapid = False if need_rapid: rapid(z = clearance_height) p = cut_curve(curve, need_rapid, p, rapid_down_to_height, depth) first = False rapid(z = clearance_height)
def cut_curvelist(curve_list, rapid_down_to_height, depth, clearance_height, keep_tool_down_if_poss): p = area.Point(0, 0) first = True for curve in curve_list: need_rapid = True if first == False: s = curve.FirstVertex().p if keep_tool_down_if_poss == True: # see if we can feed across if feed_possible(p, s): need_rapid = False elif s.x == p.x and s.y == p.y: need_rapid = False if need_rapid: rapid(z = clearance_height) p = cut_curve(curve, need_rapid, p, rapid_down_to_height, current_start_depth, depth) first = False rapid(z = clearance_height)
480,735
def tools_run_tests(self): self.addStep(ShellCommand( workdir='tools/release/signing', command=['python', 'tests.py'], name='release_signing_tests', )) self.addStep(ShellCommand( workdir='tools/lib/python', env={'PYTHONPATH': WithProperties('%(topdir)s/tools/lib/python')}, name='run_lib_nosetests', command=['nosetests'], )) self.addStep(ShellCommand( workdir='tools/clobberer', flunkOnFailure=False, name='run_clobbberer_test', command=['python', 'test_clobberer.py', 'http://preproduction-master.build.mozilla.org/~cltbld/index.php', '/home/cltbld/public_html/db/clobberer.db'], ))
def tools_run_tests(self): self.addStep(ShellCommand( workdir='tools/release/signing', command=['python', 'tests.py'], name='release_signing_tests', )) self.addStep(ShellCommand( workdir='tools/lib/python', env={'PYTHONPATH': WithProperties('%(topdir)s/tools/lib/python')}, name='run_lib_nosetests', command=['nosetests'], )) self.addStep(ShellCommand( workdir='tools/clobberer', name='run_clobbberer_test', command=['python', 'test_clobberer.py', 'http://preproduction-master.build.mozilla.org/~cltbld/index.php', '/home/cltbld/public_html/db/clobberer.db'], ))
480,736
def createSummary(self, log): self.parent_class.createSummary(self, log) key = 'pylint-%s' % self.project if not self.build.getProperties().has_key(key): self.setProperty(key, {}) props = self.getProperty(key) for msg, fullmsg in self.MESSAGES.items(): props[fullmsg] = self.getProperty('pylint-%s' % fullmsg) props['total'] = self.getProperty('pylint-total')
def createSummary(self, log): self.parent_class.createSummary(self, log) key = 'pylint-%s' % self.project if not self.build.getProperties().has_key(key): self.setProperty(key, {}) props = self.getProperty(key) for msg, fullmsg in self.MESSAGES.items(): props[fullmsg] = self.getProperty('pylint-%s' % fullmsg) props['total'] = self.getProperty('pylint-total')
480,737
def __init__(self, hgHost, **kwargs): self.parent_class = BuildFactory self.parent_class.__init__(self, **kwargs) self.hgHost = hgHost self.addStep(SetProperty(name='set_topdir', command=['pwd'], property='topdir', workdir='.', )) self.addStep(RemovePYCs(workdir="."))
def __init__(self, hgHost, **kwargs): self.parent_class = BuildFactory self.parent_class.__init__(self, **kwargs) self.hgHost = hgHost self.addStep(SetProperty(name='set_topdir', command=['pwd'], property='topdir', workdir='.', )) self.addStep(RemovePYCs(workdir="."))
480,738
def __init__(self, hgHost, **kwargs): self.parent_class = BuildFactory self.parent_class.__init__(self, **kwargs) #self.addFactoryArguments(hgHost=hgHost) self.hgHost = hgHost self.addStep(SetProperty(name='set_topdir', command=['pwd'], property='topdir', workdir='.', )) self.addStep(ShellCommand(name='rm_pyc', command=['bash', '-c', 'find . -name "*.pyc" -exec rm -f {} ";"'], workdir=".", ))
def__init__(self,hgHost,**kwargs):self.parent_class=BuildFactoryself.parent_class.__init__(self,**kwargs)#self.addFactoryArguments(hgHost=hgHost)self.hgHost=hgHostself.addStep(SetProperty(name='set_topdir',command=['pwd'],property='topdir',workdir='.',))self.addStep(ShellCommand(name='rm_pyc',command=['bash','-c','find.-name"*.pyc"-execrm-f{}";"'],workdir=".",))
480,739
def test_masters(self): self.addStep(ShellCommand(name='test_masters', command=['./test-masters.sh', '-8'], env = { 'PYTHONPATH': WithProperties('%(topdir)s'), 'PATH': WithProperties('%(topdir)s/sandbox/bin:/bin:/usr/bin'), }, workdir="buildbot-configs", ))
def test_masters(self): self.addStep(ShellCommand(name='test_masters', command=['./test-masters.sh', '-8'], env = { 'PYTHONPATH': WithProperties('%(topdir)s:%(topdir)s/tools/lib/python'), 'PATH': WithProperties('%(topdir)s/sandbox/bin:/bin:/usr/bin'), }, workdir="buildbot-configs", ))
480,740
def tools_pylint(self): # TODO: move pylintrc to tools self.addStep(PyLintExtended( command='../../../sandbox/bin/pylint --rcfile=../../.pylintrc *', workdir='tools/lib/python', flunkOnFailure=False, name='tools_lib_pylint', project='tools_lib', )) self.addStep(PyLintExtended( command='find buildbot-helpers buildfarm \ clobberer release stage \ -name \'*.py\' -type f -print0 | \ xargs -0 ../sandbox/bin/pylint \ --rcfile=.pylintrc', workdir="tools", env = {'PYTHONPATH': WithProperties('%(topdir)s:%(topdir)s/tools/lib/python')}, flunkOnFailure=False, name='tools_scripts_pylint', project='tools_scripts', ))
def tools_pylint(self): # TODO: move pylintrc to tools self.addStep(PyLintExtended( command='../../../sandbox/bin/pylint --rcfile=../../.pylintrc *', workdir='tools/lib/python', name='tools_lib_pylint', project='tools_lib', )) self.addStep(PyLintExtended( command='find buildbot-helpers buildfarm \ clobberer release stage \ -name \'*.py\' -type f -print0 | \ xargs -0 ../sandbox/bin/pylint \ --rcfile=.pylintrc', workdir="tools", env = {'PYTHONPATH': WithProperties('%(topdir)s:%(topdir)s/tools/lib/python')}, name='tools_scripts_pylint', project='tools_scripts', ))
480,741
def _get_eval(self): values = {} for field, definition in self._fields.iteritems(): if definition['type'] in ('one2many', 'many2many'): values[field] = [x.id for x in getattr(self, field) or []] else: values[field] = getattr(self, field) values['id'] = self.id return values
def _get_eval(self): values = {} for field, definition in self._fields.iteritems(): if definition['type'] in ('one2many', 'many2many'): values[field] = [x.id for x in getattr(self, field) or []] else: values[field] = getattr(self, field) values['id'] = self.id return values
480,742
def generate_image(d): """ Generates an image accoording to given configuration. """ logging.debug(repr(d)) if d['imagebuilder'] not in IMAGEBUILDERS: raise Exception("Invalid imagebuilder specified!") x = OpenWrtConfig() x.setUUID(d['uuid']) x.setOpenwrtVersion(d['openwrt_ver']) x.setArch(d['arch']) x.setPortLayout(d['port_layout']) x.setWifiIface(d['iface_wifi'], d['driver'], d['channel']) x.setWifiAnt(d['rx_ant'], d['tx_ant']) x.setLanIface(d['iface_lan']) x.setNodeType("adhoc") x.setPassword(d['root_pass']) x.setHostname(d['hostname']) x.setIp(d['ip']) x.setSSID(d['ssid']) # Add WAN interface and all subnets if d['wan_dhcp']: x.addInterface("wan", d['iface_wan'], init = True) else: x.addInterface("wan", d['iface_wan'], d['wan_ip'], d['wan_cidr'], d['wan_gw'], init = True) for subnet in d['subnets']: x.addSubnet(str(subnet['iface']), str(subnet['network']), subnet['cidr'], subnet['dhcp'], True) x.setCaptivePortal(d['captive_portal']) if d['vpn']: x.setVpn(d['vpn_username'], d['vpn_password'], d['vpn_mac'], d['vpn_limit']) if d['lan_wifi_bridge']: x.enableLanWifiBridge() if d['lan_wan_switch']: x.switchWanToLan() # Add optional packages for package in d['opt_pkg']: x.addPackage(package) # Cleanup stuff from previous builds os.chdir(WORKDIR) os.system("rm -rf build/files/*") os.system("rm -rf build/%s/bin/*" % d['imagebuilder']) os.mkdir("build/files/etc") x.generate("build/files/etc") if d['only_config']: # Just pack configuration and send it prefix = hashlib.md5(os.urandom(32)).hexdigest()[0:16] tempfile = os.path.join(DESTINATION, prefix + "-config.zip") zip = ZipFile(tempfile, 'w', ZIP_DEFLATED) os.chdir('build/files') for root, dirs, files in os.walk("etc"): for file in files: zip.write(os.path.join(root, file)) zip.close() # Generate checksum f = open(tempfile, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] 
checksum = checksum.hexdigest() result = "%s-%s-config-%s.zip" % (d['hostname'], d['router_name'], filechecksum) destination = os.path.join(DESTINATION, result) os.rename(tempfile, destination) # Send an e-mail t = loader.get_template('generator/email_config.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'config' : result, 'checksum' : checksum, 'network' : { 'name' : settings.NETWORK_NAME, 'contact' : settings.NETWORK_CONTACT, 'description' : getattr(settings, 'NETWORK_DESCRIPTION', None) } }) send_mail( setting.EMAIL_SUBJECT_PREFIX + (_("Configuration for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False ) else: # Generate full image x.build("build/%s" % d['imagebuilder']) # Read image version try: f = open(glob('%s/build/%s/build_dir/target-*/root-*/etc/version' % (WORKDIR, d['imagebuilder']))[0], 'r') version = f.read().strip().replace('.', '_') f.close() except: version = 'unknown' # Get resulting image files = [] for file, type in d['imagefiles']: file = str(file) source = "%s/build/%s/bin/%s" % (WORKDIR, d['imagebuilder'], file) f = open(source, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] checksum = checksum.hexdigest() ext = os.path.splitext(file)[1] result = "%s-%s-%s%s-%s%s" % (d['hostname'], d['router_name'], version, ("-%s" % type if type else ""), filechecksum, ext) destination = os.path.join(DESTINATION, result) os.rename(source, destination) files.append({ 'name' : result, 'checksum' : checksum }) # Send an e-mail t = loader.get_template('generator/email.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'files' : files, 'network' : { 'name' : settings.NETWORK_NAME, 'contact' : settings.NETWORK_CONTACT, 'description' : 
getattr(settings, 'NETWORK_DESCRIPTION', None) } }) send_mail( settings.EMAIL_SUBJECT_PREFIX + (_("Router images for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False )
def generate_image(d): """ Generates an image accoording to given configuration. """ logging.debug(repr(d)) if d['imagebuilder'] not in IMAGEBUILDERS: raise Exception("Invalid imagebuilder specified!") x = OpenWrtConfig() x.setUUID(d['uuid']) x.setOpenwrtVersion(d['openwrt_ver']) x.setArch(d['arch']) x.setPortLayout(d['port_layout']) x.setWifiIface(d['iface_wifi'], d['driver'], d['channel']) x.setWifiAnt(d['rx_ant'], d['tx_ant']) x.setLanIface(d['iface_lan']) x.setNodeType("adhoc") x.setPassword(d['root_pass']) x.setHostname(d['hostname']) x.setIp(d['ip']) x.setSSID(d['ssid']) # Add WAN interface and all subnets if d['wan_dhcp']: x.addInterface("wan", d['iface_wan'], init = True) else: x.addInterface("wan", d['iface_wan'], d['wan_ip'], d['wan_cidr'], d['wan_gw'], init = True) for subnet in d['subnets']: x.addSubnet(str(subnet['iface']), str(subnet['network']), subnet['cidr'], subnet['dhcp'], True) x.setCaptivePortal(d['captive_portal']) if d['vpn']: x.setVpn(d['vpn_username'], d['vpn_password'], d['vpn_mac'], d['vpn_limit']) if d['lan_wifi_bridge']: x.enableLanWifiBridge() if d['lan_wan_switch']: x.switchWanToLan() # Add optional packages for package in d['opt_pkg']: x.addPackage(package) # Cleanup stuff from previous builds os.chdir(WORKDIR) os.system("rm -rf build/files/*") os.system("rm -rf build/%s/bin/*" % d['imagebuilder']) os.mkdir("build/files/etc") x.generate("build/files/etc") if d['only_config']: # Just pack configuration and send it prefix = hashlib.md5(os.urandom(32)).hexdigest()[0:16] tempfile = os.path.join(DESTINATION, prefix + "-config.zip") zip = ZipFile(tempfile, 'w', ZIP_DEFLATED) os.chdir('build/files') for root, dirs, files in os.walk("etc"): for file in files: zip.write(os.path.join(root, file)) zip.close() # Generate checksum f = open(tempfile, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] 
checksum = checksum.hexdigest() result = "%s-%s-config-%s.zip" % (d['hostname'], d['router_name'], filechecksum) destination = os.path.join(DESTINATION, result) os.rename(tempfile, destination) # Send an e-mail t = loader.get_template('generator/email_config.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'config' : result, 'checksum' : checksum, 'network' : { 'name' : settings.NETWORK_NAME, 'contact' : settings.NETWORK_CONTACT, 'description' : getattr(settings, 'NETWORK_DESCRIPTION', None) } }) send_mail( settings.EMAIL_SUBJECT_PREFIX + (_("Configuration for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False ) else: # Generate full image x.build("build/%s" % d['imagebuilder']) # Read image version try: f = open(glob('%s/build/%s/build_dir/target-*/root-*/etc/version' % (WORKDIR, d['imagebuilder']))[0], 'r') version = f.read().strip().replace('.', '_') f.close() except: version = 'unknown' # Get resulting image files = [] for file, type in d['imagefiles']: file = str(file) source = "%s/build/%s/bin/%s" % (WORKDIR, d['imagebuilder'], file) f = open(source, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] checksum = checksum.hexdigest() ext = os.path.splitext(file)[1] result = "%s-%s-%s%s-%s%s" % (d['hostname'], d['router_name'], version, ("-%s" % type if type else ""), filechecksum, ext) destination = os.path.join(DESTINATION, result) os.rename(source, destination) files.append({ 'name' : result, 'checksum' : checksum }) # Send an e-mail t = loader.get_template('generator/email.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'files' : files, 'network' : { 'name' : settings.NETWORK_NAME, 'contact' : settings.NETWORK_CONTACT, 'description' : 
getattr(settings, 'NETWORK_DESCRIPTION', None) } }) send_mail( settings.EMAIL_SUBJECT_PREFIX + (_("Router images for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False )
480,743
def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Wireless: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: 
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Treat missing firmware version file as NULL version if n.firmware_version == "missing": n.firmware_version = None # Validate BSSID and ESSID if n.bssid != "02:CA:FF:EE:BA:BE": NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > 
n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: offset = -3 unit = 1000 if 'Kbit' in info['net']['vpn']['upload_limit']: offset = -4 unit = 1 upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:offset]) // unit except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning 
when captive portal is down and the node has it # selected in its image generator profile try: if n.project.captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check node's multicast rate if 'mcast_rate' in info['wifi']: rate = safe_int_convert(info['wifi']['mcast_rate']) if rate != 5500: NodeWarning.create(n, WarningCode.McastRateMismatch, EventSource.Monitor) # Check node's wifi bitrate, level and noise if 'signal' in info['wifi']: bitrate = safe_int_convert(info['wifi']['bitrate']) signal = safe_dbm_convert(info['wifi']['signal']) noise = safe_dbm_convert(info['wifi']['noise']) 
snr = float(signal) / float(noise) add_graph(n, '', GraphType.WifiBitrate, RRAWifiBitrate, 'Wifi Bitrate', 'wifibitrate', bitrate) add_graph(n, '', GraphType.WifiSignalNoise, RRAWifiSignalNoise, 'Wifi Signal/Noise', 'wifisignalnoise', signal, noise) add_graph(n, '', GraphType.WifiSNR, RRAWifiSNR, 'Wifi Signal/Noise Ratio', 'wifisnr', snr) # Generate a graph for number of clients if 'nds' in info: add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs try: if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi except Profile.DoesNotExist: pass add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = 
safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # 
Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Wireless: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: 
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Treat missing firmware version file as NULL version if n.firmware_version == "missing": n.firmware_version = None # Validate BSSID and ESSID if n.bssid != "02:CA:FF:EE:BA:BE": NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > 
n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: offset = -3 unit = 1000 if 'Kbit' in info['net']['vpn']['upload_limit']: offset = -4 unit = 1 upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:offset]) // unit except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning 
when captive portal is down and the node has it # selected in its image generator profile try: if n.project.captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check node's multicast rate if 'mcast_rate' in info['wifi']: rate = safe_int_convert(info['wifi']['mcast_rate']) if rate != 5500: NodeWarning.create(n, WarningCode.McastRateMismatch, EventSource.Monitor) # Check node's wifi bitrate, level and noise if 'signal' in info['wifi']: bitrate = safe_int_convert(info['wifi']['bitrate']) signal = safe_dbm_convert(info['wifi']['signal']) noise = safe_dbm_convert(info['wifi']['noise']) 
snr = float(signal) - float(noise) add_graph(n, '', GraphType.WifiBitrate, RRAWifiBitrate, 'Wifi Bitrate', 'wifibitrate', bitrate) add_graph(n, '', GraphType.WifiSignalNoise, RRAWifiSignalNoise, 'Wifi Signal/Noise', 'wifisignalnoise', signal, noise) add_graph(n, '', GraphType.WifiSNR, RRAWifiSNR, 'Wifi Signal/Noise Ratio', 'wifisnr', snr) # Generate a graph for number of clients if 'nds' in info: add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs try: if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi except Profile.DoesNotExist: pass add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = 
safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # 
Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
480,744
def __init__(self): """ Class constructor. """ NodeConfig.__init__(self) # Add some basic services self.addService('S35', 'misc') self.addService('K35', 'misc')
def __init__(self): """ Class constructor. """ NodeConfig.__init__(self) # Add some basic services self.addService('S46', 'misc') self.addService('K46', 'misc')
480,745
def __generateMiscScript(self, f): f.write('#!/bin/sh /etc/rc.common\n') f.write('START=35') f.write('\n') f.write('STOP=35') f.write('\n') f.write('start() {\n') # Prevent the time from reseting to far into the past t = datetime.today() f.write('\tif [ ! -f /etc/datetime.save ]; then\n') f.write('\t echo -n "%02d%02d%02d%02d%04d" > /etc/datetime.save\n' % (t.month, t.day, t.hour, t.minute, t.year)) f.write('\tfi\n') f.write('\tDT=`cat /etc/datetime.save`\n') f.write('\tdate $DT\n') f.write('\n') # Allow txtinfo access when selected if 'olsrd-mod-txtinfo' in self.packages: f.write('\tiptables -A INPUT -p tcp --dport 2006 -j ACCEPT\n') f.write('\n') # Set boot_wait to on if it is not set f.write('\tif [ -x /usr/sbin/nvram ]; then\n') f.write('\t\tBOOT_WAIT=`nvram get boot_wait`\n') f.write('\t\t[ "$BOOT_WAIT" != "on" ] && {\n') f.write('\t\t nvram set boot_wait=on\n') f.write('\t\t nvram commit\n') f.write('\t\t}\n') # Set boardflags on WHR-HP-G54 if self.portLayout == 'whr-hp-g54': f.write('\tBOARDFLAGS=`nvram get boardflags`\n') f.write('\t\t[ "$BOARDFLAGS" != "0x3758" ] && {\n') f.write('\t\t nvram set boardflags=0x3758\n') f.write('\t\t nvram commit\n') f.write('\t\t}\n') f.write('\tfi\n') f.write('}\n') f.write('stop() {\n') f.write('\tDT=`date +%m%d%H%M%Y`\n') f.write('\techo $DT > /etc/datetime.save\n') f.write('}\n') f.close() if self.openwrtVersion == "old": # Copy timezone template self.__copyTemplate("general/timezone", os.path.join(self.base, 'TZ'))
def __generateMiscScript(self, f): f.write('#!/bin/sh /etc/rc.common\n') f.write('START=46\n') f.write('STOP=46\n') f.write('start() {\n') # Prevent the time from reseting to far into the past t = datetime.today() f.write('\tif [ ! -f /etc/datetime.save ]; then\n') f.write('\t echo -n "%02d%02d%02d%02d%04d" > /etc/datetime.save\n' % (t.month, t.day, t.hour, t.minute, t.year)) f.write('\tfi\n') f.write('\tDT=`cat /etc/datetime.save`\n') f.write('\tdate $DT\n') f.write('\n') # Allow txtinfo access when selected if 'olsrd-mod-txtinfo' in self.packages: f.write('\tiptables -A INPUT -p tcp --dport 2006 -j ACCEPT\n') f.write('\n') # Set boot_wait to on if it is not set f.write('\tif [ -x /usr/sbin/nvram ]; then\n') f.write('\t\tBOOT_WAIT=`nvram get boot_wait`\n') f.write('\t\t[ "$BOOT_WAIT" != "on" ] && {\n') f.write('\t\t nvram set boot_wait=on\n') f.write('\t\t nvram commit\n') f.write('\t\t}\n') # Set boardflags on WHR-HP-G54 if self.portLayout == 'whr-hp-g54': f.write('\tBOARDFLAGS=`nvram get boardflags`\n') f.write('\t\t[ "$BOARDFLAGS" != "0x3758" ] && {\n') f.write('\t\t nvram set boardflags=0x3758\n') f.write('\t\t nvram commit\n') f.write('\t\t}\n') f.write('\tfi\n') f.write('}\n') f.write('stop() {\n') f.write('\tDT=`date +%m%d%H%M%Y`\n') f.write('\techo $DT > /etc/datetime.save\n') f.write('}\n') f.close() if self.openwrtVersion == "old": # Copy timezone template self.__copyTemplate("general/timezone", os.path.join(self.base, 'TZ'))
480,746
def save(self, user): """ Completes node registration. """ ip = self.cleaned_data.get('ip') project = self.cleaned_data.get('project') pool = self.cleaned_data.get('pool') subnet = None
def save(self, user): """ Completes node registration. """ ip = self.cleaned_data.get('ip') project = self.cleaned_data.get('project') pool = self.cleaned_data.get('pool') subnet = None
480,747
def to_help_string(code): """ A helper method for transforming a warning code to a human readable help string.
def to_help_string(code): """ A helper method for transforming a warning code to a human readable help string.
480,748
def __generateOlsrdConfig(self, f): # Subnet configuration if self.subnets: f.write('Hna4\n') f.write('{\n') for subnet in self.subnets: if subnet['olsr'] and subnet['cidr'] < 29: f.write(' %(subnet)s %(mask)s\n' % subnet) f.write('}\n\n') # General configuration (static) f.write('AllowNoInt yes\n') f.write('UseHysteresis no\n') f.write('LinkQualityFishEye 0\n') f.write('Willingness 3\n') f.write('LinkQualityLevel 2\n') f.write('LinkQualityAging 0.1\n') f.write('LinkQualityAlgorithm "etx_ff"\n') f.write('LinkQualityDijkstraLimit 0 9.0\n') f.write('FIBMetric "flat"\n') f.write('Pollrate 0.025\n') f.write('TcRedundancy 2\n') f.write('MprCoverage 3\n') f.write('NatThreshold 0.75\n') f.write('SmartGateway no\n') f.write('MainIp {0}\n'.format(self.ip)) f.write('SrcIpRoutes yes\n') f.write('\n') # Setup txtinfo plugin when selected if 'olsrd-mod-txtinfo' in self.packages: f.write('LoadPlugin "olsrd_txtinfo.so.0.1"\n') f.write('{\n') f.write(' PlParam "accept" "0.0.0.0"\n') f.write('}\n') f.write('\n') # Setup actions plugin to trigger a nodewatcher script when the default # route is added or removed from the routing table if self.hasClientSubnet: f.write('LoadPlugin "olsrd_actions.so.0.1"\n') f.write('{\n') f.write(' PlParam "trigger" "0.0.0.0>/etc/actions.d/olsr_gateway_action"\n') for dns in self.dns: f.write(' PlParam "trigger" "%s>/etc/actions.d/olsr_dns_action"\n' % dns) f.write('}\n') f.write('\n') # Add the olsrd-mod-actions package self.addPackage('olsrd-mod-actions') # General interface configuration (static) def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') 
f.write('}\n') f.write('\n') # Additional interface configuration for interface in self.interfaces: if interface['olsr']: interfaceConfiguration(interface['name'], interface['ip']) f.close()
def __generateOlsrdConfig(self, f): # Subnet configuration if self.subnets: f.write('Hna4\n') f.write('{\n') for subnet in self.subnets: if subnet['olsr'] and subnet['cidr'] < 29: f.write(' %(subnet)s %(mask)s\n' % subnet) f.write('}\n\n') # General configuration (static) f.write('AllowNoInt yes\n') f.write('UseHysteresis no\n') f.write('LinkQualityFishEye 0\n') f.write('Willingness 3\n') f.write('LinkQualityLevel 2\n') f.write('LinkQualityAging 0.1\n') f.write('LinkQualityAlgorithm "etx_ff"\n') f.write('FIBMetric "flat"\n') f.write('Pollrate 0.025\n') f.write('TcRedundancy 2\n') f.write('MprCoverage 3\n') f.write('NatThreshold 0.75\n') f.write('SmartGateway no\n') f.write('MainIp {0}\n'.format(self.ip)) f.write('SrcIpRoutes yes\n') f.write('\n') # Setup txtinfo plugin when selected if 'olsrd-mod-txtinfo' in self.packages: f.write('LoadPlugin "olsrd_txtinfo.so.0.1"\n') f.write('{\n') f.write(' PlParam "accept" "0.0.0.0"\n') f.write('}\n') f.write('\n') # Setup actions plugin to trigger a nodewatcher script when the default # route is added or removed from the routing table if self.hasClientSubnet: f.write('LoadPlugin "olsrd_actions.so.0.1"\n') f.write('{\n') f.write(' PlParam "trigger" "0.0.0.0>/etc/actions.d/olsr_gateway_action"\n') for dns in self.dns: f.write(' PlParam "trigger" "%s>/etc/actions.d/olsr_dns_action"\n' % dns) f.write('}\n') f.write('\n') # Add the olsrd-mod-actions package self.addPackage('olsrd-mod-actions') # General interface configuration (static) def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n') # Additional 
interface configuration for interface in self.interfaces: if interface['olsr']: interfaceConfiguration(interface['name'], interface['ip']) f.close()
480,749
def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n')
def interfaceConfiguration(name): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n')
480,750
def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n')
def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n')
480,751
def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n')
def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n')
480,752
def __generateNetworkConfig(self, f): # VLAN configuration layout = portLayouts[self.portLayout] if isinstance(layout, tuple): f.write('#### VLAN configuration\n') f.write('config switch %s\n' % ("eth0" if not self.portLayout in switchIds else switchIds[self.portLayout])) f.write('\toption vlan0 "%s"\n' % layout[0]) f.write('\toption vlan1 "%s"\n' % layout[1]) f.write('\n') # Loopback configuration (static) f.write('#### Loopback configuration\n') f.write('config interface loopback\n') f.write('\toption ifname "lo"\n') f.write('\toption proto static\n') f.write('\toption ipaddr 127.0.0.1\n') f.write('\toption netmask 255.0.0.0\n') f.write('\n')
def __generateNetworkConfig(self, f): # VLAN configuration layout = portLayouts[self.portLayout] if isinstance(layout, tuple): f.write('#### VLAN configuration\n') f.write('config switch %s\n' % ("eth0" if not self.portLayout in switchIds else switchIds[self.portLayout])) f.write('\toption vlan0 "%s"\n' % layout[0]) f.write('\toption vlan1 "%s"\n' % layout[1]) f.write('\n') # Loopback configuration (static) f.write('#### Loopback configuration\n') f.write('config interface loopback\n') f.write('\toption ifname "lo"\n') f.write('\toption proto static\n') f.write('\toption ipaddr 127.0.0.1\n') f.write('\toption netmask 255.0.0.0\n') f.write('\n')
480,753
def adapt_to_router_type(self): """ Ensures that new router type is compatible with current configuration. """ if not self.profile: return for entry in self.profile.template.adaptation_chain.all().order_by("priority"): cls = load_plugin(entry.class_name, required_super = RouterTransition) transition = cls() transition.adapt(self)
def adapt_to_router_type(self): """ Ensures that new router type is compatible with current configuration. """ from wlanlj.generator.models import Profile try: self.profile except Profile.DoesNotExist: return for entry in self.profile.template.adaptation_chain.all().order_by("priority"): cls = load_plugin(entry.class_name, required_super = RouterTransition) transition = cls() transition.adapt(self)
480,754
def ensure_success(errcode): if errcode != 0: print "ERROR: Command failed to execute, aborting!" exit(1)
def ensure_success(errcode): if errcode != 0: print "ERROR: Command failed to execute, aborting!" exit(1)
480,755
def ensure_success(errcode): if errcode != 0: print "ERROR: Command failed to execute, aborting!" exit(1)
def ensure_success(errcode): if errcode != 0: print "ERROR: Command failed to execute, aborting!" exit(1)
480,756
def ensure_success(errcode): if errcode != 0: print "ERROR: Command failed to execute, aborting!" exit(1)
def ensure_success(errcode): if errcode != 0: print "ERROR: Command failed to execute, aborting!" exit(1)
480,757
def node_type_as_string_plural(self): """ Returns node type as string. """ if self.node_type == NodeType.Mesh: return _("Mesh nodes") elif self.node_type == NodeType.Server: return _("Server nodes") elif self.node_type == NodeType.Test: return _("Test nodes") elif self.node_type == NodeType.Mobile: return _("Mobile nodes") else: return _("unknown nodes")
def node_type_as_string_plural(self): """ Returns node type as string. """ if self.node_type == NodeType.Mesh: return _("Mesh nodes") elif self.node_type == NodeType.Server: return _("Server nodes") elif self.node_type == NodeType.Test: return _("Test nodes") elif self.node_type == NodeType.Mobile: return _("Mobile nodes") else: return _("unknown nodes")
480,758
def generate_new_node_tweet(node): if not tweets_enabled(): return try: bit_api = bitly.Api(login=settings.BITLY_LOGIN, apikey=settings.BITLY_API_KEY) twitter_api = twitter.Api(username=settings.TWITTER_USERNAME, settings.TWITTER_PASSWORD) node_link = bit_api.shorten(node.get_url()) msg = "A new node %s has just connected to the mesh %s" % (node.name, node_link) twitter_api.PostUpdate(msg) except: logging.warning(format_exc())
def generate_new_node_tweet(node): if not tweets_enabled(): return try: bit_api = bitly.Api(login=settings.BITLY_LOGIN, apikey=settings.BITLY_API_KEY) twitter_api = twitter.Api(username = settings.TWITTER_USERNAME, password = settings.TWITTER_PASSWORD) node_link = bit_api.shorten(node.get_url()) msg = "A new node %s has just connected to the mesh %s" % (node.name, node_link) twitter_api.PostUpdate(msg) except: logging.warning(format_exc())
480,759
def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: 
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Validate BSSID and ESSID if n.bssid != "02:CA:FF:EE:BA:BE": NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' 
% (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:-3]) // 1000 except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile try: if n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except 
Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check 
mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages 
and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: 
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Validate BSSID and ESSID if n.bssid != "02:CA:FF:EE:BA:BE": NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' 
% (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:-3]) // 1000 except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile try: if n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except 
Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check 
mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs try: if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi except Profile.DoesNotExist: pass add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = 
nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
480,760
def parse_tables(data): """ Parses the OLSR routing tables. """ isTable = False isTableHead = False currentTable = '' nodes = {} hna = {} for line in data.splitlines(): line = line.strip() if line[0:6] == 'Table:' and line[7:] in ('Topology', 'HNA', 'MID'): isTable = True isTableHead = True currentTable = line[7:] continue if isTable and isTableHead: isTableHead = False continue if isTable and not line: isTable = False currentTable = '' continue if currentTable == 'Topology': srcIp, dstIp, LQ, ILQ, ETX = line.split('\t') try: if not float(ETX): continue except ValueError: # Newer OLSR versions can use INFINITE as ETX continue srcNode = create_node(srcIp, nodes, hna) dstNode = create_node(dstIp, nodes, hna) srcNode.links.append((dstIp, LQ, ILQ, ETX)) elif currentTable == 'HNA': try: network, cidr, gwIp = line.split('\t') except ValueError: # Newer OLSR versions have changed the format network, gwIp = line.split('\t') network, cidr = network.split('/') node = hna.setdefault(gwIp, []) node.append('%s/%s' % (network, cidr)) elif currentTable == 'MID': ip, alias = line.split('\t') # Treat MIDs as /32 HNAs l = hna.setdefault(ip, []) l.append('%s/32' % alias) return nodes, hna
def parse_tables(data): """ Parses the OLSR routing tables. """ isTable = False isTableHead = False currentTable = '' nodes = {} hna = {} for line in data.splitlines(): line = line.strip() if line[0:6] == 'Table:' and line[7:] in ('Topology', 'HNA', 'MID'): isTable = True isTableHead = True currentTable = line[7:] continue if isTable and isTableHead: isTableHead = False continue if isTable and not line: isTable = False currentTable = '' continue if currentTable == 'Topology': srcIp, dstIp, LQ, ILQ, ETX = line.split('\t') try: if not float(ETX): continue except ValueError: # Newer OLSR versions can use INFINITE as ETX continue srcNode = create_node(srcIp, nodes, hna) dstNode = create_node(dstIp, nodes, hna) srcNode.links.append((dstIp, LQ, ILQ, ETX)) elif currentTable == 'HNA': try: network, cidr, gwIp = line.split('\t') except ValueError: # Newer OLSR versions have changed the format network, gwIp = line.split('\t') network, cidr = network.split('/') node = hna.setdefault(gwIp, []) node.append('%s/%s' % (network, cidr)) elif currentTable == 'MID': ip, alias = line.split('\t') # Treat MIDs as /32 HNAs for x in alias: l = hna.setdefault(ip, []) l.append('%s/32' % x) return nodes, hna
480,761
def item_link(self, item): return item.get_full_url()
def item_link(self, item): return item.get_full_url()
480,762
def __str__(self): return "DS:%s:%s:%s:U:U" % (self.name, self.type, self.heartbeat)
def __str__(self): return "DS:%s:%s:%s:U:U" % (self.name, self.type, self.heartbeat)
480,763
def __str__(self): return "DS:%s:%s:%s:U:U" % (self.name, self.type, self.heartbeat)
def __str__(self): return "DS:%s:%s:%s:U:U" % (self.name, self.type, self.heartbeat)
480,764
def process_node(node_ip, ping_results, is_duped, peers): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in peers: lq_avg += float(peer[1]) ilq_avg += float(peer[2]) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average 
Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.src.all(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile if not n.profile or n.profile.use_captive_portal: 
NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check for VPN statistics if 'vpn' in info: n.vpn_mac = info['vpn']['mac'] # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients >= ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4: Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic 
statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state'], 1), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or 
{} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning(format_exc()) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
def process_node(node_ip, ping_results, is_duped, peers): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in peers: lq_avg += float(peer[1]) ilq_avg += float(peer[2]) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average 
Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.src.all(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile if not n.profile or n.profile.use_captive_portal: 
NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check for VPN statistics if 'vpn' in info: n.vpn_mac = info['vpn']['mac'] # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic 
statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state'], 1), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or 
{} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning(format_exc()) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
480,765
def draw_graph(graph_id, timespan): """ Draws the specified graph. @param graph_id: Graph primary key @param timespan: Timespan to draw the graph for @return: True on success, False on failure """ logger = draw_graph.get_logger() # First check that we haven't drawn this graph already result = cache.get('nodewatcher.graphs.drawn.{0}.{1}'.format(graph_id, timespan)) if result is not None: return bool(result) # Since the graph has not yet been drawn, let's draw it try: graph_id = int(graph_id) # XXX Check for hardcoded graphs if graph_id > 0: graph = nodes_models.GraphItem.objects.get(pk = graph_id) archive_path = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', graph.rra)) # Actually draw the graph rrd.RRA.graph( graphs.RRA_CONF_MAP[graph.type], str(graph.title), graph.id, archive_path, end_time = int(time.mktime(graph.last_update.timetuple())), dead = graph.dead, last_updated = graph.last_update, timespan = timespan ) else: # XXX One of the hardcoded graphs conf, title, rrd_path = GLOBAL_GRAPHS[graph_id] archive_path = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', rrd_path)) # Actually draw the graph rrd.RRA.graph(conf, title, graph_id, archive_path, timespan = timespan) result = True except: logger.error(traceback.format_exc()) result = False # Mark the graph as drawn cache.set('nodewatcher.graphs.drawn.{0}.{1}'.format(graph_id, timespan), result) return result
def draw_graph(graph_id, timespan): """ Draws the specified graph. @param graph_id: Graph primary key @param timespan: Timespan to draw the graph for @return: True on success, False on failure """ logger = draw_graph.get_logger() # First check that we haven't drawn this graph already result = cache.get('nodewatcher.graphs.drawn.{0}.{1}'.format(graph_id, timespan)) if result is not None: return bool(result) # Since the graph has not yet been drawn, let's draw it try: graph_id = int(graph_id) # XXX Check for hardcoded graphs if graph_id > 0: graph = nodes_models.GraphItem.objects.get(pk = graph_id) archive_path = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', graph.rra)) # Actually draw the graph rrd.RRA.graph( graphs.RRA_CONF_MAP[graph.type], str(graph.title), graph.id, archive_path, end_time = int(time.mktime(graph.last_update.timetuple())), dead = graph.dead, last_update = graph.last_update, timespan = timespan ) else: # XXX One of the hardcoded graphs conf, title, rrd_path = GLOBAL_GRAPHS[graph_id] archive_path = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', rrd_path)) # Actually draw the graph rrd.RRA.graph(conf, title, graph_id, archive_path, timespan = timespan) result = True except: logger.error(traceback.format_exc()) result = False # Mark the graph as drawn cache.set('nodewatcher.graphs.drawn.{0}.{1}'.format(graph_id, timespan), result) return result
480,766
def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: 
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Validate BSSID and ESSID if n.bssid != "02:CA:FF:EE:BA:BE": NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' 
% (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:-3]) // 1000 except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile try: if n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except 
Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check 
mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs try: if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi except Profile.DoesNotExist: pass add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = 
nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: 
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Validate BSSID and ESSID if n.bssid != "02:CA:FF:EE:BA:BE": NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' 
% (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: offset = -3 unit = 1000 if 'Kbit' in info['net']['vpn']['upload_limit']: offset = -4 unit = 1 upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:offset]) // unit except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile try: if 
n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics 
for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs try: if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi except Profile.DoesNotExist: pass add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update 
or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
480,767
def check_mesh_status(): """ Performs a mesh status check. """ # Initialize the state of nodes and subnets, remove out of date ap clients and graph items Node.objects.all().update(visible = False) Subnet.objects.all().update(visible = False) APClient.objects.filter(last_update__lt = datetime.now() - timedelta(minutes = 11)).delete() GraphItem.objects.filter(last_update__lt = datetime.now() - timedelta(days = 30)).delete() # Reset some states NodeWarning.objects.all().update(source = EventSource.Monitor, dirty = False) Node.objects.all().update(warnings = False, conflicting_subnets = False) Link.objects.all().delete() # Fetch routing tables from OLSR try: nodes, hna = wifi_utils.get_tables(settings.MONITOR_OLSR_HOST) except TypeError: logging.error("Unable to fetch routing tables from '%s'!" % settings.MONITOR_OLSR_HOST) return # Ping nodes present in the database and visible in OLSR dbNodes = {} nodesToPing = [] for nodeIp in nodes.keys(): try: # Try to get the node from the database n = Node.get_exclusive(ip = nodeIp) n.visible = True n.peers = len(nodes[nodeIp].links) # If we have succeeded, add to list (if not invalid) if not n.is_invalid(): if n.awaiting_renumber: # Reset any status from awaiting renumber to invalid for notice in n.renumber_notices.all(): try: rn = Node.objects.get(ip = notice.original_ip) if rn.status == NodeStatus.AwaitingRenumber: rn.status = NodeStatus.Invalid rn.node_type = NodeType.Unknown rn.awaiting_renumber = False rn.save() except Node.DoesNotExist: pass notice.delete() n.awaiting_renumber = False n.save() nodesToPing.append(nodeIp) else: n.last_seen = datetime.now() n.peers = len(nodes[nodeIp].links) # Create a warning since node is not registered NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor) n.save() dbNodes[nodeIp] = n except Node.DoesNotExist: # Node does not exist, create an invalid entry for it n = Node(ip = nodeIp, status = NodeStatus.Invalid, last_seen = datetime.now()) n.visible = True n.node_type = 
NodeType.Unknown n.peers = len(nodes[nodeIp].links) # Check if there are any renumber notices for this IP address try: notice = RenumberNotice.objects.get(original_ip = nodeIp) n.status = NodeStatus.AwaitingRenumber n.node_type = notice.node.node_type n.awaiting_renumber = True except RenumberNotice.DoesNotExist: pass n.save(force_insert = True) dbNodes[nodeIp] = n # Create an event and append a warning since an unknown node has appeared NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor) Event.create_event(n, EventCode.UnknownNodeAppeared, '', EventSource.Monitor) # Add a warning to all nodes that have been stuck in renumbering state for over a week for node in Node.objects.filter(renumber_notices__renumbered_at__lt = datetime.now() - timedelta(days = 7)): NodeWarning.create(node, WarningCode.LongRenumber, EventSource.Monitor) node.save() # Mark invisible nodes as down for node in Node.objects.exclude(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber)): oldStatus = node.status if node.ip not in dbNodes: if node.status == NodeStatus.New: node.status = NodeStatus.Pending elif node.status != NodeStatus.Pending: node.status = NodeStatus.Down node.save() if oldStatus in (NodeStatus.Up, NodeStatus.Visible, NodeStatus.Duped) and node.status == NodeStatus.Down: Event.create_event(node, EventCode.NodeDown, '', EventSource.Monitor) # Invalidate uptime credit for this node node.uptime_last = None node.save() # Setup all node peerings for nodeIp, node in nodes.iteritems(): n = dbNodes[nodeIp] oldRedundancyLink = n.redundancy_link n.redundancy_link = False for peerIp, lq, ilq, etx, vtime in node.links: l = Link(src = n, dst = dbNodes[peerIp], lq = float(lq), ilq = float(ilq), etx = float(etx), vtime = vtime) l.save() # Check if any of the peers has never peered with us before if n.is_adjacency_important() and l.dst.is_adjacency_important() and not n.peer_history.filter(pk = l.dst.pk).count(): n.peer_history.add(l.dst) Event.create_event(n, 
EventCode.AdjacencyEstablished, '', EventSource.Monitor, data = 'Peer node: %s' % l.dst, aggregate = False) Event.create_event(l.dst, EventCode.AdjacencyEstablished, '', EventSource.Monitor, data = 'Peer node: %s' % n, aggregate = False) # Check if we have a peering with any border routers if l.dst.border_router: n.redundancy_link = True if not n.is_invalid(): if oldRedundancyLink and not n.redundancy_link: Event.create_event(n, EventCode.RedundancyLoss, '', EventSource.Monitor) elif not oldRedundancyLink and n.redundancy_link: Event.create_event(n, EventCode.RedundancyRestored, '', EventSource.Monitor) if n.redundancy_req and not n.redundancy_link: NodeWarning.create(n, WarningCode.NoBorderPeering, EventSource.Monitor) n.save() # Add nodes to topology map and generate output if not getattr(settings, 'MONITOR_DISABLE_GRAPHS', None): # Only generate topology when graphing is not disabled topology = DotTopologyPlotter() for node in dbNodes.values(): topology.addNode(node) topology.save(os.path.join(settings.GRAPH_DIR, 'mesh_topology.png'), os.path.join(settings.GRAPH_DIR, 'mesh_topology.dot')) # Update valid subnet status in the database for nodeIp, subnets in hna.iteritems(): if nodeIp not in dbNodes: continue for subnet in subnets: subnet, cidr = subnet.split("/") try: s = Subnet.objects.get(node__ip = nodeIp, subnet = subnet, cidr = int(cidr)) s.last_seen = datetime.now() s.visible = True if s.status == SubnetStatus.Subset: pass elif s.status in (SubnetStatus.AnnouncedOk, SubnetStatus.NotAnnounced): s.status = SubnetStatus.AnnouncedOk elif not s.node.border_router or s.status == SubnetStatus.Hijacked: NodeWarning.create(s.node, WarningCode.UnregisteredAnnounce, EventSource.Monitor) s.node.save() # Recheck if this is a more specific prefix announce for an allocated prefix if s.status == SubnetStatus.NotAllocated and s.is_more_specific(): s.status = SubnetStatus.Subset s.save() except Subnet.DoesNotExist: # Subnet does not exist, prepare one s = Subnet(node = 
dbNodes[nodeIp], subnet = subnet, cidr = int(cidr), last_seen = datetime.now()) s.visible = True # Check if this is a more specific prefix announce for an allocated prefix if s.is_more_specific(): s.status = SubnetStatus.Subset else: s.status = SubnetStatus.NotAllocated s.save() # Check if this is a hijack n = dbNodes[nodeIp] try: origin = Subnet.objects.ip_filter( # Subnet overlaps with another one ip_subnet__contains = '%s/%s' % (subnet, cidr) ).exclude( # Of another node (= filter all subnets belonging to current node) node = s.node ).get( # That is allocated and visible allocated = True, visible = True ) s.status = SubnetStatus.Hijacked s.save() # Generate an event Event.create_event(n, EventCode.SubnetHijacked, '', EventSource.Monitor, data = 'Subnet: %s/%s\n Allocated to: %s' % (s.subnet, s.cidr, origin.node)) except Subnet.DoesNotExist: pass # Flag node entry with warnings flag (if not a border router) if s.status != SubnetStatus.Subset and (not n.border_router or s.status == SubnetStatus.Hijacked): NodeWarning.create(n, WarningCode.AnnounceConflict, EventSource.Monitor) n.save() # Detect subnets that cause conflicts and raise warning flags for all involved # nodes if s.is_conflicting(): NodeWarning.create(s.node, WarningCode.AnnounceConflict, EventSource.Monitor) s.node.conflicting_subnets = True s.node.save() for cs in s.get_conflicting_subnets(): NodeWarning.create(cs.node, WarningCode.AnnounceConflict, EventSource.Monitor) cs.node.conflicting_subnets = True cs.node.save() # Remove (or change their status) subnets that are not visible Subnet.objects.filter(status__in = (SubnetStatus.NotAllocated, SubnetStatus.Subset), visible = False).delete() Subnet.objects.filter(status = SubnetStatus.AnnouncedOk, visible = False).update(status = SubnetStatus.NotAnnounced) for subnet in Subnet.objects.filter(status = SubnetStatus.NotAnnounced, node__visible = True): NodeWarning.create(subnet.node, WarningCode.OwnNotAnnounced, EventSource.Monitor) subnet.node.save() # 
Remove subnets that were hijacked but are not visible anymore for s in Subnet.objects.filter(status = SubnetStatus.Hijacked, visible = False): Event.create_event(s.node, EventCode.SubnetRestored, '', EventSource.Monitor, data = 'Subnet: %s/%s' % (s.subnet, s.cidr)) s.delete() # Remove invisible unknown nodes for node in Node.objects.filter(status = NodeStatus.Invalid, visible = False).all(): # Create an event since an unknown node has disappeared Event.create_event(node, EventCode.UnknownNodeDisappeared, '', EventSource.Monitor) Node.objects.filter(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber), visible = False).delete() # Ping the nodes to prepare information for later node processing varsize_results = {} results, dupes = wifi_utils.ping_hosts(10, nodesToPing) for packet_size in (100, 500, 1000, 1480): r, d = wifi_utils.ping_hosts(10, nodesToPing, packet_size - 8) for node_ip in nodesToPing: varsize_results.setdefault(node_ip, []).append(r[node_ip][3] if node_ip in r else None) if getattr(settings, 'MONITOR_DISABLE_MULTIPROCESSING', None): # Multiprocessing is disabled (the MONITOR_DISABLE_MULTIPROCESSING option is usually # used for debug purpuses where a single process is prefered) for node_ip in nodesToPing: process_node(node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip)) # Commit the transaction here since we do everything in the same session transaction.commit() else: # We MUST commit the current transaction here, because we will be processing # some transactions in parallel and must ensure that this transaction that has # modified the nodes is commited. Otherwise this will deadlock! 
transaction.commit() worker_results = [] for node_ip in nodesToPing: worker_results.append( WORKER_POOL.apply_async(process_node, (node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip))) ) # Wait for all workers to finish processing objects = {} for result in worker_results: try: k, v = result.get() objects[k] = v except Exception, e: logging.warning(format_exc()) # When GC debugging is enabled make some additional computations if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): global _MAX_GC_OBJCOUNT objcount = sum(objects.values()) if '_MAX_GC_OBJCOUNT' not in globals(): _MAX_GC_OBJCOUNT = objcount logging.debug("GC object count: %d %s" % (objcount, "!M" if objcount > _MAX_GC_OBJCOUNT else "")) _MAX_GC_OBJCOUNT = max(_MAX_GC_OBJCOUNT, objcount) # Cleanup all out of date warnings NodeWarning.clear_obsolete_warnings(EventSource.Monitor)
def check_mesh_status(): """ Performs a mesh status check. """ # Initialize the state of nodes and subnets, remove out of date ap clients and graph items Node.objects.all().update(visible = False) Subnet.objects.all().update(visible = False) APClient.objects.filter(last_update__lt = datetime.now() - timedelta(minutes = 11)).delete() GraphItem.objects.filter(last_update__lt = datetime.now() - timedelta(days = 30)).delete() # Reset some states NodeWarning.objects.all().update(source = EventSource.Monitor, dirty = False) Node.objects.all().update(warnings = False, conflicting_subnets = False) Link.objects.all().delete() # Fetch routing tables from OLSR try: nodes, hna = wifi_utils.get_tables(settings.MONITOR_OLSR_HOST) except TypeError: logging.error("Unable to fetch routing tables from '%s'!" % settings.MONITOR_OLSR_HOST) return # Ping nodes present in the database and visible in OLSR dbNodes = {} nodesToPing = [] for nodeIp in nodes.keys(): try: # Try to get the node from the database n = Node.get_exclusive(ip = nodeIp) n.visible = True n.peers = len(nodes[nodeIp].links) # If we have succeeded, add to list (if not invalid) if not n.is_invalid(): if n.awaiting_renumber: # Reset any status from awaiting renumber to invalid for notice in n.renumber_notices.all(): try: rn = Node.objects.get(ip = notice.original_ip) if rn.status == NodeStatus.AwaitingRenumber: rn.status = NodeStatus.Invalid rn.node_type = NodeType.Unknown rn.awaiting_renumber = False rn.save() except Node.DoesNotExist: pass notice.delete() n.awaiting_renumber = False n.save() nodesToPing.append(nodeIp) else: n.last_seen = datetime.now() n.peers = len(nodes[nodeIp].links) # Create a warning since node is not registered NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor) n.save() dbNodes[nodeIp] = n except Node.DoesNotExist: # Node does not exist, create an invalid entry for it n = Node(ip = nodeIp, status = NodeStatus.Invalid, last_seen = datetime.now()) n.visible = True n.node_type = 
NodeType.Unknown n.peers = len(nodes[nodeIp].links) # Check if there are any renumber notices for this IP address try: notice = RenumberNotice.objects.get(original_ip = nodeIp) n.status = NodeStatus.AwaitingRenumber n.node_type = notice.node.node_type n.awaiting_renumber = True except RenumberNotice.DoesNotExist: pass n.save(force_insert = True) dbNodes[nodeIp] = n # Create an event and append a warning since an unknown node has appeared NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor) Event.create_event(n, EventCode.UnknownNodeAppeared, '', EventSource.Monitor) # Add a warning to all nodes that have been stuck in renumbering state for over a week for node in Node.objects.filter(renumber_notices__renumbered_at__lt = datetime.now() - timedelta(days = 7)): NodeWarning.create(node, WarningCode.LongRenumber, EventSource.Monitor) node.save() # Mark invisible nodes as down for node in Node.objects.exclude(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber)): oldStatus = node.status if node.ip not in dbNodes: if node.status == NodeStatus.New: node.status = NodeStatus.Pending elif node.status != NodeStatus.Pending: node.status = NodeStatus.Down node.save() if oldStatus in (NodeStatus.Up, NodeStatus.Visible, NodeStatus.Duped) and node.status == NodeStatus.Down: Event.create_event(node, EventCode.NodeDown, '', EventSource.Monitor) # Invalidate uptime credit for this node node.uptime_last = None node.save() # Setup all node peerings for nodeIp, node in nodes.iteritems(): n = dbNodes[nodeIp] oldRedundancyLink = n.redundancy_link n.redundancy_link = False for peerIp, lq, ilq, etx, vtime in node.links: l = Link(src = n, dst = dbNodes[peerIp], lq = float(lq), ilq = float(ilq), etx = float(etx), vtime = vtime) l.save() # Check if any of the peers has never peered with us before if n.is_adjacency_important() and l.dst.is_adjacency_important() and not n.peer_history.filter(pk = l.dst.pk).count(): n.peer_history.add(l.dst) Event.create_event(n, 
EventCode.AdjacencyEstablished, '', EventSource.Monitor, data = 'Peer node: %s' % l.dst, aggregate = False) Event.create_event(l.dst, EventCode.AdjacencyEstablished, '', EventSource.Monitor, data = 'Peer node: %s' % n, aggregate = False) # Check if we have a peering with any border routers if l.dst.border_router: n.redundancy_link = True if not n.is_invalid(): if oldRedundancyLink and not n.redundancy_link: Event.create_event(n, EventCode.RedundancyLoss, '', EventSource.Monitor) elif not oldRedundancyLink and n.redundancy_link: Event.create_event(n, EventCode.RedundancyRestored, '', EventSource.Monitor) if n.redundancy_req and not n.redundancy_link: NodeWarning.create(n, WarningCode.NoBorderPeering, EventSource.Monitor) n.save() # Add nodes to topology map and generate output if not getattr(settings, 'MONITOR_DISABLE_GRAPHS', None): # Only generate topology when graphing is not disabled topology = DotTopologyPlotter() for node in dbNodes.values(): topology.addNode(node) topology.save(os.path.join(settings.GRAPH_DIR, 'mesh_topology.png'), os.path.join(settings.GRAPH_DIR, 'mesh_topology.dot')) # Update valid subnet status in the database for nodeIp, subnets in hna.iteritems(): if nodeIp not in dbNodes: continue for subnet in subnets: subnet, cidr = subnet.split("/") try: s = Subnet.objects.get(node__ip = nodeIp, subnet = subnet, cidr = int(cidr)) s.last_seen = datetime.now() s.visible = True if s.status == SubnetStatus.Subset: pass elif s.status in (SubnetStatus.AnnouncedOk, SubnetStatus.NotAnnounced): s.status = SubnetStatus.AnnouncedOk elif not s.node.border_router or s.status == SubnetStatus.Hijacked: NodeWarning.create(s.node, WarningCode.UnregisteredAnnounce, EventSource.Monitor) s.node.save() # Recheck if this is a more specific prefix announce for an allocated prefix if s.status == SubnetStatus.NotAllocated and s.is_more_specific(): s.status = SubnetStatus.Subset s.save() except Subnet.DoesNotExist: # Subnet does not exist, prepare one s = Subnet(node = 
dbNodes[nodeIp], subnet = subnet, cidr = int(cidr), last_seen = datetime.now()) s.visible = True # Check if this is a more specific prefix announce for an allocated prefix if s.is_more_specific(): s.status = SubnetStatus.Subset else: s.status = SubnetStatus.NotAllocated s.save() # Check if this is a hijack n = dbNodes[nodeIp] try: origin = Subnet.objects.ip_filter( # Subnet overlaps with another one ip_subnet__contains = '%s/%s' % (subnet, cidr) ).exclude( # Of another node (= filter all subnets belonging to current node) node = s.node ).get( # That is allocated and visible allocated = True, visible = True ) s.status = SubnetStatus.Hijacked s.save() # Generate an event Event.create_event(n, EventCode.SubnetHijacked, '', EventSource.Monitor, data = 'Subnet: %s/%s\n Allocated to: %s' % (s.subnet, s.cidr, origin.node)) except Subnet.DoesNotExist: pass # Flag node entry with warnings flag (if not a border router) if s.status != SubnetStatus.Subset and (not n.border_router or s.status == SubnetStatus.Hijacked): NodeWarning.create(n, WarningCode.UnregisteredAnnounce, EventSource.Monitor) n.save() # Detect subnets that cause conflicts and raise warning flags for all involved # nodes if s.is_conflicting(): NodeWarning.create(s.node, WarningCode.AnnounceConflict, EventSource.Monitor) s.node.conflicting_subnets = True s.node.save() for cs in s.get_conflicting_subnets(): NodeWarning.create(cs.node, WarningCode.AnnounceConflict, EventSource.Monitor) cs.node.conflicting_subnets = True cs.node.save() # Remove (or change their status) subnets that are not visible Subnet.objects.filter(status__in = (SubnetStatus.NotAllocated, SubnetStatus.Subset), visible = False).delete() Subnet.objects.filter(status = SubnetStatus.AnnouncedOk, visible = False).update(status = SubnetStatus.NotAnnounced) for subnet in Subnet.objects.filter(status = SubnetStatus.NotAnnounced, node__visible = True): NodeWarning.create(subnet.node, WarningCode.OwnNotAnnounced, EventSource.Monitor) subnet.node.save() 
# Remove subnets that were hijacked but are not visible anymore for s in Subnet.objects.filter(status = SubnetStatus.Hijacked, visible = False): Event.create_event(s.node, EventCode.SubnetRestored, '', EventSource.Monitor, data = 'Subnet: %s/%s' % (s.subnet, s.cidr)) s.delete() # Remove invisible unknown nodes for node in Node.objects.filter(status = NodeStatus.Invalid, visible = False).all(): # Create an event since an unknown node has disappeared Event.create_event(node, EventCode.UnknownNodeDisappeared, '', EventSource.Monitor) Node.objects.filter(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber), visible = False).delete() # Ping the nodes to prepare information for later node processing varsize_results = {} results, dupes = wifi_utils.ping_hosts(10, nodesToPing) for packet_size in (100, 500, 1000, 1480): r, d = wifi_utils.ping_hosts(10, nodesToPing, packet_size - 8) for node_ip in nodesToPing: varsize_results.setdefault(node_ip, []).append(r[node_ip][3] if node_ip in r else None) if getattr(settings, 'MONITOR_DISABLE_MULTIPROCESSING', None): # Multiprocessing is disabled (the MONITOR_DISABLE_MULTIPROCESSING option is usually # used for debug purpuses where a single process is prefered) for node_ip in nodesToPing: process_node(node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip)) # Commit the transaction here since we do everything in the same session transaction.commit() else: # We MUST commit the current transaction here, because we will be processing # some transactions in parallel and must ensure that this transaction that has # modified the nodes is commited. Otherwise this will deadlock! 
transaction.commit() worker_results = [] for node_ip in nodesToPing: worker_results.append( WORKER_POOL.apply_async(process_node, (node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip))) ) # Wait for all workers to finish processing objects = {} for result in worker_results: try: k, v = result.get() objects[k] = v except Exception, e: logging.warning(format_exc()) # When GC debugging is enabled make some additional computations if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): global _MAX_GC_OBJCOUNT objcount = sum(objects.values()) if '_MAX_GC_OBJCOUNT' not in globals(): _MAX_GC_OBJCOUNT = objcount logging.debug("GC object count: %d %s" % (objcount, "!M" if objcount > _MAX_GC_OBJCOUNT else "")) _MAX_GC_OBJCOUNT = max(_MAX_GC_OBJCOUNT, objcount) # Cleanup all out of date warnings NodeWarning.clear_obsolete_warnings(EventSource.Monitor)
480,768
def __init__(self, user, node, *args, **kwargs): """ Class constructor. """ super(RenumberForm, self).__init__(*args, **kwargs) self.__node = node # Use renumber with subnet only when this is possible self.fields['primary_ip'] = forms.ChoiceField( choices = [ (RenumberAction.SetManually, _("Set manually")) ], initial = RenumberAction.SetManually ) if node.is_primary_ip_in_subnet(): self.fields['primary_ip'].choices.insert(0, (RenumberAction.Renumber, _("Renumber with subnet")) ) self.fields['primary_ip'].initial = RenumberAction.Renumber else: self.fields['primary_ip'].choices.insert(0, (RenumberAction.Keep, _("Keep")), ) self.fields['primary_ip'].initial = RenumberAction.Keep if not user.is_staff: del self.fields['primary_ip'].choices[1] # Setup dynamic form fields, depending on how may subnets a node has primary = node.subnet_set.ip_filter(ip_subnet__contains = "%s/32" % node.ip).filter(allocated = True).exclude(cidr = 0) for subnet in node.subnet_set.filter(allocated = True).order_by('ip_subnet'): pools = [] for pool in node.project.pools.exclude(status = PoolStatus.Full).order_by('network'): pools.append((pool.pk, _("Renumber to %s [%s/%s]") % (pool.description, pool.network, pool.cidr))) choices = [ (RenumberAction.Keep, _("Keep")), (RenumberAction.Remove, _("Remove")) ] # Primary subnets should not be removed if primary and primary[0] == subnet: del choices[1] self.fields['subnet_%s' % subnet.pk] = forms.ChoiceField( choices = choices + pools, initial = RenumberAction.Keep, widget = forms.Select(attrs = { 'class' : 'subnet' }) ) # Field for choosing new subnet prefix size self.fields['prefix_%s' % subnet.pk] = forms.IntegerField()
def __init__(self, user, node, *args, **kwargs): """ Class constructor. """ super(RenumberForm, self).__init__(*args, **kwargs) self.__node = node # Use renumber with subnet only when this is possible self.fields['primary_ip'] = forms.ChoiceField( choices = [ (RenumberAction.SetManually, _("Set manually")) ], initial = RenumberAction.SetManually ) if node.is_primary_ip_in_subnet(): self.fields['primary_ip'].choices.insert(0, (RenumberAction.Renumber, _("Renumber with subnet")) ) self.fields['primary_ip'].initial = RenumberAction.Renumber else: self.fields['primary_ip'].choices.insert(0, (RenumberAction.Keep, _("Keep")), ) self.fields['primary_ip'].initial = RenumberAction.Keep if not user.is_staff: del self.fields['primary_ip'].choices[1] # Setup dynamic form fields, depending on how may subnets a node has primary = node.subnet_set.ip_filter(ip_subnet__contains = "%s/32" % node.ip).filter(allocated = True).exclude(cidr = 0) for subnet in node.subnet_set.filter(allocated = True).order_by('ip_subnet'): pools = [] for pool in node.project.pools.exclude(status = PoolStatus.Full).order_by('network'): pools.append((pool.pk, _("Renumber to %s [%s/%s]") % (pool.description, pool.network, pool.cidr))) choices = [ (RenumberAction.Keep, _("Keep")), (RenumberAction.Remove, _("Remove")) ] # Primary subnets should not be removed if primary and primary[0] == subnet: del choices[1] self.fields['subnet_%s' % subnet.pk] = forms.ChoiceField( choices = choices + pools, initial = RenumberAction.Keep, widget = forms.Select(attrs = { 'class' : 'subnet' }) ) # Field for choosing new subnet prefix size self.fields['prefix_%s' % subnet.pk] = forms.IntegerField(required = False, initial = 27)
480,769
def to_help_string(code): """ A helper method for transforming a warning code to a human readable help string.
def to_help_string(code): """ A helper method for transforming a warning code to a human readable help string.
480,770
def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: 
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in peers: lq_avg += float(peer[1]) ilq_avg += float(peer[2]) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.src.all(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) if n.has_time_sync_problems(): NodeWarning.create(n, 
WarningCode.TimeOutOfSync, EventSource.Monitor) # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile if not n.profile or n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check for VPN statistics if 'vpn' in info: n.vpn_mac = info['vpn']['mac'] # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 
'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], 
info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state'], 1), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning(format_exc()) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
def for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None process_node(node_ip, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ping_results, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None is_duped, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None peers, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None varsize_results): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None """ for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Processes for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None a for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None single for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node. 
for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None @param for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node_ip: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Node's for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None IP for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None address for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None @param for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ping_results: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Results for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None obtained for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None from for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ICMP for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ECHO for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None tests for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None @param for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None is_duped: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None True for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None duplicate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None echos for 
key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None received for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None @param for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None peers: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Peering for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None from for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None routing for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None daemon for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None @param for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None varsize_results: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Results for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None of for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ICMP for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ECHO for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None tests for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None with for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None variable for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None payloads for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None """ for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 
transaction.set_dirty() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None try: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Node.get_exclusive(ip for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node_ip) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None except for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Node.DoesNotExist: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None This for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None might for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None happen for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None when for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None we for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None were for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None the for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None middle for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 
of for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None a for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None renumbering for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None did for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None yet for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None have for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None access for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None to for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None the for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node. 
for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Then for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None after for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None the for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None has for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None been for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None renumbered for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None we for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None gain for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None access, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None but for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None the for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None IP for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None has for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None been for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None changed. 
for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None In for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None this for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None case for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None we for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None must for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ignore for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None processing for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None of for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None this for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node. 
for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None return for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldStatus for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Determine for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ping_results for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None is for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None None: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeStatus.Up for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.rtt_min, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.rtt_avg, for key, 
value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.rtt_max, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.pkt_loss for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ping_results for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Add for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RTT for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None graph for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.RTT, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRARTT, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Latency', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'latency', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.rtt_avg, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.rtt_min, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.rtt_max) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 
# for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Add for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None uptime for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None credit for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.uptime_last: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.uptime_so_far for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None (n.uptime_so_far for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None or for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 0) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None + for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None (datetime.now() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None - for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.uptime_last).seconds for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.uptime_last for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None datetime.now() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None else: for key, value in info['solar'].iteritems(): if not value.strip(): 
info['solar'][key] = None n.status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeStatus.Visible for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Measure for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packet for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None loss for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None with for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None different for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packet for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None sizes for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None generate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None a for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None graph for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ping_results for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None is for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): 
info['solar'][key] = None None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None varsize_results for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None is for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None None: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None losses for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None [n.pkt_loss] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None + for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None varsize_results for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.PacketLoss, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRAPacketLoss, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Packet for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Loss', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'packetloss', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None *losses) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None is_duped: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeStatus.Duped for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeWarning.create(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None WarningCode.DupedReplies, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Generate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None change for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None events for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldStatus for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None (NodeStatus.Down, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeStatus.Pending, for key, value in info['solar'].iteritems(): if not 
value.strip(): info['solar'][key] = None NodeStatus.New) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None (NodeStatus.Up, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeStatus.Visible): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldStatus for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None (NodeStatus.New, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeStatus.Pending): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.first_seen for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None datetime.now() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.node_type for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None == for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeType.Mesh: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None generate_new_node_tweet(n) for key, value in info['solar'].iteritems(): if not value.strip(): 
info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Event.create_event(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventCode.NodeUp, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None elif for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldStatus for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None != for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeStatus.Duped for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None == for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeStatus.Duped: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Event.create_event(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventCode.PacketDuplication, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = 
None Add for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None olsr for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None peer for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None count for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None graph for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.OlsrPeers, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRAOlsrPeers, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Routing for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Peers', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'olsrpeers', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.peers) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Add for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None LQ/ILQ for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None graphs for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.peers for key, value in info['solar'].iteritems(): if not value.strip(): 
info['solar'][key] = None > for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 0: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None lq_avg for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ilq_avg for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 0.0 for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None peer for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None peers: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None lq_avg for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None += for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None float(peer[1]) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ilq_avg for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None += for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None float(peer[2]) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None lq_graph for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 
add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.LQ, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRALinkQuality, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Average for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Link for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Quality', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'lq', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None lq_avg for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None / for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.peers, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ilq_avg for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None / for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.peers) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None peer for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.src.all(): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): 
info['solar'][key] = None peer.dst.ip, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.LQ, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRALinkQuality, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Link for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Quality for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None to for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None %s' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None % for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None peer.dst, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'lq_peer_%s' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None % for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None peer.dst.pk, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None peer.lq, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None peer.ilq, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None parent for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None lq_graph) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.last_seen for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if 
not value.strip(): info['solar'][key] = None datetime.now() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Check for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None we for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None have for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None fetched for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None nodewatcher for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None data for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None nodewatcher.fetch_node_info(node_ip) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None is for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'general' for key, value in info['solar'].iteritems(): if 
not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None try: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldUptime for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.uptime for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None or for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 0 for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldChannel for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.channel for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None or for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 0 for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldVersion for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.firmware_version for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.firmware_version for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['general']['version'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.local_time for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_date_convert(info['general']['local_time']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.bssid for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['wifi']['bssid'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.essid for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['wifi']['essid'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.channel for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None nodewatcher.frequency_to_channel(info['wifi']['frequency']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.clients for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 0 for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.uptime for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_uptime_convert(info['general']['uptime']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): 
info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'uuid' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['general']: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.reported_uuid for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['general']['uuid'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.reported_uuid for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.reported_uuid for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None != for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.uuid: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeWarning.create(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None WarningCode.MismatchedUuid, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldVersion for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 
!= for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.firmware_version: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Event.create_event(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventCode.VersionChange, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None data for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Old for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None version: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None %s\n for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None New for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None version: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None %s' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None % for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None (oldVersion, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.firmware_version)) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not 
value.strip(): info['solar'][key] = None oldUptime for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None > for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.uptime: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Event.create_event(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventCode.UptimeReset, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None data for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Old for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None uptime: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None %s\n for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None New for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None uptime: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None %s' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None % for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None (oldUptime, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.uptime)) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldChannel for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None != for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.channel for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldChannel for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None != for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 0: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Event.create_event(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventCode.ChannelChanged, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None data for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Old for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None channel: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None %s\n for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None New for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 
channel for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None %s' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None % for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None (oldChannel, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.channel)) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.has_time_sync_problems(): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeWarning.create(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None WarningCode.TimeOutOfSync, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Parse for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None nodogsplash for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None client for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None information for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldNdsStatus for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.captive_portal_status for key, value 
in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'nds' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'down' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['nds'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['nds']['down'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None == for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '1': for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.captive_portal_status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None False for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Create for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None a for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None warning for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None when for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None captive for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None portal for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None is for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None down for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None the for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None has for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None it for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None selected for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None its for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None image for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None generator for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None profile for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not 
value.strip(): info['solar'][key] = None n.profile for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None or for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.profile.use_captive_portal: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None NodeWarning.create(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None WarningCode.CaptivePortalDown, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None else: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.captive_portal_status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None True for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None cid, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None client for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['nds'].iteritems(): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None cid.startswith('client'): for key, value in info['solar'].iteritems(): if not 
value.strip(): info['solar'][key] = None continue for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None try: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None c for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None APClient.objects.get(node for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ip for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None client['ip']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None except for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None APClient.DoesNotExist: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None c for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None APClient(node for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.clients_so_far for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None += for key, value in info['solar'].iteritems(): if 
not value.strip(): info['solar'][key] = None 1 for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.clients for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None += for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 1 for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None c.ip for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None client['ip'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None c.connected_at for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_date_convert(client['added_at']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None c.uploaded for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_int_convert(client['up']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None c.downloaded for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_int_convert(client['down']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None c.last_update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): 
info['solar'][key] = None datetime.now() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None c.save() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None else: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.captive_portal_status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None True for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Check for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None captive for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None portal for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None status for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None change for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldNdsStatus for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.captive_portal_status: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Event.create_event(n, for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventCode.CaptivePortalDown, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None elif for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None oldNdsStatus for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.captive_portal_status: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Event.create_event(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventCode.CaptivePortalUp, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Generate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None a for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None graph for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None number for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None of for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None wifi for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None cells for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'cells' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['wifi']: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.WifiCells, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRAWifiCells, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Nearby for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Wifi for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Cells', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'wificells', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_int_convert(info['wifi']['cells']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None or for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 0) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): 
info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node's for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None MAC for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None address for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None on for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None wifi for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iface for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'mac' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['wifi']: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.wifi_mac for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['wifi']['mac'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node's for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RTS for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] 
= None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None fragmentation for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None thresholds for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'rts' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['wifi'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'frag' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['wifi']: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.thresh_rts for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_int_convert(info['wifi']['rts']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None or for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 2347 for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.thresh_frag for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_int_convert(info['wifi']['frag']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None or for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 2347 for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Check for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None VPN for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None statistics for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'vpn' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.vpn_mac for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['vpn']['mac'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Generate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None a for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None graph for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not 
value.strip(): info['solar'][key] = None number for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None of for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None clients for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.Clients, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRAClients, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Connected for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Clients', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'clients', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.clients) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Check for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None IP for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None shortage for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None wifiSubnet for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.subnet_set.filter(gen_iface_type for key, 
value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None IfaceType.WiFi, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None allocated for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None True) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None len(wifiSubnet) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.clients for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None > for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None max(0, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ipcalc.Network(wifiSubnet[0].subnet, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None wifiSubnet[0].cidr).size() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None - for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 4): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Event.create_event(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventCode.IPShortage, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor, for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None data for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Subnet: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None %s\n for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Clients: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None %s' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None % for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None (wifiSubnet[0], for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.clients)) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Record for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None interface for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None traffic for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None statistics for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None all for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None interfaces for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iid, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iface for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['iface'].iteritems(): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iid for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ('wifi0', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'wmaster0'): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Check for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None mappings for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None known for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None wifi for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None interfaces for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None so for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None we for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None can for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None handle for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None hardware for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None changes for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None while for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None the for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None node for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None is for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None up for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None generate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None useless for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None intermediate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None graphs for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.profile: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iface_wifi for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.profile.template.iface_wifi for key, 
value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Template.objects.filter(iface_wifi for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iid).count() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None >= for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 1: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iid for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iface_wifi for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iid, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.Traffic, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRAIface, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Traffic for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None - for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None %s' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None % for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iid, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 
'traffic_%s' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None % for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iid, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iface['up'], for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None iface['down']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Generate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None load for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None average for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None statistics for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'loadavg' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['general']: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.loadavg_1min, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.loadavg_5min, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.loadavg_15min, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.numproc for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not 
value.strip(): info['solar'][key] = None safe_loadavg_convert(info['general']['loadavg']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.LoadAverage, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRALoadAverage, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Load for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Average', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'loadavg', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.loadavg_1min, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.loadavg_5min, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.loadavg_15min) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.NumProc, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRANumProc, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Number for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None of for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Processes', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'numproc', for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.numproc) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Generate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None free for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None memory for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None statistics for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'memfree' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['general']: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.memfree for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_int_convert(info['general']['memfree']) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None buffers for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_int_convert(info['general'].get('buffers', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 0)) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None cached for key, value in info['solar'].iteritems(): 
if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None safe_int_convert(info['general'].get('cached', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 0)) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.MemUsage, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRAMemUsage, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Memory for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Usage', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'memusage', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.memfree, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None buffers, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None cached) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Generate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None solar for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None statistics for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None when for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] 
= None available for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'solar' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None all([x for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['solar'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None x for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ('batvoltage', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'solvoltage', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'charge', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'state', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'load')]): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None states for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None { for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = 
None 'boost' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None : for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 1, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'equalize' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None : for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 2, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'absorption' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None : for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 3, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'float' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): 
info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None : for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 4 for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GraphType.Solar, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None RRASolar, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'Solar for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Monitor', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'solar', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['solar']['batvoltage'], for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['solar']['solvoltage'], for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['solar']['charge'], for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None states.get(info['solar']['state'], for key, 
value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 1), for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['solar']['load'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None ) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Check for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None installed for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None versions for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None (every for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None hour) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None try: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None last_pkg_update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.installedpackage_set.all()[0].last_update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None except: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None last_pkg_update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): 
info['solar'][key] = None None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None last_pkg_update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None or for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None last_pkg_update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None < for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None datetime.now() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None - for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None timedelta(hours for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 1): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packages for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None nodewatcher.fetch_installed_packages(n.ip) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None or for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None {} for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not 
value.strip(): info['solar'][key] = None Remove for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None removed for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packages for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None existing for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None versions for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.installedpackage_set.all(): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package.name for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packages: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package.delete() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None else: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package.version for key, 
value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packages[package.name] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package.last_update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None datetime.now() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package.save() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None del for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packages[package.name] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Add for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None added for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packages for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packageName, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None version for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packages.iteritems(): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package for key, value in info['solar'].iteritems(): if not 
value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None InstalledPackage(node for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package.name for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None packageName for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package.version for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None version for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package.last_update for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None datetime.now() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None package.save() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Check for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None DNS for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None works for key, value in 
info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'dns' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None in for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None old_dns_works for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.dns_works for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.dns_works for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None = for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['dns']['local'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None == for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '0' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None and for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None info['dns']['remote'] for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None == for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '0' for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None not for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.dns_works: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 
NodeWarning.create(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None WarningCode.DnsDown, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None old_dns_works for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None != for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.dns_works: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Generate for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None a for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None proper for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None event for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None when for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None the for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None state for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None changes for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.dns_works: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Event.create_event(n, for key, value in info['solar'].iteritems(): if 
not value.strip(): info['solar'][key] = None EventCode.DnsResolverRestored, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None else: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None Event.create_event(n, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventCode.DnsResolverFailed, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None '', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None EventSource.Monitor) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None except: for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None logging.warning(format_exc()) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None n.save() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None # for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None When for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None GC for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None debugging for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None is for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None enabled for key, value in info['solar'].iteritems(): if not 
value.strip(): info['solar'][key] = None perform for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None some for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None more for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None work for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None if for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None getattr(settings, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None 'MONITOR_ENABLE_GC_DEBUG', for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None None): for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None gc.collect() for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None return for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None os.getpid(), for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None len(gc.get_objects()) for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None return for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None None, for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None None for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None
480,771
def process_node(node_ip, ping_results, is_duped, peers, varsize_results):
    """
    Processes a single node.

    @param node_ip: Node's IP address
    @param ping_results: Results obtained from ICMP ECHO tests
    @param is_duped: True if duplicate echos received
    @param peers: Peering info from routing daemon
    @param varsize_results: Results of ICMP ECHO tests with variable payloads
    """
    # Mark the transaction dirty so Django commits our manual model updates.
    transaction.set_dirty()
    try:
        n = Node.get_exclusive(ip = node_ip)
    except Node.DoesNotExist:
        # This might happen when we were in the middle of a renumbering and
        # did not yet have access to the node. Then after the node has been
        # renumbered we gain access, but the IP has been changed. In this
        # case we must ignore processing of this node.
        return

    oldStatus = n.status

    # Determine node status
    if ping_results is not None:
        n.status = NodeStatus.Up
        n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results

        # Add RTT graph
        add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max)

        # Add uptime credit
        if n.uptime_last:
            n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds
        n.uptime_last = datetime.now()
    else:
        n.status = NodeStatus.Visible

    # Measure packet loss with different packet sizes and generate a graph
    if ping_results is not None and varsize_results is not None:
        losses = [n.pkt_loss] + varsize_results
        add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses)

    if is_duped:
        n.status = NodeStatus.Duped
        NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor)

    # Generate status change events
    if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible):
        if oldStatus in (NodeStatus.New, NodeStatus.Pending):
            n.first_seen = datetime.now()
            if n.node_type == NodeType.Mesh:
                generate_new_node_tweet(n)

        Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor)
    elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped:
        Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor)

    # Add olsr peer count graph
    add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers)

    # Add LQ/ILQ graphs
    if n.peers > 0:
        lq_avg = ilq_avg = 0.0
        for peer in peers:
            # peer tuples: (ip, lq, ilq) — values arrive as strings from olsrd
            lq_avg += float(peer[1])
            ilq_avg += float(peer[2])

        lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers)

        for peer in n.src.all():
            add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph)

    n.last_seen = datetime.now()

    # Check if we have fetched nodewatcher data
    info = nodewatcher.fetch_node_info(node_ip)
    if info is not None and 'general' in info:
        try:
            oldUptime = n.uptime or 0
            oldChannel = n.channel or 0
            oldVersion = n.firmware_version
            n.firmware_version = info['general']['version']
            n.local_time = safe_date_convert(info['general']['local_time'])
            n.bssid = info['wifi']['bssid']
            n.essid = info['wifi']['essid']
            n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency'])
            n.clients = 0
            n.uptime = safe_uptime_convert(info['general']['uptime'])

            if 'uuid' in info['general']:
                n.reported_uuid = info['general']['uuid']
                if n.reported_uuid and n.reported_uuid != n.uuid:
                    NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor)

            if oldVersion != n.firmware_version:
                Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version))

            # Uptime going backwards means the node has rebooted
            if oldUptime > n.uptime:
                Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime))

            if oldChannel != n.channel and oldChannel != 0:
                Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel))

            if n.has_time_sync_problems():
                NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor)

            # Parse nodogsplash client information
            oldNdsStatus = n.captive_portal_status
            if 'nds' in info:
                if 'down' in info['nds'] and info['nds']['down'] == '1':
                    n.captive_portal_status = False

                    # Create a node warning when captive portal is down and the node has it
                    # selected in its image generator profile
                    if not n.profile or n.profile.use_captive_portal:
                        NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor)
                else:
                    n.captive_portal_status = True
                    for cid, client in info['nds'].iteritems():
                        # Only 'clientN' entries describe connected clients
                        if not cid.startswith('client'):
                            continue

                        try:
                            c = APClient.objects.get(node = n, ip = client['ip'])
                        except APClient.DoesNotExist:
                            c = APClient(node = n)
                            n.clients_so_far += 1

                        n.clients += 1
                        c.ip = client['ip']
                        c.connected_at = safe_date_convert(client['added_at'])
                        c.uploaded = safe_int_convert(client['up'])
                        c.downloaded = safe_int_convert(client['down'])
                        c.last_update = datetime.now()
                        c.save()
            else:
                n.captive_portal_status = True

            # Check for captive portal status change
            if oldNdsStatus and not n.captive_portal_status:
                Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor)
            elif not oldNdsStatus and n.captive_portal_status:
                Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor)

            # Generate a graph for number of wifi cells
            if 'cells' in info['wifi']:
                add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0)

            # Update node's MAC address on wifi iface
            if 'mac' in info['wifi']:
                n.wifi_mac = info['wifi']['mac']

            # Update node's RTS and fragmentation thresholds
            # (2347 disables RTS/fragmentation per 802.11 convention)
            if 'rts' in info['wifi'] and 'frag' in info['wifi']:
                n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347
                n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347

            # Check for VPN statistics
            if 'vpn' in info:
                n.vpn_mac = info['vpn']['mac']

            # Generate a graph for number of clients
            add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients)

            # Check for IP shortage (keep a few addresses spare in the wifi subnet)
            wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True)
            if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4):
                Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients))

            # Record interface traffic statistics for all interfaces
            for iid, iface in info['iface'].iteritems():
                if iid not in ('wifi0', 'wmaster0'):
                    # Check mappings for known wifi interfaces so we can handle hardware changes while
                    # the node is up and not generate useless intermediate graphs
                    if n.profile:
                        iface_wifi = n.profile.template.iface_wifi
                        if Template.objects.filter(iface_wifi = iid).count() >= 1:
                            iid = iface_wifi

                    add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down'])

            # Generate load average statistics
            if 'loadavg' in info['general']:
                n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg'])
                add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min)
                add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc)

            # Generate free memory statistics
            if 'memfree' in info['general']:
                n.memfree = safe_int_convert(info['general']['memfree'])
                buffers = safe_int_convert(info['general'].get('buffers', 0))
                cached = safe_int_convert(info['general'].get('cached', 0))
                add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached)

            # Generate solar statistics when available
            if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]):
                # Numeric encoding of the charge-regulator state for RRD storage
                states = {
                    'boost' : 1,
                    'equalize' : 2,
                    'absorption' : 3,
                    'float' : 4
                }

                add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar',
                    info['solar']['batvoltage'],
                    info['solar']['solvoltage'],
                    info['solar']['charge'],
                    states.get(info['solar']['state'], 1),
                    info['solar']['load']
                )

            # Check for installed package versions (every hour)
            try:
                last_pkg_update = n.installedpackage_set.all()[0].last_update
            except:
                last_pkg_update = None

            if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1):
                packages = nodewatcher.fetch_installed_packages(n.ip) or {}

                # Remove removed packages and update existing package versions
                for package in n.installedpackage_set.all():
                    if package.name not in packages:
                        package.delete()
                    else:
                        package.version = packages[package.name]
                        package.last_update = datetime.now()
                        package.save()
                        del packages[package.name]

                # Add added packages
                for packageName, version in packages.iteritems():
                    package = InstalledPackage(node = n)
                    package.name = packageName
                    package.version = version
                    package.last_update = datetime.now()
                    package.save()

            # Check if DNS works
            if 'dns' in info:
                old_dns_works = n.dns_works
                n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0'
                if not n.dns_works:
                    NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor)

                if old_dns_works != n.dns_works:
                    # Generate a proper event when the state changes
                    if n.dns_works:
                        Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor)
                    else:
                        Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor)
        except:
            # NOTE(review): broad except deliberately keeps the monitor alive on
            # malformed nodewatcher output; the traceback is logged for diagnosis.
            logging.warning(format_exc())

    n.save()

    # When GC debugging is enabled perform some more work
    if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
        gc.collect()
        return os.getpid(), len(gc.get_objects())

    return None, None
def process_node(node_ip, ping_results, is_duped, peers, varsize_results):
    """
    Processes a single node.

    @param node_ip: Node's IP address
    @param ping_results: Results obtained from ICMP ECHO tests
    @param is_duped: True if duplicate echos received
    @param peers: Peering info from routing daemon
    @param varsize_results: Results of ICMP ECHO tests with variable payloads
    """
    # Mark the transaction dirty so Django commits our manual model updates.
    transaction.set_dirty()
    try:
        n = Node.get_exclusive(ip = node_ip)
    except Node.DoesNotExist:
        # This might happen when we were in the middle of a renumbering and
        # did not yet have access to the node. Then after the node has been
        # renumbered we gain access, but the IP has been changed. In this
        # case we must ignore processing of this node.
        return

    oldStatus = n.status

    # Determine node status
    if ping_results is not None:
        n.status = NodeStatus.Up
        n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results

        # Add RTT graph
        add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max)

        # Add uptime credit
        if n.uptime_last:
            n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds
        n.uptime_last = datetime.now()
    else:
        n.status = NodeStatus.Visible

    # Measure packet loss with different packet sizes and generate a graph
    if ping_results is not None and varsize_results is not None:
        losses = [n.pkt_loss] + varsize_results
        add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses)

    if is_duped:
        n.status = NodeStatus.Duped
        NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor)

    # Generate status change events
    if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible):
        if oldStatus in (NodeStatus.New, NodeStatus.Pending):
            n.first_seen = datetime.now()
            if n.node_type == NodeType.Mesh:
                generate_new_node_tweet(n)

        Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor)
    elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped:
        Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor)

    # Add olsr peer count graph
    add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers)

    # Add LQ/ILQ graphs
    if n.peers > 0:
        lq_avg = ilq_avg = 0.0
        for peer in peers:
            # peer tuples: (ip, lq, ilq) — values arrive as strings from olsrd
            lq_avg += float(peer[1])
            ilq_avg += float(peer[2])

        lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers)

        for peer in n.src.all():
            add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph)

    n.last_seen = datetime.now()

    # Check if we have fetched nodewatcher data
    info = nodewatcher.fetch_node_info(node_ip)
    if info is not None and 'general' in info:
        try:
            oldUptime = n.uptime or 0
            oldChannel = n.channel or 0
            oldVersion = n.firmware_version
            n.firmware_version = info['general']['version']
            n.local_time = safe_date_convert(info['general']['local_time'])
            n.bssid = info['wifi']['bssid']
            n.essid = info['wifi']['essid']
            n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency'])
            n.clients = 0
            n.uptime = safe_uptime_convert(info['general']['uptime'])

            if 'uuid' in info['general']:
                n.reported_uuid = info['general']['uuid']
                if n.reported_uuid and n.reported_uuid != n.uuid:
                    NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor)

            if oldVersion != n.firmware_version:
                Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version))

            # Uptime going backwards means the node has rebooted
            if oldUptime > n.uptime:
                Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime))

            if oldChannel != n.channel and oldChannel != 0:
                Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel))

            if n.has_time_sync_problems():
                NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor)

            # Parse nodogsplash client information
            oldNdsStatus = n.captive_portal_status
            if 'nds' in info:
                if 'down' in info['nds'] and info['nds']['down'] == '1':
                    n.captive_portal_status = False

                    # Create a node warning when captive portal is down and the node has it
                    # selected in its image generator profile
                    if not n.profile or n.profile.use_captive_portal:
                        NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor)
                else:
                    n.captive_portal_status = True
                    for cid, client in info['nds'].iteritems():
                        # Only 'clientN' entries describe connected clients
                        if not cid.startswith('client'):
                            continue

                        try:
                            c = APClient.objects.get(node = n, ip = client['ip'])
                        except APClient.DoesNotExist:
                            c = APClient(node = n)
                            n.clients_so_far += 1

                        n.clients += 1
                        c.ip = client['ip']
                        c.connected_at = safe_date_convert(client['added_at'])
                        c.uploaded = safe_int_convert(client['up'])
                        c.downloaded = safe_int_convert(client['down'])
                        c.last_update = datetime.now()
                        c.save()
            else:
                n.captive_portal_status = True

            # Check for captive portal status change
            if oldNdsStatus and not n.captive_portal_status:
                Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor)
            elif not oldNdsStatus and n.captive_portal_status:
                Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor)

            # Generate a graph for number of wifi cells
            if 'cells' in info['wifi']:
                add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0)

            # Update node's MAC address on wifi iface
            if 'mac' in info['wifi']:
                n.wifi_mac = info['wifi']['mac']

            # Update node's RTS and fragmentation thresholds
            # (2347 disables RTS/fragmentation per 802.11 convention)
            if 'rts' in info['wifi'] and 'frag' in info['wifi']:
                n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347
                n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347

            # Check for VPN statistics
            if 'vpn' in info:
                n.vpn_mac = info['vpn']['mac']

            # Generate a graph for number of clients
            add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients)

            # Check for IP shortage (keep a few addresses spare in the wifi subnet)
            wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True)
            if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4):
                Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients))

            # Record interface traffic statistics for all interfaces
            for iid, iface in info['iface'].iteritems():
                if iid not in ('wifi0', 'wmaster0'):
                    # Check mappings for known wifi interfaces so we can handle hardware changes while
                    # the node is up and not generate useless intermediate graphs
                    if n.profile:
                        iface_wifi = n.profile.template.iface_wifi
                        if Template.objects.filter(iface_wifi = iid).count() >= 1:
                            iid = iface_wifi

                    add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down'])

            # Generate load average statistics
            if 'loadavg' in info['general']:
                n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg'])
                add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min)
                add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc)

            # Generate free memory statistics
            if 'memfree' in info['general']:
                n.memfree = safe_int_convert(info['general']['memfree'])
                buffers = safe_int_convert(info['general'].get('buffers', 0))
                cached = safe_int_convert(info['general'].get('cached', 0))
                add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached)

            # Generate solar statistics when available
            if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]):
                # Numeric encoding of the charge-regulator state for RRD storage
                states = {
                    'boost' : 1,
                    'equalize' : 2,
                    'absorption' : 3,
                    'float' : 4
                }

                add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar',
                    info['solar']['batvoltage'],
                    info['solar']['solvoltage'],
                    info['solar']['charge'],
                    # FIX: default to 1 ('boost') for unknown regulator states instead
                    # of passing None into the RRA update (matches the sibling copy of
                    # this function elsewhere in the file)
                    states.get(info['solar']['state'], 1),
                    info['solar']['load']
                )

            # Check for installed package versions (every hour)
            try:
                last_pkg_update = n.installedpackage_set.all()[0].last_update
            except:
                last_pkg_update = None

            if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1):
                packages = nodewatcher.fetch_installed_packages(n.ip) or {}

                # Remove removed packages and update existing package versions
                for package in n.installedpackage_set.all():
                    if package.name not in packages:
                        package.delete()
                    else:
                        package.version = packages[package.name]
                        package.last_update = datetime.now()
                        package.save()
                        del packages[package.name]

                # Add added packages
                for packageName, version in packages.iteritems():
                    package = InstalledPackage(node = n)
                    package.name = packageName
                    package.version = version
                    package.last_update = datetime.now()
                    package.save()

            # Check if DNS works
            if 'dns' in info:
                old_dns_works = n.dns_works
                n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0'
                if not n.dns_works:
                    NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor)

                if old_dns_works != n.dns_works:
                    # Generate a proper event when the state changes
                    if n.dns_works:
                        Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor)
                    else:
                        Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor)
        except:
            # NOTE(review): broad except deliberately keeps the monitor alive on
            # malformed nodewatcher output; the traceback is logged for diagnosis.
            logging.warning(format_exc())

    n.save()

    # When GC debugging is enabled perform some more work
    if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
        gc.collect()
        return os.getpid(), len(gc.get_objects())

    return None, None
def generate_image(d): """ Generates an image accoording to given configuration. """ logging.debug(repr(d)) if d['imagebuilder'] not in IMAGEBUILDERS: raise Exception("Invalid imagebuilder specified!") x = OpenWrtConfig() x.setUUID(d['uuid']) x.setOpenwrtVersion(d['openwrt_ver']) x.setArch(d['arch']) x.setPortLayout(d['port_layout']) x.setWifiIface(d['iface_wifi'], d['driver'], d['channel']) x.setWifiAnt(d['rx_ant'], d['tx_ant']) x.setLanIface(d['iface_lan']) x.setNodeType("adhoc") x.setPassword(d['root_pass']) x.setHostname(d['hostname']) x.setIp(d['ip']) x.setSSID(d['ssid']) # Add WAN interface and all subnets if d['wan_dhcp']: x.addInterface("wan", d['iface_wan'], init = True) else: x.addInterface("wan", d['iface_wan'], d['wan_ip'], d['wan_cidr'], d['wan_gw'], init = True) for subnet in d['subnets']: x.addSubnet(str(subnet['iface']), str(subnet['network']), subnet['cidr'], subnet['dhcp'], True) x.setCaptivePortal(d['captive_portal']) if d['vpn']: x.setVpn(d['vpn_username'], d['vpn_password'], d['vpn_mac'], d['vpn_limit']) if d['lan_wifi_bridge']: x.enableLanWifiBridge() if d['lan_wan_switch']: x.switchWanToLan() # Add optional packages for package in d['opt_pkg']: x.addPackage(package) # Cleanup stuff from previous builds os.chdir(WORKDIR) os.system("rm -rf build/files/*") os.system("rm -rf build/%s/bin/*" % d['imagebuilder']) os.mkdir("build/files/etc") x.generate("build/files/etc") if d['only_config']: # Just pack configuration and send it prefix = hashlib.md5(os.urandom(32)).hexdigest()[0:16] tempfile = os.path.join(DESTINATION, prefix + "-config.zip") zip = ZipFile(tempfile, 'w', ZIP_DEFLATED) os.chdir('build/files') for root, dirs, files in os.walk("etc"): for file in files: zip.write(os.path.join(root, file)) zip.close() # Generate checksum f = open(tempfile, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] 
checksum = checksum.hexdigest() result = "%s-%s-config-%s.zip" % (d['hostname'], d['router_name'], filechecksum) destination = os.path.join(DESTINATION, result) os.rename(tempfile, destination) # Send an e-mail t = loader.get_template('generator/email_config.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'config' : result, 'checksum' : checksum, 'network' : { 'name' : settings.NETWORK_NAME, 'home' : settings.NETWORK_HOME, 'contact' : settings.NETWORK_CONTACT, 'description' : getattr(settings, 'NETWORK_DESCRIPTION', None) }, 'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None) }) send_mail( settings.EMAIL_SUBJECT_PREFIX + (_("Configuration for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False ) else: # Generate full image x.build("build/%s" % d['imagebuilder']) # Read image version try: f = open(glob('%s/build/%s/build_dir/target-*/root-*/etc/version' % (WORKDIR, d['imagebuilder']))[0], 'r') version = f.read().strip().replace('.', '_') f.close() except: version = 'unknown' # Get resulting image files = [] for file, type in d['imagefiles']: file = str(file) source = "%s/build/%s/bin/%s" % (WORKDIR, d['imagebuilder'], file) f = open(source, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] checksum = checksum.hexdigest() ext = os.path.splitext(file)[1] result = "%s-%s-%s%s-%s%s" % (d['hostname'], d['router_name'], version, ("-%s" % type if type else ""), filechecksum, ext) destination = os.path.join(DESTINATION, result) os.rename(source, destination) files.append({ 'name' : result, 'checksum' : checksum }) # Send an e-mail t = loader.get_template('generator/email.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'files' : files, 'network' : 
{ 'name' : settings.NETWORK_NAME, 'home' : settings.NETWORK_HOME, 'contact' : settings.NETWORK_CONTACT, 'description' : getattr(settings, 'NETWORK_DESCRIPTION', None) }, 'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None) }) send_mail( settings.EMAIL_SUBJECT_PREFIX + (_("Router images for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False )
def generate_image(d): """ Generates an image accoording to given configuration. """ logging.debug(repr(d)) if d['imagebuilder'] not in IMAGEBUILDERS: raise Exception("Invalid imagebuilder specified!") x = OpenWrtConfig() x.setUUID(d['uuid']) x.setOpenwrtVersion(d['openwrt_ver']) x.setArch(d['arch']) x.setPortLayout(d['port_layout']) x.setWifiIface(d['iface_wifi'], d['driver'], d['channel']) x.setWifiAnt(d['rx_ant'], d['tx_ant']) x.setLanIface(d['iface_lan']) x.setNodeType("adhoc") x.setPassword(d['root_pass']) x.setHostname(d['hostname']) x.setIp(d['ip']) x.setSSID(d['ssid']) # Add WAN interface and all subnets if d['wan_dhcp']: x.addInterface("wan", d['iface_wan'], init = True) else: x.addInterface("wan", d['iface_wan'], d['wan_ip'], d['wan_cidr'], d['wan_gw'], init = True) for subnet in d['subnets']: x.addSubnet(str(subnet['iface']), str(subnet['network']), subnet['cidr'], subnet['dhcp'], True) x.setCaptivePortal(d['captive_portal']) if d['vpn']: x.setVpn(d['vpn_username'], d['vpn_password'], d['vpn_mac'], d['vpn_limit']) if d['lan_wifi_bridge']: x.enableLanWifiBridge() if d['lan_wan_switch']: x.switchWanToLan() # Add optional packages for package in d['opt_pkg']: x.addPackage(package) # Cleanup stuff from previous builds os.chdir(WORKDIR) os.system("rm -rf build/files/*") os.system("rm -rf build/%s/bin/*" % d['imagebuilder']) os.mkdir("build/files/etc") x.generate("build/files/etc") if d['only_config']: # Just pack configuration and send it prefix = hashlib.md5(os.urandom(32)).hexdigest()[0:16] tempfile = os.path.join(DESTINATION, prefix + "-config.zip") zip = ZipFile(tempfile, 'w', ZIP_DEFLATED) os.chdir('build/files') for root, dirs, files in os.walk("etc"): for file in files: zip.write(os.path.join(root, file)) zip.close() # Generate checksum f = open(tempfile, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] 
checksum = checksum.hexdigest() result = "%s-%s-config-%s.zip" % (d['hostname'], d['router_name'], filechecksum) destination = os.path.join(DESTINATION, result) os.rename(tempfile, destination) # Send an e-mail t = loader.get_template('generator/email_config.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'config' : result, 'checksum' : checksum, 'network' : { 'name' : settings.NETWORK_NAME, 'home' : settings.NETWORK_HOME, 'contact' : settings.NETWORK_CONTACT, 'description' : getattr(settings, 'NETWORK_DESCRIPTION', None) }, 'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None) }) send_mail( settings.EMAIL_SUBJECT_PREFIX + (_("Configuration for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False ) else: # Generate full image x.build("build/%s" % d['imagebuilder']) # Read image version try: f = open(glob('%s/build/%s/build_dir/target-*/root-*/etc/version' % (WORKDIR, d['imagebuilder']))[0], 'r') version = f.read().strip().replace('.', '_') f.close() except: version = 'unknown' # Get resulting image files = [] for file, type in d['imagefiles']: file = str(file) source = "%s/build/%s/bin/%s" % (WORKDIR, d['imagebuilder'], file) f = open(source, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] checksum = checksum.hexdigest() ext = os.path.splitext(file)[1] router_name = d['router_name'].replace('-', '') result = "%s-%s-%s%s-%s%s" % (d['hostname'], router_name, version, ("-%s" % type if type else "-all"), filechecksum, ext) destination = os.path.join(DESTINATION, result) os.rename(source, destination) files.append({ 'name' : result, 'checksum' : checksum }) # Send an e-mail t = loader.get_template('generator/email.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : 
d['vpn_username'], 'files' : files, 'network' : { 'name' : settings.NETWORK_NAME, 'home' : settings.NETWORK_HOME, 'contact' : settings.NETWORK_CONTACT, 'description' : getattr(settings, 'NETWORK_DESCRIPTION', None) }, 'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None) }) send_mail( settings.EMAIL_SUBJECT_PREFIX + (_("Router images for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False )
480,773
def setcontext(parser, token): """ Sets (updates) current template context with the rendered output of the block inside tags. """ nodelist = parser.parse(('endsetcontext',)) args = list(token.split_contents()) if len(args) != 3 or args[1] != "as": raise TemplateSyntaxError("'%s' tag expected format is 'as name'" % args[0]) variable = args[2] parser.delete_first_token() return SetContextNode(nodelist, variable)
def setcontext(parser, token): """ Sets (updates) current template context with the rendered output of the block inside tags. """ nodelist = parser.parse(('endsetcontext',)) args = list(token.split_contents()) if len(args) != 3 or args[1] != "as": raise template.TemplateSyntaxError("'%s' tag expected format is 'as name'" % args[0]) variable = args[2] parser.delete_first_token() return SetContextNode(nodelist, variable)
480,774
def notice(parser, token): """ Renders notice. """ nodelist = parser.parse(('endnotice',)) args = list(token.split_contents()) if len(args) > 3: raise TemplateSyntaxError("'%s' tag requires at most two arguments" % args[0]) classes = args[2] if len(args) > 2 else '""' notice_type = args[1] if len(args) > 1 else '""' parser.delete_first_token() notice_type = parser.compile_filter(notice_type) classes = parser.compile_filter(classes) return NoticeNode(nodelist, notice_type, classes)
def notice(parser, token): """ Renders notice. """ nodelist = parser.parse(('endnotice',)) args = list(token.split_contents()) if len(args) > 3: raise template.TemplateSyntaxError("'%s' tag requires at most two arguments" % args[0]) classes = args[2] if len(args) > 2 else '""' notice_type = args[1] if len(args) > 1 else '""' parser.delete_first_token() notice_type = parser.compile_filter(notice_type) classes = parser.compile_filter(classes) return NoticeNode(nodelist, notice_type, classes)
480,775
def sticker(request): """ Display a form for generating an info sticker. """ user = UserAccount.for_user(request.user) # We want disabled error to show only after POST (to be same as image generation behavior) disabled = False if request.method == 'POST': form = InfoStickerForm(request.POST) if form.is_valid(): if getattr(settings, 'STICKERS_ENABLED', None): return HttpResponseRedirect(form.save(user)) else: disabled = True else: form = InfoStickerForm({ 'name' : user.name, 'phone' : user.phone, 'project' : user.project.id if user.project else 0 }) return render_to_response('nodes/sticker.html', { 'form' : form, 'stickers_disabled' : disabled }, context_instance = RequestContext(request) )
def sticker(request): """ Display a form for generating an info sticker. """ user = UserAccount.for_user(request.user) # We want disabled error to show only after POST (to be same as image generation behavior) disabled = False if request.method == 'POST': form = InfoStickerForm(request.POST) if form.is_valid(): if getattr(settings, 'STICKERS_ENABLED', None): return HttpResponseRedirect(form.save(user)) else: disabled = True else: form = InfoStickerForm(initial = { 'name' : user.name, 'phone' : user.phone, 'project' : user.project.id if user.project else 0 }) return render_to_response('nodes/sticker.html', { 'form' : form, 'stickers_disabled' : disabled }, context_instance = RequestContext(request) )
480,776