import project_path
import numpy as np
import cv2
import os
from collections import namedtuple, defaultdict
import struct
from PIL import Image
from tqdm import tqdm
import datetime
from decimal import Decimal, ROUND_HALF_UP
import pytz
from copy import deepcopy
from multiprocessing import Pool
import math

import lib.fish_eye.pyARIS as pyARIS
from lib.fish_eye.tracker import Tracker

BEAM_WIDTH_DIR = 'lib/fish_eye/beam_widths/'

ImageData = namedtuple('ImageData', [
    'pixel_meter_size',
    'xdim', 'ydim',
    'x_meter_start', 'y_meter_start', 'x_meter_stop', 'y_meter_stop',
    'sample_read_rows', 'sample_read_cols', 'image_write_rows', 'image_write_cols'
])
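# pixel_meter_size is the side length of one output pixel, in meters.
# sample_read_rows/cols index into the raw (sample, beam) array, while
# image_write_rows/cols give the matching (row, col) positions in the
# rectilinear output image; together they define the polar-to-raster mapping.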


def FastARISRead(aris_fp, start_frame, end_frame):
    ARISdata, aris_frame = pyARIS.DataImport(aris_fp)
    frames = FastARISExtract(ARISdata, start_frame, end_frame)
    return frames


def FastARISExtract(ARIS_data, start_frame, end_frame):
    """Read only the raw ARIS frame samples, skipping the per-frame metadata."""
    FrameSize = ARIS_data.SamplesPerChannel*ARIS_data.NumRawBeams
    frames = np.empty([end_frame-start_frame, ARIS_data.SamplesPerChannel,
                       ARIS_data.NumRawBeams], dtype=np.uint8)
    with open(ARIS_data.filename, 'rb') as data:
        for i, j in enumerate(range(start_frame, end_frame)):
            # Skip the 2048-byte file header plus j frame records (1024-byte
            # frame header + FrameSize sample bytes each), then read one frame.
            data.seek(j*(1024+(FrameSize))+2048, 0)
            raw_data = struct.unpack("%dB" % FrameSize, data.read(FrameSize))
            frames[i] = np.fliplr(np.reshape(
                raw_data, [ARIS_data.SamplesPerChannel, ARIS_data.NumRawBeams]))
    # The with-statement closes the data file automatically.
    return frames
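
# Hedged usage sketch: FastARISRead pairs DataImport with FastARISExtract to pull a
# short clip into memory. Illustrative only and never called by this module; the
# path below is a hypothetical stand-in.
def _example_fast_read(aris_fp='clips/example.aris', n_frames=10):
    """Return the first n_frames as a (n_frames, SamplesPerChannel, NumRawBeams) uint8 array."""
    frames = FastARISRead(aris_fp, start_frame=0, end_frame=n_frames)
    print(frames.shape, frames.dtype)
    return frames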


def get_info(aris_fp, beam_width_dir=BEAM_WIDTH_DIR):
    """
    Return:
        image_meter_width, image_meter_height, fps
    """
    ARISdata, aris_frame = pyARIS.DataImport(aris_fp)
    beam_width_data = pyARIS.load_beam_width_data(aris_frame, beam_width_dir=beam_width_dir)[0]
    min_pixel_size = pyARIS.get_minimum_pixel_meter_size(aris_frame, beam_width_data)
    sample_length = aris_frame.sampleperiod * 0.000001 * aris_frame.soundspeed / 2
    pixel_meter_size = max(min_pixel_size, sample_length)
    xdim, ydim, x_meter_start, y_meter_start, x_meter_stop, y_meter_stop = pyARIS.compute_image_bounds(
        pixel_meter_size, aris_frame, beam_width_data,
        additional_pixel_padding_x=0,
        additional_pixel_padding_y=0
    )
    return pixel_meter_size * xdim, pixel_meter_size * ydim, aris_frame.framerate
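
# Hedged usage sketch (the path is a hypothetical stand-in): get_info recovers the
# physical extent of each frame without extracting any imagery, e.g.
#
#   width_m, height_m, fps = get_info('clips/example.aris')
#   print(f"{width_m:.2f} m x {height_m:.2f} m @ {fps:.1f} fps")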


def write_frames(aris_fp, out_dir, max_mb=-1, beam_width_dir=BEAM_WIDTH_DIR, bg_out_dir=None, num_workers=0):
    """
    Write all frames from an ARIS file to disk, using our 3-channel format:
    (raw img, blurred & mean-subtracted img, optical flow approximation)

    Args:
        aris_fp: path to the ARIS file
        out_dir: directory for frame extraction; frames are named 0.jpg, 1.jpg, ... {n}.jpg
        max_mb: maximum amount of the file to process, in megabytes (-1 for no limit)
        beam_width_dir: location of ARIS camera information
        bg_out_dir: where to write the background frame; None disables writing
        num_workers: number of worker processes for batch extraction; 0 runs serially

    Return:
        (float) image_meter_width - the width of each image, in meters
        (float) image_meter_height
        (float) fps
    """
    # Load in the ARIS file
    ARISdata, aris_frame = pyARIS.DataImport(aris_fp)
    beam_width_data = pyARIS.load_beam_width_data(aris_frame, beam_width_dir=beam_width_dir)[0]

    # What is the meter resolution of the smallest sample?
    min_pixel_size = pyARIS.get_minimum_pixel_meter_size(aris_frame, beam_width_data)

    # What is the meter resolution of the sample length?
    sample_length = aris_frame.sampleperiod * 0.000001 * aris_frame.soundspeed / 2

    # Choose the size of a pixel (or hard code it to some specific value)
    pixel_meter_size = max(min_pixel_size, sample_length)

    # Determine the image dimensions
    xdim, ydim, x_meter_start, y_meter_start, x_meter_stop, y_meter_stop = pyARIS.compute_image_bounds(
        pixel_meter_size, aris_frame, beam_width_data,
        additional_pixel_padding_x=0,
        additional_pixel_padding_y=0
    )

    # Compute the mapping from the samples to the image
    sample_read_rows, sample_read_cols, image_write_rows, image_write_cols = pyARIS.compute_mapping_from_sample_to_image(
        pixel_meter_size,
        xdim, ydim, x_meter_start, y_meter_start,
        aris_frame, beam_width_data
    )

    image_data = ImageData(
        pixel_meter_size,
        xdim, ydim, x_meter_start, y_meter_start, x_meter_stop, y_meter_stop,
        sample_read_rows, sample_read_cols, image_write_rows, image_write_cols
    )

    start_frame = 0
    end_frame = ARISdata.FrameCount

    bytes_per_frame = 1024 + ARISdata.SamplesPerChannel*ARISdata.NumRawBeams
    print("ARIS bytes per frame", bytes_per_frame)
    img_bytes_per_frame = image_data.ydim * image_data.xdim * 4  # for fp32 frames
    print("Image bytes per frame", img_bytes_per_frame)
    max_bytes = max(bytes_per_frame, img_bytes_per_frame)
    if max_mb > 0:
        max_frames = int(max_mb*1000000 / max_bytes)
        if end_frame > max_frames:
            end_frame = max_frames

    # Use at most ~4 GB per batch to avoid memory errors (16 GB RAM on a g4dn.xlarge).
    # Batches overlap by one frame because the optical-flow channel needs frame i+1.
    batch_size = 1000  # int(4000*1000000 / max_bytes)
    clips = [[pos, pos+batch_size+1] for pos in range(0, end_frame, batch_size)]
    clips[-1][1] = end_frame
    print("Batch size:", batch_size)

    with tqdm(total=(end_frame-start_frame-1), desc="Extracting frames", ncols=0) as pbar:
        # Compute info for bg subtraction using the first batch
        # TODO: make this a sliding window
        mean_blurred_frame, mean_normalization_value = write_frame_range(ARISdata, image_data, out_dir, clips[0][0], clips[0][1], None, None, pbar)

        # Do the rest of the batches in parallel
        if num_workers > 0:
            args = [(ARISdata, image_data, out_dir, start, end, mean_blurred_frame, mean_normalization_value) for (start, end) in clips[1:]]  # TODO: can't pass pbar to a worker process
            with Pool(num_workers) as pool:
                results = [pool.apply_async(write_frame_range, arg) for arg in args]
                results = [r.get() for r in results]  # block until every worker finishes
            pbar.update(sum([arg[4] - arg[3] for arg in args]))
        else:
            for j, (start, end) in enumerate(clips[1:]):
                write_frame_range(ARISdata, image_data, out_dir, start, end, mean_blurred_frame, mean_normalization_value, pbar)

    if bg_out_dir is not None:
        # mean_blurred_frame is already in the 0-255 range, so only clip and cast
        bg_img = np.clip(mean_blurred_frame, 0, 255).astype(np.uint8)
        out_fp = os.path.join(bg_out_dir, 'bg_start.jpg')
        Image.fromarray(bg_img).save(out_fp, quality=95)

    return pixel_meter_size * xdim, pixel_meter_size * ydim, aris_frame.framerate
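
# Hedged usage sketch (illustrative; the paths are hypothetical stand-ins):
#
#   w_m, h_m, fps = write_frames('clips/example.aris', 'frames/example',
#                                max_mb=500, bg_out_dir='frames/example', num_workers=4)
#
# 'frames/example' then holds 0.jpg ... {n}.jpg plus bg_start.jpg, and the returned
# meter dimensions can be used downstream to convert pixel lengths to meters.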


def write_frame_range(ARISdata, image_data, out_dir, start, end, mean_blurred_frame=None, mean_normalization_value=None, pbar=None):
    try:
        frames = np.zeros([end-start, image_data.ydim, image_data.xdim], dtype=np.uint8)
        frames[:, image_data.image_write_rows, image_data.image_write_cols] = FastARISExtract(ARISdata, start, end)[:, image_data.sample_read_rows, image_data.sample_read_cols]
    except Exception:
        print("Error extracting frames from", ARISdata.filename, "for batch", (start, end))
        return

    blurred_frames = frames.astype(np.float32)
    for i in range(frames.shape[0]):
        blurred_frames[i] = cv2.GaussianBlur(
            blurred_frames[i],
            (5, 5),
            0
        )
    if mean_blurred_frame is None:
        mean_blurred_frame = blurred_frames.mean(axis=0)
    blurred_frames -= mean_blurred_frame
    if mean_normalization_value is None:
        mean_normalization_value = np.max(np.abs(blurred_frames))
    blurred_frames /= mean_normalization_value
    blurred_frames += 1
    blurred_frames /= 2

    # Because of the optical flow computation, we only go to end - 1
    for i, frame_offset in enumerate(range(start, end - 1)):
        frame_image = np.dstack([
            frames[i] / 255,
            blurred_frames[i],
            np.abs(blurred_frames[i+1] - blurred_frames[i])
        ]).astype(np.float32)
        frame_image = (frame_image * 255).astype(np.uint8)
        out_fp = os.path.join(out_dir, f'{frame_offset}.jpg')
        Image.fromarray(frame_image).save(out_fp, quality=95)
        if pbar:
            pbar.update(1)

    return mean_blurred_frame, mean_normalization_value
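
# For reference, the 3-channel layout written above (after scaling to uint8):
#   ch0: raw acoustic return (frames[i])
#   ch1: blurred, mean-subtracted frame rescaled to [0, 1]
#   ch2: |ch1 of frame i+1 - ch1 of frame i|, a cheap optical-flow-style motion cue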


def prep_for_mm(json_data):
    """Prepare json results for writing to a manual marking file."""
    json_data = deepcopy(json_data)

    # map fish id -> [ (bbox, frame_num), (bbox, frame_num), ... ]
    tracks = defaultdict(list)
    for frame in json_data['frames']:
        for bbox in frame['fish']:
            tracks[bbox['fish_id']].append((bbox['bbox'], frame['frame_num']))

    # find the frame number for manual marking:
    # look for the first time a track crosses the center;
    # if it never crosses the center, use the box closest to the center
    mm_frame_nums = {}
    for f_id, track in tracks.items():
        # keep track of the frame closest to the center
        closest_frame = 0
        closest_dist = 1.0
        for i, (box, frame) in enumerate(track):
            x = (box[0] + box[2]) / 2.0
            if i > 0:
                last_x = (track[i-1][0][0] + track[i-1][0][2]) / 2.0
                if (x < 0.5 and last_x >= 0.5) or (last_x < 0.5 and x >= 0.5):
                    closest_frame = frame
                    break
            dist = abs(x - 0.5)
            if dist < closest_dist:
                closest_frame = frame
                closest_dist = dist
        mm_frame_nums[f_id] = closest_frame

    # sort tracks by their frame numbers and re-key; IDs are 1-indexed
    id_frame = [(k, v) for k, v in mm_frame_nums.items()]
    id_frame = sorted(id_frame, key=lambda x: x[1])
    id_map = {}
    for i, (f_id, frame) in enumerate(id_frame, start=1):
        id_map[f_id] = i

    # map IDs and keep frame['fish'] sorted by ID
    for i, frame in enumerate(json_data['frames']):
        new_frame_entries = []
        for frame_entry in frame['fish']:
            frame_entry['fish_id'] = id_map[frame_entry['fish_id']]
            new_frame_entries.append(frame_entry)
        frame['fish'] = sorted(new_frame_entries, key=lambda k: k['fish_id'])

    # store the manual marking frame and re-map the 'fish' field
    for fish in json_data['fish']:
        fish['marking_frame'] = mm_frame_nums[fish['id']]  # mm_frame_nums refers to old IDs
        fish['id'] = id_map[fish['id']]
    json_data['fish'] = sorted(json_data['fish'], key=lambda x: x['id'])

    return json_data
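
# Hedged sketch of the JSON layout prep_for_mm (and add_metadata_to_result) expects;
# the values below are illustrative, and bbox coordinates appear to be normalized to
# [0, 1], which is why the center-crossing test compares box midpoints against 0.5:
#
#   {
#     "frames": [{"frame_num": 0,
#                 "fish": [{"fish_id": 3, "bbox": [0.10, 0.42, 0.18, 0.47]}]}],
#     "fish":   [{"id": 3, "length": 0.61, "direction": "left", "travel_dist": 0.8,
#                 "start_frame_index": 0, "end_frame_index": 42}]
#   }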


def add_metadata_to_result(aris_fp, json_data, beam_width_dir=BEAM_WIDTH_DIR):
    """
    Return:
        dictionary, for manual marking
    """
    metadata = {}
    metadata["FILE_NAME"] = aris_fp
    if aris_fp.endswith(".aris"):
        ARISdata, frame = pyARIS.DataImport(aris_fp)
    else:
        ARISdata = None
        frame = None
    metadata["FRAME_RATE"] = frame.framerate if frame is not None else 1

    if frame is not None:
        # Load in the beam width information
        beam_width_data, camera_type = pyARIS.load_beam_width_data(frame, beam_width_dir=beam_width_dir)

        # What is the meter resolution of the smallest sample?
        min_pixel_size = pyARIS.get_minimum_pixel_meter_size(frame, beam_width_data)

        # What is the meter resolution of the sample length?
        sample_length = frame.sampleperiod * 0.000001 * frame.soundspeed / 2

        # Choose the size of a pixel
        pixel_meter_size = max(min_pixel_size, sample_length)

        # Determine the image dimensions
        xdim, ydim, x_meter_start, y_meter_start, x_meter_stop, y_meter_stop = pyARIS.compute_image_bounds(
            pixel_meter_size, frame, beam_width_data,
            additional_pixel_padding_x=0,
            additional_pixel_padding_y=0
        )

        # Compute the mapping from the samples to the image
        sample_read_rows, sample_read_cols, image_write_rows, image_write_cols = pyARIS.compute_mapping_from_sample_to_image(
            pixel_meter_size,
            xdim, ydim, x_meter_start, y_meter_start,
            frame, beam_width_data
        )
        marking_mapping = dict(zip(zip(image_write_rows, image_write_cols),
                                   zip(sample_read_rows, sample_read_cols)))

    # The manual marking format rounds 0.5 up to 1, rather than to even as IEEE 754 does
    def round(number, ndigits=0):
        exp = Decimal(1).scaleb(-ndigits)  # e.g. ndigits=1 -> Decimal('0.1')
        return float(Decimal(str(number)).quantize(exp, rounding=ROUND_HALF_UP))
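
    # Hedged examples of the difference (values are illustrative):
    #   round(2.5)     -> 3.0   (half up)
    #   round(0.25, 1) -> 0.3   (half up at one decimal place)
    #   Python's built-in round(2.5) -> 2 (banker's rounding)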

    right, left, none = Tracker.count_dirs(json_data)
    metadata["UPSTREAM_FISH"] = left  # TODO
    metadata["DOWNSTREAM_FISH"] = right  # TODO
    metadata["NONDIRECTIONAL_FISH"] = none  # TODO
    metadata["TOTAL_FISH"] = metadata["UPSTREAM_FISH"] + metadata["DOWNSTREAM_FISH"] + metadata["NONDIRECTIONAL_FISH"]
    metadata["TOTAL_FRAMES"] = ARISdata.FrameCount if ARISdata is not None else 1
    metadata["EXPECTED_FRAMES"] = -1  # What is this?
    metadata["TOTAL_TIME"] = str(datetime.timedelta(seconds=round(metadata["TOTAL_FRAMES"]/metadata["FRAME_RATE"])))
    metadata["EXPECTED_TIME"] = str(datetime.timedelta(seconds=round(metadata["EXPECTED_FRAMES"]/metadata["FRAME_RATE"])))
    metadata["UPSTREAM_MOTION"] = 'Right To Left'  # TODO: hard-coded; 'Left To Right' is the other valid value
    metadata["COUNT_FILE_NAME"] = 'N/A'
    metadata["EDITOR_ID"] = 'N/A'
    metadata["INTENSITY"] = f'{round(frame.intensity, 1) if frame is not None else 0:.1f} dB'  # Missing
    metadata["THRESHOLD"] = f'{round(frame.threshold, 1) if frame is not None else 0:.1f} dB'  # Missing
    metadata["WINDOW_START"] = round(frame.windowstart, 2) if frame is not None else 0
    metadata["WINDOW_END"] = round(frame.windowstart + frame.windowlength, 2) if frame is not None else 0
    metadata["WATER_TEMP"] = f'{int(round(frame.watertemp)) if frame is not None else 0} degC'

    upstream_motion_map = {}
    if metadata["UPSTREAM_MOTION"] == 'Left To Right':
        upstream_motion_map = {
            'right': ' Up',
            'left': 'Down',
            'none': ' N/A',
        }
    elif metadata["UPSTREAM_MOTION"] == 'Right To Left':
        upstream_motion_map = {
            'left': ' Up',
            'right': 'Down',
            'none': ' N/A',
        }

    def get_entry(fish):
        if 'marking_frame' in fish:
            frame_num = fish['marking_frame']
            entry = None
            for json_frame in json_data['frames']:
                if json_frame['frame_num'] == frame_num:
                    for json_frame_entry in json_frame['fish']:
                        if json_frame_entry['fish_id'] == fish['id']:
                            json_frame_entry = json_frame_entry.copy()
                            json_frame_entry['frame_num'] = frame_num
                            return json_frame_entry
        else:
            print("Warning: JSON not correctly formatted for manual marking creation. Use aris.prep_for_mm()")
            entries = []
            for json_frame in json_data['frames']:
                for json_frame_entry in json_frame['fish']:
                    if json_frame_entry['fish_id'] == fish['id']:
                        entries.append({'frame_num': json_frame['frame_num'], **json_frame_entry})
            entry = entries[len(entries)//2]
            return entry
        print("Error, could not find entry for", fish)
        return None  # TODO: better error handling

    entries = []
    for fish in json_data['fish']:
        entry = get_entry(fish)
        entry['length'] = fish['length']*100  # meters -> centimeters
        entry['direction'] = fish['direction']
        entry['travel_dist'] = fish['travel_dist']
        entry['start_frame_index'] = fish['start_frame_index']
        entry['end_frame_index'] = fish['end_frame_index']
        entries.append(entry)
metadata["FISH"] = [] | |
for entry in sorted(entries, key=lambda x: x['fish_id']): | |
frame_num = entry['frame_num'] | |
if ARISdata is not None: | |
frame = pyARIS.FrameRead(ARISdata, frame_num) | |
else: | |
frame = None | |
y = (entry['bbox'][1]+entry['bbox'][3])/2 | |
x = (entry['bbox'][0]+entry['bbox'][2])/2 | |
h = np.max(image_write_rows) | |
w = np.max(image_write_cols) | |
# TODO actually fix this | |
try: | |
bin_num, beam_num = marking_mapping[(round(y*h), round(x*w))] | |
except: | |
bin_num = 0 | |
beam_num = 0 | |
fish_entry = {} | |
fish_entry['FILE'] = 1 | |
fish_entry['TOTAL'] = entry['fish_id'] | |
fish_entry['FRAME_NUM'] = entry['frame_num'] | |
fish_entry['START_FRAME'] = entry['start_frame_index'] | |
fish_entry['END_FRAME'] = entry['end_frame_index'] | |
fish_entry['NBR_FRAMES'] = entry['end_frame_index'] + 1 - entry['start_frame_index'] | |
fish_entry['TRAVEL'] = entry['travel_dist'] | |
fish_entry['DIR'] = upstream_motion_map[entry['direction']] | |
fish_entry['R'] = bin_num * pixel_meter_size + frame.windowstart if frame is not None else "-" | |
fish_entry['THETA'] = beam_width_data['beam_center'][beam_num] | |
fish_entry['L'] = entry['length'] | |
fish_entry['DR'] = -1.0 # What is this? | |
fish_entry['LDR'] = -1.0 # What is this? | |
fish_entry['ASPECT'] = -1.0 # What is this? | |
TIME, DATE = datetime.datetime.fromtimestamp(frame.sonartimestamp/1000000, pytz.timezone('UTC')).strftime('%H:%M:%S %Y-%m-%d').split() if frame is not None else ("-", "-") | |
fish_entry['TIME'] = TIME | |
fish_entry['DATE'] = DATE | |
fish_entry['LATITUDE'] = frame.latitude or 'N 00 d 0.00000 m' if frame is not None else 0 | |
fish_entry['LONGITUDE'] = frame.longitude or 'E 000 d 0.00000 m' if frame is not None else 0 | |
fish_entry['PAN'] = frame.sonarpan if frame is not None else 0 | |
if math.isnan(fish_entry['PAN']): fish_entry['PAN'] = "nan" | |
fish_entry['TILT'] = frame.sonartilt if frame is not None else 0 | |
if math.isnan(fish_entry['TILT']): fish_entry['TILT'] = "nan" | |
fish_entry['ROLL'] = frame.roll if frame is not None else 0 # May be wrong number but sonarroll was NaN | |
fish_entry['SPECIES'] = 'Unknown' | |
fish_entry['MOTION'] = 'Running <-->' | |
fish_entry['Q'] = -1 #5 # I don't know what this is or where it comes from | |
fish_entry['N'] = -1 #1 # I don't know what this is or where it comes from | |
fish_entry['COMMENT'] = '' | |
metadata["FISH"].append(fish_entry) | |

    # Recording date and time range, taken from the first and last frame timestamps
    if ARISdata is not None:
        first_frame = pyARIS.FrameRead(ARISdata, 0)
        last_frame = pyARIS.FrameRead(ARISdata, metadata["TOTAL_FRAMES"]-1)
        start_time, start_date = datetime.datetime.fromtimestamp(first_frame.sonartimestamp/1000000, pytz.timezone('UTC')).strftime('%H:%M:%S %Y-%m-%d').split()
        end_time, end_date = datetime.datetime.fromtimestamp(last_frame.sonartimestamp/1000000, pytz.timezone('UTC')).strftime('%H:%M:%S %Y-%m-%d').split()
    else:
        start_date = 0
        start_time = 0
        end_time = 0
    metadata["DATE"] = start_date
    metadata["START"] = start_time
    metadata["END"] = end_time

    json_data['metadata'] = metadata
    return json_data


def create_manual_marking(results, out_path=None):
    """
    Return:
        string, full contents of manual marking
    """
    metadata = results['metadata']
    s = f'''
Total Fish = {metadata["TOTAL_FISH"]}
Upstream = {metadata["UPSTREAM_FISH"]}
Downstream = {metadata["DOWNSTREAM_FISH"]}
?? = {metadata["NONDIRECTIONAL_FISH"]}
Total Frames = {metadata["TOTAL_FRAMES"]}
Expected Frames = {metadata["EXPECTED_FRAMES"]}
Total Time = {metadata["TOTAL_TIME"]}
Expected Time = {metadata["EXPECTED_TIME"]}
Upstream Motion = {metadata["UPSTREAM_MOTION"]}
Count File Name: {metadata["COUNT_FILE_NAME"]}
Editor ID = {metadata["EDITOR_ID"]}
Intensity = {metadata["INTENSITY"]}
Threshold = {metadata["THRESHOLD"]}
Window Start = {metadata["WINDOW_START"]:.2f}
Window End = {metadata["WINDOW_END"]:.2f}
Water Temperature = {metadata["WATER_TEMP"]}
*** Manual Marking (Manual Sizing: Q = Quality, N = Repeat Count) ***
File Total Frame# Dir R (m) Theta L(cm) dR(cm) L/dR Aspect Time Date Latitude Longitude Pan Tilt Roll Species Motion Q N Comment
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
    for fish in metadata["FISH"]:
        entry = {}
        for field in fish.keys():
            if fish[field] == "nan":
                entry[field] = math.nan
            else:
                entry[field] = fish[field]
        s += f'{entry["FILE"]:>4} {entry["TOTAL"]:>5} {entry["FRAME_NUM"]:>6} {entry["DIR"]:>3} {entry["R"]:>6.2f} {entry["THETA"]:>6.1f} {entry["L"]:>6.1f} {entry["DR"]:>6.1f} {entry["LDR"]:>6.2f} {entry["ASPECT"]:>6.1f} {entry["TIME"]:>8} {entry["DATE"]:>10} {entry["LATITUDE"]:>17} {entry["LONGITUDE"]:>18} {entry["PAN"]:>7.2f} {entry["TILT"]:>7.2f} {entry["ROLL"]:>7.2f} {entry["SPECIES"]:>8} {entry["MOTION"]:>37} {entry["Q"]:>5} {entry["N"]:>2} {entry["COMMENT"]}\n'
    s += f'''
*** Source File Key ***
1. Source File Name: {metadata["FILE_NAME"]}
Source File Date: {metadata["DATE"]}
Source File Start: {metadata["START"]}
Source File End: {metadata["END"]}
Settings
Upstream: {metadata["UPSTREAM_MOTION"]}
Default Mark Direction: Upstream
Editor ID: {metadata["EDITOR_ID"]}
Show Marks: ??
Show marks for ?? seconds
Loop for ?? seconds
'''
    if out_path:
        with open(out_path, 'w') as f:
            f.write(s)
    return s
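

# Hedged end-to-end sketch (illustrative only): the paths below are hypothetical
# stand-ins, and the empty results dict stands in for the detector/tracker output
# described above prep_for_mm; only the functions defined in this module are real.
if __name__ == '__main__':
    example_aris = 'clips/example.aris'      # hypothetical input clip
    example_frames_dir = 'frames/example'    # hypothetical output directory
    os.makedirs(example_frames_dir, exist_ok=True)

    # 1. Extract 3-channel frames and recover the physical frame size.
    image_meter_width, image_meter_height, fps = write_frames(
        example_aris, example_frames_dir, max_mb=100, bg_out_dir=example_frames_dir)

    # 2. Detection/tracking would run here and fill results with the JSON
    #    layout sketched above prep_for_mm.
    results = {'frames': [], 'fish': []}

    # 3. Re-key tracks for manual marking, attach metadata, and write the report.
    results = prep_for_mm(results)
    results = add_metadata_to_result(example_aris, results)
    create_manual_marking(results, out_path=os.path.join(example_frames_dir, 'marking.txt'))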