@@ -2,22 +2,18 @@
 from scipy.ndimage import morphology
 import numpy as np
 import cloudvolume
-import fastremap
 import datetime
 from taskqueue import RegisteredTask, queueable
 from cloudfiles import CloudFiles
 from caveclient import CAVEclient
+
 from requests import HTTPError
 import time
 from meshparty import trimesh_io, skeletonize, skeleton_io
 import os
 import copy
 from functools import partial
 
-model1 = None
-model2 = None
-model3 = None
-
 
 class NoIDFoundException(Exception):
     """there was no ID in the mask that wasn't excluded"""
@@ -862,3 +858,224 @@ def make_bnd_func(row):
 
     tasks = (make_bnd_func(row) for num, row in df.iterrows())
     return tasks
+
+
+@queueable
+def execute_split(
+    root_id,
+    cut_ids,
+    source_list,
+    sink_list,
+    datastack_name,
+    auth_token,
+    bucket_save_location,
+):
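+    """Apply a batch of chunkedgraph split operations to one root id.
+
+    Each entry of source_list/sink_list is passed to
+    client.chunkedgraph.execute_split, and the per-cut outcome (including
+    any HTTPError) is recorded and written as {root_id}.json to
+    bucket_save_location.
+    """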
+    client = CAVEclient(datastack_name, auth_token=auth_token)
+    edit_success = np.zeros(len(source_list), dtype=bool)
+    responses = []
+    print(f"editing {root_id}")
+    for k, (cut_id, source_pts, sink_pts) in enumerate(
+        zip(cut_ids, source_list, sink_list)
+    ):
+        try:
+            operation_id, new_root_ids = client.chunkedgraph.execute_split(
+                source_pts, sink_pts, root_id
+            )
+            edit_success[k] = True
+            d = {
+                "root_id": root_id,
+                "cut_id": cut_id,
+                "success": True,
+                "operation_id": operation_id,
+                "new_root_ids": new_root_ids,
+            }
+        except HTTPError as e:
+            d = {
+                "root_id": root_id,
+                "cut_id": cut_id,
+                "success": False,
+                "status_code": e.response.status_code,
+                "message": str(e),
+            }
+            edit_success[k] = False
+        responses.append(d)
+
+    cf = CloudFiles(bucket_save_location)
+    cf.put_json(f"{root_id}.json", responses)
+
+    # if not np.all(edit_success):
+    #     raise Exception(
+    #         f"Only {np.sum(edit_success)} of {len(edit_success)} were successful"
+    #     )
+
+
+def make_split_tasks(
+    df,
+    auth_token,
+    bucket_save_location,
+    datastack_name="minnie65_phase3_v1",
+    root_col="segment_id",
+    source_pt_col="valid_points",
+    sink_pt_col="error_points",
+    cut_id_col="cut_id",
+):
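+    """Group df by root_col and build one execute_split task per root id.
+
+    Returns a generator of bound functions suitable for insertion into a
+    task queue.
+    """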
+    def make_split_func(root_id, grp):
+        bound_fn = partial(
+            execute_split,
+            root_id,
+            grp[cut_id_col].tolist(),
+            [r.tolist() for r in grp[source_pt_col]],
+            [r.tolist() for r in grp[sink_pt_col]],
+            datastack_name,
+            auth_token,
+            bucket_save_location,
+        )
+        return bound_fn
+
+    tasks = (make_split_func(root_id, grp) for root_id, grp in df.groupby(root_col))
+    return tasks
+
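+# A minimal usage sketch for running the split tasks (assuming
+# python-task-queue's LocalTaskQueue; `split_df`, `my_token`, and the
+# bucket path are hypothetical):
+#
+#     from taskqueue import LocalTaskQueue
+#
+#     tq = LocalTaskQueue(parallel=4)
+#     tq.insert(make_split_tasks(split_df, my_token, "gs://my-bucket/splits"))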
+
+
+@queueable
+def readjust_nuclei(
+    nuc_id,
+    nuc_pos,
+    nuc_sv,
+    nuc_pos_resolution,
+    nuc_cv_path,
+    seg_cv_path,
+    save_cloudpath,
+    radius,
+):
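+    """Move a nucleus centroid onto a voxel valid in both volumes.
+
+    If nuc_pos does not land on nucleus nuc_id, or the point has no
+    supervoxel (nuc_sv == 0), search a cube of +/- radius nm around the
+    point for the closest voxel inside both the eroded nucleus mask and
+    the eroded segmentation, then write the result as {nuc_id}.json to
+    save_cloudpath.
+    """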
+    print(f"adjusting nucleus {nuc_id}")
+    # download the nucleus segmentation at the position given
+    nuc_cv = cloudvolume.CloudVolume(
+        nuc_cv_path, use_https=True, progress=False, fill_missing=True, bounded=False
+    )
+    nuc_seg_centroid_id = nuc_cv.download_point(
+        nuc_pos, size=1, coord_resolution=nuc_pos_resolution
+    )
+    nuc_seg_centroid_id = int(np.squeeze(nuc_seg_centroid_id[0]))
+
+    # if the nucleus isn't underneath this point, or if there is no supervoxel_id
+    # here, then let's see if we can find a nearby point that is in the
+    # segmentation and in the nucleus
+    if nuc_seg_centroid_id != nuc_id or nuc_sv == 0:
+        # initialize the segmentation cloud volume at the nucleus volume's
+        # resolution so the two cutouts share a voxel grid
+        seg_cv = cloudvolume.CloudVolume(
+            seg_cv_path,
+            mip=nuc_cv.resolution,
+            progress=False,
+            fill_missing=True,
+            bounded=False,
+        )
+
+        cutout_nm = np.array([radius, radius, radius])
+        cutout_nuc_vx = (cutout_nm / nuc_cv.resolution).astype(np.int32)
+        cutout_seg_vx = (cutout_nm / seg_cv.resolution).astype(np.int32)
+
+        # convert the annotation position to voxels for each cloud volume
+        nuc_pos_vx = (
+            np.array(nuc_pos) * np.array(nuc_pos_resolution) / nuc_cv.resolution
+        ).astype(np.int32)
+        seg_pos_vx = (
+            np.array(nuc_pos) * np.array(nuc_pos_resolution) / seg_cv.resolution
+        ).astype(np.int32)
+
+        # set up bounding boxes for each cutout
+        nuc_bbox = cloudvolume.Bbox(
+            nuc_pos_vx - cutout_nuc_vx, nuc_pos_vx + cutout_nuc_vx
+        )
+        seg_bbox = cloudvolume.Bbox(
+            seg_pos_vx - cutout_seg_vx, seg_pos_vx + cutout_seg_vx
+        )
+        # nuc_bbox = cloudvolume.Bbox.intersection(nuc_bbox, nuc_cv.bounds)
+        # seg_bbox = cloudvolume.Bbox.intersection(seg_bbox, seg_cv.bounds)
+
+        # get the cutouts
+        nuc_cutout = np.squeeze(nuc_cv.download(nuc_bbox))
+        seg_cutout = np.squeeze(seg_cv.download(seg_bbox, agglomerate=False))
+
+        # make a mask of where the nucleus segmentation matches the nucleus ID
+        # and is at least 5 voxels from the mask border
+        nuc_mask = nuc_cutout == nuc_id
+        nuc_mask_erode = morphology.binary_erosion(nuc_mask, iterations=5)
+
+        # make a mask of where the segmentation volume has data and is at least
+        # 5 voxels from the mask border
+        seg_mask = seg_cutout != 0
+        seg_mask_erode = morphology.binary_erosion(seg_mask, iterations=5)
+
+        # calculate the global voxel coordinate of every voxel in the cutout;
+        # indexing="ij" keeps the meshgrid axes in (x, y, z) order so they line
+        # up with the cutout arrays (the default "xy" indexing would swap the
+        # first two axes)
+        xs = np.arange(nuc_bbox.minpt[0], nuc_bbox.maxpt[0])
+        ys = np.arange(nuc_bbox.minpt[1], nuc_bbox.maxpt[1])
+        zs = np.arange(nuc_bbox.minpt[2], nuc_bbox.maxpt[2])
+        xx, yy, zz = np.meshgrid(xs, ys, zs, indexing="ij")
+        # find the voxels where both masks are true
+        xi, yi, zi = np.where(nuc_mask_erode & seg_mask_erode)
+
+        # if there are any such voxels, find the closest one
+        if len(xi) != 0:
+            # offset in voxels from each candidate voxel to the centroid voxel
+            dx_vx = xx[xi, yi, zi] - nuc_pos_vx[0]
+            dy_vx = yy[xi, yi, zi] - nuc_pos_vx[1]
+            dz_vx = zz[xi, yi, zi] - nuc_pos_vx[2]
+
+            # euclidean distance in nm for each candidate
+            dist_nm = np.linalg.norm(
+                np.vstack([dx_vx, dy_vx, dz_vx]).T * np.array(nuc_cv.resolution), axis=1
+            )
+            # index of the closest candidate
+            close_point = np.argmin(dist_nm)
+            # add the index to minpt to get the global voxel index of the
+            # closest position
+            nuc_new_vx = nuc_bbox.minpt + [
+                xi[close_point],
+                yi[close_point],
+                zi[close_point],
+            ]
+            # convert this back to the voxel resolution of the given point
+            nuc_new_ann_vx = (
+                nuc_new_vx * nuc_cv.resolution / nuc_pos_resolution
+            ).astype(np.int32)
+            d = {"nuc_id": nuc_id, "new_pos": nuc_new_ann_vx.tolist(), "success": True}
+            print("success")
+        else:
+            print("failed")
+            # if there are no such voxels, note the failure
+            d = {"nuc_id": nuc_id, "success": False}
+        cf = CloudFiles(save_cloudpath)
+        cf.put_json(f"{nuc_id}.json", d)
+        return d
+    else:
+        print("nothing to adjust")
+        return None
+
+
+def make_nucleus_adjustment_tasks(
+    df,
+    nuc_cv_path,
+    seg_cv_path,
+    save_cloudpath,
+    position_column="pt_position",
+    nuc_id_column="id",
+    nuc_sv_column="pt_supervoxel_id",
+    nuc_pos_resolution=(4, 4, 40),
+    radius=3000,
+):
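+    """Build one readjust_nuclei task per row of df.
+
+    Defaults assume a CAVE-style nucleus table (pt_position annotated at
+    (4, 4, 40) nm voxels) and a 3000 nm search radius.
+    """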
+    def make_bnd_func(row):
+        bound_fn = partial(
+            readjust_nuclei,
+            row[nuc_id_column],
+            row[position_column],
+            row[nuc_sv_column],
+            nuc_pos_resolution,
+            nuc_cv_path,
+            seg_cv_path,
+            save_cloudpath,
+            radius,
+        )
+        return bound_fn
+
+    tasks = (make_bnd_func(row) for num, row in df.iterrows())
+    return tasks
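+
+
+# A minimal usage sketch, mirroring the task-queue pattern above (`nuc_df`
+# and all three paths are hypothetical):
+#
+#     tq = LocalTaskQueue(parallel=4)
+#     tq.insert(
+#         make_nucleus_adjustment_tasks(
+#             nuc_df,
+#             "precomputed://gs://bucket/nuclei_seg",
+#             "graphene://https://example.com/segmentation/table/seg",
+#             "gs://bucket/nucleus_adjust",
+#         )
+#     )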