
Commit

Initial version of LatentBKI
MultyXu committed Oct 13, 2024
1 parent 7009249 commit f40b8fc
Showing 78 changed files with 11,102 additions and 34 deletions.
Binary file added .DS_Store
44 changes: 11 additions & 33 deletions .gitignore
@@ -20,6 +20,7 @@ parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
@@ -49,7 +50,6 @@ coverage.xml
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
@@ -72,7 +72,6 @@ instance/
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
@@ -83,9 +82,7 @@ profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
@@ -94,24 +91,7 @@ ipython_config.py
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
@@ -148,15 +128,13 @@ dmypy.json
# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/
# Tensorflow event emitter
events.out.*
training_log.txt

# Cython debug symbols
cython_debug/
#
.vscode

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
*.ckpt
Models/Lseg/lseg/*
PCAonGPU/PCA_instance/*
13 changes: 13 additions & 0 deletions Config/LatentBKI_default.yaml
@@ -0,0 +1,13 @@
dataset: "mp3d"
meas_result: True
with_variance: False
use_relative_pose: True
pseduo_discrete: True
save_map: True
result_split: "val"
grid_params:
  grid_size: [100.0, 100.0, 100.0]
  min_bound: [-5.0, -5.0, -5.0]
  max_bound: [5.0, 5.0, 5.0]
filter_size: 3
ell: 0.5
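
For orientation, here is a minimal sketch (an illustration, not part of this commit) of how the grid_params above translate into per-axis voxel sizes, assuming PyYAML is available and mirroring the computation used later in Data/KITTI_SPVCNN.py:

import yaml

# Sketch: derive per-axis voxel size from grid_params in Config/LatentBKI_default.yaml
with open('Config/LatentBKI_default.yaml', 'r') as f:
    cfg = yaml.safe_load(f)

grid = cfg['grid_params']
voxel_sizes = [
    abs(hi - lo) / n
    for lo, hi, n in zip(grid['min_bound'], grid['max_bound'], grid['grid_size'])
]
print(voxel_sizes)  # [0.1, 0.1, 0.1] for the default 100^3 grid over [-5, 5]^3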
12 changes: 12 additions & 0 deletions Config/LatentBKI_kitti.yaml
@@ -0,0 +1,12 @@
dataset: "semantic_kitti"
meas_result: True
with_variance: False
use_relative_pose: True
pseduo_discrete: True
save_map: True
grid_params:
  grid_size: [400.0, 400.0, 26.0]
  min_bound: [-40.0, -40.0, -2.6]
  max_bound: [40.0, 40.0, 2.6]
filter_size: 3
ell: 0.5
13 changes: 13 additions & 0 deletions Config/LatentBKI_realworld.yaml
@@ -0,0 +1,13 @@
dataset: "realworld"
meas_result: True
with_variance: False
use_relative_pose: True
pseduo_discrete: True
save_map: True
result_split: "val"
grid_params:
  grid_size: [200.0, 200.0, 80.0]
  min_bound: [-5.0, -5.0, -2.0]
  max_bound: [5.0, 5.0, 2.0]
filter_size: 3
ell: 0.5
12 changes: 12 additions & 0 deletions Config/LatentBKI_vlmap.yaml
@@ -0,0 +1,12 @@
dataset: "mp3d"
meas_result: True
with_variance: False
use_relative_pose: True
pseduo_discrete: True
save_map: True
grid_params:
  grid_size: [240.0, 240.0, 80.0]
  min_bound: [-6.0, -6.0, -2.0]
  max_bound: [6.0, 6.0, 2.0]
filter_size: 1
ell: 0.5
59 changes: 59 additions & 0 deletions Config/mp3d.yaml
@@ -0,0 +1,59 @@
num_classes: 40
data_dir: "/mp3d/vlmaps_data_dir/vlmaps_dataset/"
feature_dir: 'lseg_feature'
pca_path: "PCAonGPU/PCA_instance/mp3d_pca_64.pkl"
feature_size: 64
grid_mask: False
down_sample_feature: True
raw_data: True
subsample_points: 1
intrinsic: [540, 0, 540, 0, 540, 360, 0, 0, 1]

sequences: [
'5LpN3gDmAk7_1',
'gTV8FGcVJC9_1',
]

category: [
"void",
"wall",
"floor",
"chair",
"door",
"table",
"picture",
"cabinet",
"cushion",
"window",
"sofa",
"bed",
"curtain",
"chest_of_drawers",
"plant",
"sink",
"stairs",
"ceiling",
"toilet",
"stool",
"towel",
"mirror",
"tv_monitor",
"shower",
"column",
"bathtub",
"counter",
"fireplace",
"lighting",
"beam",
"railing",
"shelving",
"blinds",
"gym_equipment",
"seating",
"board_panel",
"furniture",
"appliances",
"clothes",
"objects",
# "misc",
]
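
As a quick sanity check (a sketch assuming PyYAML and NumPy are available; not part of this commit), the flat 9-element intrinsic list above can be reshaped into the usual 3x3 camera matrix, and num_classes compared against the category list:

import yaml
import numpy as np

with open('Config/mp3d.yaml', 'r') as f:
    cfg = yaml.safe_load(f)

K = np.array(cfg['intrinsic'], dtype=float).reshape(3, 3)  # rows: [fx, 0, cx], [0, fy, cy], [0, 0, 1]
assert cfg['num_classes'] == len(cfg['category'])  # 40 classes, "void" included, "misc" commented out
print(K)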
27 changes: 27 additions & 0 deletions Config/realworld.yaml
@@ -0,0 +1,27 @@
num_classes: 10
data_dir: "/mp3d/real_world"
feature_dir: 'lseg_feature'
pca_path: "PCAonGPU/PCA_instance/mp3d_pca_64.pkl"
feature_size: 64
grid_mask: True
down_sample_feature: True
raw_data: False
subsample_points: 1
intrinsic: [8.009776000976562500e+02, 0, 4.731800537109375000e+02, 0, 8.009776000976562500e+02, 3.628266601562500000e+02, 0, 0, 1]

sequences: [
"my_house_long",
]

category: [
"wall",
"floor",
"ceiling",
"chair",
"table",
"TV_screen",
"bed",
"window",
"lightings",
"other",
]
12 changes: 12 additions & 0 deletions Config/semantic_kitti.yaml
@@ -0,0 +1,12 @@
num_classes: 20
data_dir: "" # defined in SPVCNN kitti data config
feature_dir: "" # defined in SPVCNN kitti data config
pca_path: "" # never used
feature_size: 96
grid_mask: True
down_sample_feature: False
subsample_points: 1
raw_data: False
intrinsic: [] # don't use intrinsic
sequences: [] # defined in the SPVCNN kitti data config; the val split contains only sequence 08
category: [] # defined in SPVCNN kitti data config
118 changes: 118 additions & 0 deletions Data/KITTI_SPVCNN.py
@@ -0,0 +1,118 @@
import torch
import yaml
import importlib

from easydict import EasyDict

from torch.utils.data import Dataset
from TwoDPASS.dataloader.dataset import get_model_class, get_collate_class
from TwoDPASS.dataloader.pc_dataset import get_pc_model_class

def load_yaml(file_name):
    with open(file_name, 'r') as f:
        try:
            config = yaml.load(f, Loader=yaml.FullLoader)
        except AttributeError:
            # older PyYAML versions have no FullLoader
            config = yaml.load(f)
    return config


def parse_config():
    args = {}
    args['config_path'] = 'TwoDPASS/config/SPVCNN-semantickitti.yaml'
    args['seed'] = 0
    args['gpu'] = (0,)

    # training
    args['log_dir'] = 'default'
    args['monitor'] = 'val/mIoU'
    args['stop_patience'] = 50
    args['save_top_k'] = 1
    args['check_val_every_n_epoch'] = 1
    args['SWA'] = False
    args['baseline_only'] = False
    # testing
    args['test'] = True
    args['fine_tune'] = False
    args['pretrain2d'] = False
    args['num_vote'] = 1
    args['submit_to_server'] = False
    args['checkpoint'] = 'TwoDPASS/pretrained/SPVCNN/best_model.ckpt'
    # debug
    args['debug'] = False

    config = load_yaml(args['config_path'])
    config.update(args)  # override the configuration using the value in args

    # voting test
    if args['test']:
        config['dataset_params']['val_data_loader']['batch_size'] = args['num_vote']
    if args['num_vote'] > 1:
        config['dataset_params']['val_data_loader']['rotate_aug'] = True
        config['dataset_params']['val_data_loader']['transform_aug'] = True
    if args['debug']:
        config['dataset_params']['val_data_loader']['batch_size'] = 2
        config['dataset_params']['val_data_loader']['num_workers'] = 0

    return EasyDict(config)

class KITTI_SPVCNN_config():
    def __init__(self) -> None:
        self.config = parse_config()

class KITTI_SPVCNN(Dataset):
    def __init__(self, device, grid_params, grid_mask=True) -> None:
        super().__init__()
        self.device = device
        self.grid_mask = grid_mask
        self._grid_size = grid_params['grid_size']
        self.coor_ranges = grid_params['min_bound'] + grid_params['max_bound']
        self.voxel_sizes = [abs(self.coor_ranges[3] - self.coor_ranges[0]) / self._grid_size[0],
                            abs(self.coor_ranges[4] - self.coor_ranges[1]) / self._grid_size[1],
                            abs(self.coor_ranges[5] - self.coor_ranges[2]) / self._grid_size[2]]
        self.min_bound = torch.tensor(self.coor_ranges[:3])
        self.max_bound = torch.tensor(self.coor_ranges[3:])
        self.config = parse_config()
        self.init_dataset()
        self.init_model()

    def init_dataset(self):
        pc_dataset = get_pc_model_class(self.config['dataset_params']['pc_dataset_type'])
        dataset_type = get_model_class(self.config['dataset_params']['dataset_type'])
        val_config = self.config['dataset_params']['val_data_loader']
        val_pt_dataset = pc_dataset(self.config, data_path=val_config['data_path'], imageset='val', num_vote=val_config["batch_size"])

        self.kitti_dataset = dataset_type(val_pt_dataset, self.config, val_config, num_vote=val_config["batch_size"])
        self.collate_fn = get_collate_class(self.config['dataset_params']['collate_type'])

    def init_model(self):
        model_file = importlib.import_module('TwoDPASS.network.' + self.config['model_params']['model_architecture'])
        my_model = model_file.get_model(self.config)  # instantiate the model architecture
        my_model = my_model.load_from_checkpoint(self.config.checkpoint, config=self.config, strict=(not self.config.pretrain2d))
        my_model = my_model.eval()

        self.my_model = my_model.to(self.device)

    def __len__(self):
        return len(self.kitti_dataset)

    def __getitem__(self, idx):
        return self.get_test_item(idx)

    def get_test_item(self, idx):
        data_dict = self.collate_fn([self.kitti_dataset[idx]])
        with torch.no_grad():
            features = self.my_model.encode_points(data_dict, self.device)

        points = data_dict['points'].F
        gt_labels = data_dict['targets_mapped'].F.reshape(-1, 1)

        # only take points in the grid
        if self.grid_mask:
            grid_point_mask = torch.all((points < self.max_bound) & (points >= self.min_bound), axis=1)
            points = points[grid_point_mask]
            gt_labels = gt_labels[grid_point_mask]
            features = features[grid_point_mask]

        return data_dict['global_pose'][0], points, features, gt_labels, data_dict['scene_id'][0], data_dict['frame_id'][0]
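
For context, a hypothetical usage sketch of the dataset class above (not part of this commit; the grid values are copied from Config/LatentBKI_kitti.yaml, and the TwoDPASS checkpoint path from parse_config is assumed to exist):

import torch
from Data.KITTI_SPVCNN import KITTI_SPVCNN

# grid values copied from Config/LatentBKI_kitti.yaml
grid_params = {
    'grid_size': [400.0, 400.0, 26.0],
    'min_bound': [-40.0, -40.0, -2.6],
    'max_bound': [40.0, 40.0, 2.6],
}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset = KITTI_SPVCNN(device, grid_params, grid_mask=True)

# each item returns: global pose, in-grid points, per-point SPVCNN features (96-dim per Config/semantic_kitti.yaml), labels, scene id, frame id
pose, points, features, gt_labels, scene_id, frame_id = dataset[0]
print(points.shape, features.shape, gt_labels.shape)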
