bug-fixes after testing on qiita-rc #81

Merged · 3 commits · Feb 27, 2024
qp_klp/Step.py (2 changes: 1 addition & 1 deletion)
@@ -916,7 +916,7 @@ def execute_pipeline(self, qclient, increment_status, update=True,
         self.convert_bcl_to_fastq()

         increment_status()
-        if "QCJob" not in skip_steps:
+        if "NuQCJob" not in skip_steps:
             self.quality_control()

         increment_status()
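The guard above only has an effect if the strings in `skip_steps` match the pipeline's current job names; with the stale "QCJob" name, the quality-control stage would re-run even when it was meant to be skipped. A minimal sketch of the gating pattern, using illustrative step names and stand-in functions rather than the plugin's real API:

```python
# Minimal sketch of gating pipeline stages on a skip list.
# Step names and stage functions are stand-ins, not qp_klp's real API.
def run_pipeline(skip_steps):
    def quality_control():
        print("running NuQCJob")

    def generate_fastqc_reports():
        print("running FastQCJob")

    # A stage is skipped only when its *current* job name is listed;
    # a stale name (e.g. "QCJob") would silently re-run the stage.
    if "NuQCJob" not in skip_steps:
        quality_control()

    if "FastQCJob" not in skip_steps:
        generate_fastqc_reports()


run_pipeline(skip_steps=["NuQCJob"])  # only the FastQC stage runs
```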
qp_klp/klp.py (29 changes: 12 additions & 17 deletions)
@@ -79,17 +79,18 @@ def sequence_processing_pipeline(qclient, job_id, parameters, out_dir):
     # successful.

     # at minimum, ConvertJob needs to have been successful.
-    is_restart = True if exists(join(out_dir, 'NuQCJob')) else False
+    out_path = partial(join, out_dir)
+    is_restart = True if exists(out_path('NuQCJob')) else False

     if is_restart:
         # Assume ConvertJob directory exists and parse the job-script found
         # there. If this is a restart, we won't be given the run-identifier,
         # the lane number, and the sample-sheet as input parameters.
         some_path = join(out_dir, 'ConvertJob', 'ConvertJob.sh')
         result = ConvertJob.parse_job_script(some_path)
-        run_identifier = split(result['out_directory'])[-1]
-        user_input_file = result['sample_sheet_path']
-        sheet = load_sample_sheet(user_input_file)
+        run_identifier = split(result['run_directory'])[-1]
+        uif_path = result['sample_sheet_path']
+        sheet = load_sample_sheet(uif_path)
         # on Amplicon runs, lane_number is always 1, and this will be
         # properly reflected in the dummy sample-sheet as well.
         lane_number = sheet.get_lane_number()
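The restart branch now derives everything it needs from artifacts already in out_dir: the presence of a NuQCJob directory marks the run as a restart, and the saved ConvertJob.sh is parsed to recover the run directory and sample-sheet path. A rough sketch of that recovery pattern, with a hypothetical parse_job_script() stand-in rather than ConvertJob's actual parser:

```python
# Sketch: recover run metadata from artifacts left by a previous attempt.
# parse_job_script() is a stand-in; ConvertJob's real parser differs.
from os.path import basename, exists, join


def parse_job_script(script_path):
    # Illustrative parser: pull paths recorded as comments in the job script.
    info = {}
    with open(script_path) as f:
        for line in f:
            if line.startswith('# run_directory='):
                info['run_directory'] = line.split('=', 1)[1].strip()
            elif line.startswith('# sample_sheet='):
                info['sample_sheet_path'] = line.split('=', 1)[1].strip()
    return info


def restart_context(out_dir):
    # A NuQCJob directory can only exist if ConvertJob already completed once.
    if not exists(join(out_dir, 'NuQCJob')):
        return None
    result = parse_job_script(join(out_dir, 'ConvertJob', 'ConvertJob.sh'))
    return {'run_identifier': basename(result['run_directory']),
            'sample_sheet_path': result['sample_sheet_path']}
```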
@@ -113,23 +114,17 @@ def sequence_processing_pipeline(qclient, job_id, parameters, out_dir):
         user_input_file = parameters.pop('sample_sheet')
         lane_number = parameters.pop('lane_number')

-    if {'body', 'content_type', 'filename'} != set(user_input_file):
-        return False, None, ("This doesn't appear to be a valid sample sheet "
-                             "or mapping file; please review.")
-
-    out_path = partial(join, out_dir)
-    final_results_path = out_path('final_results')
-    makedirs(final_results_path, exist_ok=True)
-    # replace any whitespace in the filename with underscores
-    uif_path = out_path(user_input_file['filename'].replace(' ', '_'))

     if is_restart:
         pass
     else:
+        if {'body', 'content_type', 'filename'} != set(user_input_file):
+            return False, None, ("This doesn't appear to be a valid sample "
+                                 "sheet or mapping file; please review.")
+        uif_path = out_path(user_input_file['filename'].replace(' ', '_'))
         # save raw data to file
         with open(uif_path, 'w') as f:
             f.write(user_input_file['body'])

+    final_results_path = out_path('final_results')
+    makedirs(final_results_path, exist_ok=True)

     if Pipeline.is_sample_sheet(uif_path):
         with open(uif_path, 'r') as f:
             assay = [x for x in f.readlines() if 'Assay' in x]
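The rest of the hunk narrows the upload handling to fresh runs: Qiita hands the plugin a {'body', 'content_type', 'filename'} payload only when a sheet is actually uploaded, so on a restart the validity check and the file write are skipped and uif_path keeps the value recovered from ConvertJob.sh. A compressed sketch of that branching (function name and error handling are simplified; only the dict shape comes from the diff):

```python
# Sketch: validate and persist the uploaded sheet only on a fresh run.
# On a restart, uif_path already points at the sheet written previously.
from os import makedirs
from os.path import join


def stage_sample_sheet(out_dir, user_input_file, uif_path, is_restart):
    if not is_restart:
        # Qiita-style upload payload: body, content type and filename.
        if {'body', 'content_type', 'filename'} != set(user_input_file):
            raise ValueError("not a valid sample sheet or mapping file")
        uif_path = join(out_dir, user_input_file['filename'].replace(' ', '_'))
        with open(uif_path, 'w') as f:
            f.write(user_input_file['body'])

    # final_results is (re)created either way; exist_ok makes restarts safe.
    makedirs(join(out_dir, 'final_results'), exist_ok=True)
    return uif_path
```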
qp_klp/tests/data/process_all_fastq_files.sh (13 changes: 2 additions & 11 deletions)
@@ -12,18 +12,13 @@

echo "---------------"
echo "Run details:"
echo "$SLURM_JOB_NAME $SLURM_JOB_ID $SLURMD_NODENAME"
echo "$SLURM_JOB_NAME $SLURM_JOB_ID $SLURMD_NODENAME $SLURM_ARRAY_TASK_ID"
echo "---------------"

if [[ -z "${SLURM_ARRAY_TASK_ID}" ]]; then
echo "Not operating within an array"
exit 1
fi

if [[ "${SLURM_ARRAY_TASK_MIN}" -ne 1 ]]; then
echo "Min array ID is not 1"
exit 1
fi
if [[ -z ${MMI} ]]; then
echo "MMI is not set"
exit 1
@@ -56,11 +51,7 @@ fi
 # DO NOT do this casually. Only do a clean up like this if
 # you know for sure TMPDIR is what you want.

-# we might got back to this TMPDIR once the slurm scheduler controls it
-# TMPDIR=/dev/shm
-# right now, let's use ${OUTPUT}, note that each worker will create a new tmp
-# folder two lines below
-TMPDIR=${OUTPUT}
+TMPDIR=/dev/shm
 export TMPDIR=${TMPDIR}
 export TMPDIR=$(mktemp -d)
 echo $TMPDIR
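The effect of the TMPDIR change is that `mktemp -d` now runs with TMPDIR=/dev/shm, so each array task gets its own scratch directory on the RAM-backed tmpfs rather than under ${OUTPUT}. The same pattern expressed in Python, as a sketch only (the fixture itself remains a bash script):

```python
# Sketch of the per-task scratch-dir pattern the script sets up:
# point TMPDIR at /dev/shm, then create a private directory beneath it.
import os
import tempfile

os.environ['TMPDIR'] = '/dev/shm'   # base location: RAM-backed tmpfs
scratch = tempfile.mkdtemp()        # honors TMPDIR, like `mktemp -d`
os.environ['TMPDIR'] = scratch      # this worker's own temp directory
print(scratch)
```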