Nabu batch processing from a Python script

connect to the slurm cluster

ssh -XC slurm-cluster

define the cluster node configuration you want to use

[1]:
cluster_config = {  # slurm cluster configuration to be used for each reconstruction
    "cpu-per-task": 4,
    "n_tasks": 1,
    "memory": 256,
    "partition": "p9gpu",
    "n_gpus": 1,
    "job_name": "nabu-batch-processing",
    "python_venv": "/scisoft/tomotools/activate.sh dev",
    # "modules": "tomotools",  # either the module or the python virtual environment should be provided
}

define the nabu settings which are different from the default values

for example, here we change the default file format, the dataset location (mandatory for batch processing) and the algorithm used to compute the center of rotation

[2]:
def get_nabu_config(dataset_location, output_location):
    """
    :return: parameters tuned relative to the default configuration of nabu.
    Other settings can be tuned if needed by looking up each key directly in the generated nabu configuration file.
    """
    return {
        "dataset": {
            "location": dataset_location,
        },
        "reconstruction": {
            # Rotation axis position. It can be a number or the name of an estimation method (empty value means the middle of the detector).
            # The following methods are available to find automatically the Center of Rotation (CoR):
            #  - centered : a fast and simple auto-CoR method. It only works when the CoR is not far from the middle of the detector. It does not work for half-tomography.
            #  - global : a slow but robust auto-CoR.
            #  - sliding-window : semi-automatically find the CoR with a sliding window. You have to specify on which side the CoR is (left, center, right). Please see the 'cor_options' parameter.
            #  - growing-window : automatically find the CoR with a sliding-and-growing window. You can tune the option with the parameter 'cor_options'.
            #  - sino-coarse-to-fine: Estimate CoR from sinogram. Only works for 360 degrees scans.
            #  - composite-coarse-to-fine: Estimate CoR from composite multi-angle images. Only works for 360 degrees scans.
            "rotation_axis_position": "sliding-window",
        },
        "output": {
            "file_format": "tiff",
            "location": output_location,
        },
    }
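The comments above mention the 'cor_options' parameter. As a hypothetical illustration (the exact option string depends on your nabu version, so check the 'cor_options' key in the generated configuration file), a side hint for the sliding-window estimator could be added to the returned dictionary like this:

# hypothetical example: hint the sliding-window CoR estimator about the side of the
# center of rotation; the exact option string depends on your nabu version
config = get_nabu_config(
    dataset_location="/path/to/scan.nx",  # hypothetical path
    output_location="/path/to/scan_rec",  # hypothetical path
)
config["reconstruction"]["cor_options"] = "side='right'"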

define the remote processing to be done for each NXtomo (scan)

[3]:
import os
from nabu.pipeline.config import generate_nabu_configfile
from nabu.pipeline.fullfield.nabu_config import (
    nabu_config as nabu_fullfield_default_config,
)
from sluurp.job import SBatchScriptJob
from sluurp.executor import submit as submit_to_slurm_cluster
import uuid
from datetime import datetime


def treat_single_nx_tomo(nx_tomo_file):
    """
    treat a single .nx file
    """
    print(f"treat {nx_tomo_file}_file")
    # step1: save nabu configuration to file (near .nx file)
    nabu_conf_file = nx_tomo_file.replace(".nx", "_nabu.cfg")
    output_location = nx_tomo_file.replace(".nx", "_rec")
    generate_nabu_configfile(
        nabu_conf_file,
        nabu_fullfield_default_config,
        config=get_nabu_config(dataset_location=nx_tomo_file, output_location=output_location),
        options_level="advanced",
    )
    # step 2: create the sbatch job for slurm
    now_str = str(datetime.now()).replace(" ", "_")
    script_name = os.path.splitext(os.path.basename(nx_tomo_file))[0] + f"_{now_str}.sh"
    job = SBatchScriptJob(
        slurm_config=cluster_config,
        script=(
            f"python3 -m nabu.app.reconstruct {nabu_conf_file} --slice middle",
        ),  # TODO: remove '--slice middle' to reconstruct the full volume
        script_path=os.path.join(
            os.path.dirname(nx_tomo_file), "slurm_scripts", script_name
        ),
        clean_script=False,
        working_directory=os.path.realpath(os.path.dirname(nx_tomo_file)),
    )
    future_slurm_job = submit_to_slurm_cluster(job)
    # print(f"wait for {nx_tomo_file}")
    # future_slurm_job.result()
    # print(f"{nx_tomo_file} finished")
    return future_slurm_job

execute the batch processing - to be done on a slurm client (slurm cluster front end)

[4]:
from glob import glob

# define the NXtomo files to be processed
nxtomo_s = glob("/data/visitor/esXXXX/YY/ZZZZ/PROCESSED_DATA/*/*/*.nx")
# ... or provide them explicitly instead of using glob:
# nxtomo_s = (
#     "/data/visitor/esXXXX/YY/ZZZZ/PROCESSED_DATA/*/*/aaa.nx",
# )
for nxtomo in nxtomo_s:
    treat_single_nx_tomo(nxtomo)
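If you want to block until every reconstruction has finished, you can collect the futures returned by sluurp. A minimal sketch, assuming `treat_single_nx_tomo` returns the future as in the cell above:

# submit every scan, then wait for all the slurm jobs to complete
futures = {nxtomo: treat_single_nx_tomo(nxtomo) for nxtomo in nxtomo_s}
for nxtomo, future in futures.items():
    print(f"wait for {nxtomo}")
    future.result()  # blocks until the corresponding slurm job is done
    print(f"{nxtomo} finished")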

note

to simplify this tutorial, we consider that you already have the NXtomo (.nx) files. If necessary, you can do some preprocessing to retrieve the Bliss file from RAW_DATA and then obtain the NXtomo from nxtomomill, either from its command line interface or from its Python API
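For reference, a minimal sketch of such a conversion using the nxtomomill command line interface (the paths are hypothetical placeholders to adapt to your proposal and sample):

import subprocess

# hypothetical placeholder paths: adapt them to your proposal / sample
bliss_file = "/data/visitor/esXXXX/YY/ZZZZ/RAW_DATA/sample/scan_0001/scan_0001.h5"
nx_file = "/data/visitor/esXXXX/YY/ZZZZ/PROCESSED_DATA/sample/scan_0001/scan_0001.nx"

# convert the Bliss (HDF5) acquisition file to an NXtomo file with the nxtomomill CLI
subprocess.run(["nxtomomill", "h52nx", bliss_file, nx_file], check=True)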
