-
Notifications
You must be signed in to change notification settings - Fork 3
Bdv update library #118
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Bdv update library #118
Changes from 5 commits
c669c0f
d19462d
679e5d3
24126c7
4700635
8137c3c
db65398
a03a62e
ee68e96
6e7bdfe
6308fbe
a17e0a9
451f84c
5611cb9
4ebc024
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -21,7 +21,6 @@ | |
| from .. import pathtools | ||
| from ..log import LOG as log | ||
|
|
||
|
|
||
| # internal template strings used in string formatting (note: the `"""@private"""` | ||
| # pseudo-decorator is there to instruct [pdoc] to omit those variables when generating | ||
| # API documentation): | ||
|
|
@@ -902,6 +901,7 @@ def define_dataset_manual( | |
| image_file_pattern, | ||
| dataset_organisation, | ||
| definition_opts=None, | ||
| list_files=None, | ||
| ): | ||
| """Run "Define Multi-View Dataset" using the "Manual Loader" option. | ||
|
|
||
|
|
@@ -920,20 +920,23 @@ def define_dataset_manual( | |
| Looks like "timepoints_=%s-%s channels_=0-%s tiles_=%s-%s" | ||
| definition_opts : dict | ||
| Dictionary containing the details about the file repartitions. | ||
| list_files : list of str, optional | ||
| An optional list of file names to pass directly to the manual loader in | ||
| "show_list" mode. When provided, the function will include the filenames | ||
| in the options string instead of relying on a file pattern; items should | ||
| be either full paths or relative to the selected `source_directory`. | ||
| """ | ||
|
|
||
| xml_filename = project_filename + ".xml" | ||
| # xml_filename = project_filename + ".xml" | ||
|
|
||
| if definition_opts is None: | ||
| definition_opts = DefinitionOptions() | ||
| definition_opts = bdv.DefinitionOptions() | ||
|
|
||
| temp = os.path.join(source_directory, project_filename + "_temp") | ||
| os.path.join(temp, project_filename) | ||
| show_list_options = "" if not list_files else "show_list " + " ".join(list_files) | ||
|
|
||
| options = ( | ||
| "define_dataset=[Manual Loader (Bioformats based)] " | ||
| + "project_filename=[" | ||
| + xml_filename | ||
| + project_filename | ||
| + "] " | ||
| + "_____" | ||
| + definition_opts.fmt_acitt_options() | ||
|
|
@@ -943,11 +946,12 @@ def define_dataset_manual( | |
| + " " | ||
| + "image_file_pattern=" | ||
| + image_file_pattern | ||
| + " " | ||
| + dataset_organisation | ||
| + " " | ||
| + "calibration_type=[Same voxel-size for all views] " | ||
| + "calibration_definition=[Load voxel-size(s) from file(s)] " | ||
| # + "imglib2_data_container=[ArrayImg (faster)]" | ||
| + show_list_options | ||
| ) | ||
|
|
||
| log.debug("Manual dataset definition options: <%s>", options) | ||
|
|
@@ -991,7 +995,7 @@ def resave_as_h5( | |
| """ | ||
|
|
||
| if not processing_opts: | ||
| processing_opts = ProcessingOptions() | ||
| processing_opts = bdv.ProcessingOptions() | ||
|
lguerard marked this conversation as resolved.
Outdated
|
||
|
|
||
| if use_deflate_compression: | ||
| use_deflate_compression_arg = "use_deflate_compression " | ||
|
|
@@ -1032,7 +1036,7 @@ def resave_as_h5( | |
| ) | ||
|
|
||
| log.debug("Resave as HDF5 options: <%s>", options) | ||
| IJ.run("As HDF5", str(options)) | ||
| IJ.run("Resave as HDF5 (local)", str(options)) | ||
|
|
||
|
|
||
| def flip_axes(source_xml_file, x=False, y=True, z=False): | ||
|
|
@@ -1593,58 +1597,146 @@ def fuse_dataset( | |
| def fuse_dataset_bdvp( | ||
| project_path, | ||
| command, | ||
| processing_opts=None, | ||
| result_path=None, | ||
| compression="LZW", | ||
| fusion_method="SMOOTH AVERAGE", | ||
| range_channels="", | ||
| range_slices="", | ||
| range_frames="", | ||
| n_resolution_levels=5, | ||
| use_lzw_compression=True, | ||
| split_slices=False, | ||
| split_channels=False, | ||
| split_frames=False, | ||
| override_z_ratio=False, | ||
| z_ratio=1.0, | ||
| use_interpolation=True, | ||
| ): | ||
| """Export a BigDataViewer project using the BIOP Kheops exporter. | ||
|
|
||
| Use the BIOP Kheops exporter to convert a BigDataViewer project into | ||
| OME-TIFF files, with optional compression. | ||
| Use BIOP Kheops exporter to fuse a BigDataViewer project and save | ||
| it as pyramidal OME-TIFF. | ||
|
|
||
| Parameters | ||
| ---------- | ||
| project_path : str | ||
| Full path to the BigDataViewer XML project file. | ||
| command : CommandService | ||
| The Scijava CommandService instance to execute the export command. | ||
| processing_opts : ProcessingOptions, optional | ||
| Options defining which parts of the dataset to process. If None, default | ||
| processing options will be used (process all angles, channels, etc.). | ||
| result_path : str, optional | ||
| Path where to store the exported files. If None, files will be saved in | ||
| the same directory as the input project. | ||
| compression : str, optional | ||
| Compression method to use for the TIFF files. Default is "LZW". | ||
| Path where to store the exported files. If ``None``, files will be | ||
| saved in the same directory as the input project. | ||
| fusion_method : str, optional | ||
| Fusion method to use for exporting (default ``"SMOOTH AVERAGE"``). | ||
| range_channels : str, optional | ||
| Channels to include in the export. Default is all channels. | ||
| range_slices : str, optional | ||
| Slices to include in the export. Default is all slices. | ||
| range_frames : str, optional | ||
| Frames to include in the export. Default is all frames. | ||
| n_resolution_levels : int, optional | ||
| Number of pyramid resolution levels to use for the export. Default is 5. | ||
| use_lzw_compression : bool, optional | ||
| If True, compresses the output file using LZW. Default is True. | ||
| split_slices : bool, optional | ||
| If True, splits the output into separate files for each slice. Default is False. | ||
| split_channels : bool, optional | ||
| If True, splits the output into separate files for each channel. Default is False. | ||
| split_frames : bool, optional | ||
| If True, splits the output into separate files for each frame. Default is False. | ||
| override_z_ratio : bool, optional | ||
| If True, overrides the default z_ratio value. Default is False. | ||
| z_ratio : float, optional | ||
| The z ratio to use for the export. Default is 1.0. | ||
| use_interpolation : bool, optional | ||
| If True, interpolates during fusion (takes ~4x longer). Default is True. | ||
|
|
||
| Notes | ||
| ----- | ||
| This function requires the PTBIOP update site to be enabled in Fiji/ImageJ. | ||
| This function requires the PTBIOP update site to be enabled in Fiji/ | ||
| ImageJ. | ||
|
|
||
| Examples | ||
| -------- | ||
| fuse_dataset_bdvp(xml_input, cs) | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This example doesn't provide any additional information, as it is just the simplest case of how to call this function. Possible options:
Note: please use the right syntax for docstring examples (they need to be prefixed with ">>>").
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I have added 2 examples, please let me know if I should add more! |
||
| """ | ||
| if processing_opts is None: | ||
| processing_opts = ProcessingOptions() | ||
|
|
||
| file_info = pathtools.parse_path(project_path) | ||
|
|
||
| if not result_path: | ||
| result_path = file_info["path"] | ||
| # if not os.path.exists(result_path): | ||
| # os.makedirs(result_path) | ||
|
|
||
| command.run( | ||
| FuseBigStitcherDatasetIntoOMETiffCommand, | ||
| True, | ||
| "image", | ||
| True, # seems to indicate whether to run the command headless or not | ||
| "xml_bigstitcher_file", | ||
| project_path, | ||
| "output_dir", | ||
| "output_path_directory", | ||
| result_path, | ||
| "compression", | ||
| compression, | ||
| "subset_channels", | ||
| "", | ||
| "subset_slices", | ||
| "", | ||
| "subset_frames", | ||
| "", | ||
| "compress_temp_files", | ||
| False, | ||
| ) | ||
| "range_channels", | ||
| range_channels, | ||
| "range_slices", | ||
| range_slices, | ||
| "range_frames", | ||
| range_frames, | ||
| "n_resolution_levels", | ||
| n_resolution_levels, | ||
| "fusion_method", | ||
| fusion_method, | ||
| "use_lzw_compression", | ||
| use_lzw_compression, | ||
| "split_slices", | ||
| split_slices, | ||
| "split_channels", | ||
| split_channels, | ||
| "split_frames", | ||
| split_frames, | ||
| "override_z_ratio", | ||
| override_z_ratio, | ||
| "z_ratio", | ||
| z_ratio, | ||
| "use_interpolation", | ||
| use_interpolation, | ||
| ).get() | ||
|
|
||
|
|
||
| def join_files_with_channel_suffix(files, nchannels): | ||
|
lguerard marked this conversation as resolved.
Outdated
|
||
| """Join filenames and append channel-suffixed copies. | ||
|
|
||
| For each filename in ``files``, return a list where original filenames | ||
| appear first followed by copies with suffixes ``_0`` .. ``_{n-2}`` | ||
| (inserted before the file extension). This is suitable for passing | ||
| to Bioformats/Jython in ``show_list`` mode when each channel is stored | ||
| as a separate file. | ||
|
|
||
| Parameters | ||
| ---------- | ||
| files : list or tuple | ||
| List or tuple of filename strings. | ||
| nchannels : int | ||
| Number of channels (>=1). If ``nchannels`` is 1 no suffixed copies | ||
| are added. | ||
|
|
||
| Returns | ||
| ------- | ||
| list of str | ||
| Ordered list of filenames (originals then suffixed copies). | ||
| """ | ||
| import os | ||
|
|
||
| if not files: | ||
| return "" | ||
| try: | ||
| x = range(int(nchannels) - 1) | ||
| except Exception: | ||
| x = [0] | ||
| suff = "_" + str(x) | ||
| out = [] | ||
| # keep original order, then add suffixed copies | ||
| for f in files: | ||
| out.append(f) | ||
| for i in x: | ||
| suff = "_" + str(i) | ||
| for f in files: | ||
| base, ext = os.path.splitext(f) | ||
| out.append(base + suff + ext) | ||
| return out | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This is expected to fail as
DefinitionOptions is a class defined in this very file here. ➡️ Nicely illustrates the need for unit tests.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I have resolved the other comment, but will leave this one open for the unit test part.