Storm 1.11.1.1
A Modern Probabilistic Model Checker
Loading...
Searching...
No Matches
ArchiveWriter.h
Go to the documentation of this file.
1#pragma once
2
3#include <bit>
4#include <filesystem>
5#include <memory>
6#include <span>
7#include <string>
8#include <vector>
9
10#include "storm-config.h"
16
17#ifdef STORM_HAVE_LIBARCHIVE
18#include <archive.h>
19#include <archive_entry.h>
20#endif
21
22namespace storm {
23
24namespace storage {
25class BitVector;
26}
27
28namespace io {
29
30namespace detail {
// Concept satisfied by any input range whose element type is arithmetic
// (integral or floating-point). Such ranges can be serialized into an
// archive as raw binary data.
31template<typename R>
32concept ArchiveWritableRange = std::ranges::input_range<R> && std::is_arithmetic_v<std::ranges::range_value_t<R>>;
33
34template<typename R, std::endian Endianness>
36 ArchiveWritableRange<R> && std::same_as<std::remove_cvref_t<R>, std::vector<std::ranges::range_value_t<R>>> &&
37 !std::is_same_v<std::ranges::range_value_t<R>, bool> && (Endianness == std::endian::native || sizeof(std::ranges::range_value_t<R>) == 1);
38
39} // namespace detail
40
41static_assert(std::endian::native == std::endian::little || std::endian::native == std::endian::big, "This code is not supported for mixed endian systems.");
42
 44 public:
    /*!
     * Opens (creates) the archive file at the given location.
     * @param filename path of the archive file to create on disk
     * @param compression compression mode to use (NOTE(review): CompressionMode is
     *        declared outside this view — confirm the available modes there).
     */
 48 ArchiveWriter(std::filesystem::path const& filename, CompressionMode const compression);
 49
    /*!
     * Adds a (sub-)directory entry to the archive.
     * @param archivePath path of the directory inside the archive
     */
 53 void addDirectory(std::filesystem::path const& archivePath);
 54
    /*!
     * Adds a file with the given raw contents to the archive.
     * @param archivePath path of the file inside the archive
     * @param data pointer to the first byte of the file contents
     * @param size number of bytes to write
     */
 60 void addFile(std::filesystem::path const& archivePath, char const* data, std::size_t const size);
 61
    /*!
     * Adds a file containing the binary encoding of the given data, writing the
     * range's memory directly (no staging buffer, no byte swap).
     * NOTE(review): the extraction dropped a line here (internal line 68) — based on
     * the buffered overload below it is presumably
     * `requires(detail::ArchiveWritableWithoutBuffer<Range, Endianness>)`; verify
     * against the repository source.
     * @param archivePath path of the file inside the archive
     * @param data contiguous vector of arithmetic values to serialize
     */
 67 template<detail::ArchiveWritableRange Range, std::endian Endianness = std::endian::little>
 69 void addBinaryFile(std::filesystem::path const& archivePath, Range&& data) {
    // Total payload size in bytes; the element storage is reinterpreted as raw chars.
 70 auto const numBytes = data.size() * sizeof(std::ranges::range_value_t<Range>);
 71 addFile(archivePath, reinterpret_cast<char const*>(data.data()), numBytes);
 72 }
73
79 template<detail::ArchiveWritableRange Range, std::endian Endianness = std::endian::little>
80 requires(!detail::ArchiveWritableWithoutBuffer<Range, Endianness> && !std::is_same_v<std::ranges::range_value_t<Range>, bool>)
81 void addBinaryFile(std::filesystem::path const& archivePath, Range const& data) {
82 using T = std::ranges::range_value_t<Range>;
83 auto const numBytes = data.size() * sizeof(T);
84 static_assert(BufferSize % sizeof(T) == 0, "Buffer size must be a multiple of sizeof(T).");
85 std::vector<T> buffer(BufferSize / sizeof(T)); // todo check array
86
87 uint64_t startOfChunk = 0;
88 auto getNextChunk = [&data, &buffer, &startOfChunk]() {
89 auto const endOfChunk = std::min<uint64_t>(startOfChunk + buffer.size(), data.size());
90 auto bufferIt = buffer.begin();
91 for (uint64_t i = startOfChunk; i < endOfChunk; ++i) {
92 if constexpr (std::endian::native != Endianness && sizeof(T) > 1) {
93 // copy into buffer with byteswap
94 *bufferIt = storm::utility::byteSwap(data[i]);
95 } else {
96 // copy into buffer
97 *bufferIt = data[i];
98 }
99 ++bufferIt;
100 }
101 std::span<const char> chunk(reinterpret_cast<const char*>(buffer.data()), (endOfChunk - startOfChunk) * sizeof(T));
102 startOfChunk = endOfChunk;
103 return chunk;
104 };
105 addFileFromChunks(archivePath, getNextChunk, numBytes);
106 }
107
    /*!
     * Adds a file containing a binary encoding of the given bit vector.
     * @param archivePath path of the file inside the archive
     * @param data bit vector to serialize (encoding is defined in the .cpp, not visible here)
     */
 113 void addBinaryFile(std::filesystem::path const& archivePath, storm::storage::BitVector const& data);
 114
    /*!
     * Adds a text file with the given string contents to the archive.
     * @param archivePath path of the file inside the archive
     * @param data the file contents
     */
 120 void addTextFile(std::filesystem::path const& archivePath, std::string const& data);
 121
122 private:
129 template<typename F>
130 void addFileFromChunks(std::string const& archivePath, F getNextChunk, size_t const size) {
131#ifdef STORM_HAVE_LIBARCHIVE
132 archive_entry* entry = archive_entry_new();
133 STORM_LOG_THROW(entry, storm::exceptions::FileIoException, "Failed to create archive entry.");
134
135 // Fill in metadata: path, file size, file type, permissions, etc.
136 archive_entry_set_pathname(entry, archivePath.c_str());
137 archive_entry_set_size(entry, size);
138 archive_entry_set_filetype(entry, AE_IFREG);
139 archive_entry_set_perm(entry, 0777);
140
141 // Write the header (metadata) to the archive
142 checkResult(archive_write_header(_archive.get(), entry), entry);
143
144 // Write the file contents
145 uint64_t bytesWritten = 0;
146 for (auto chunk = getNextChunk(); chunk.size() > 0 && bytesWritten < size; chunk = getNextChunk()) {
147 auto res = archive_write_data(_archive.get(), chunk.data(), chunk.size());
148 checkResult(res, entry);
149 bytesWritten += res;
150 }
151 STORM_LOG_WARN_COND(bytesWritten == size, "When writing file '" << archivePath << "' to archive, " << bytesWritten << " bytes were written but " << size
152 << " bytes were expected.");
153
154 // Free the entry metadata after we finish writing
155 archive_entry_free(entry);
156#else
157 STORM_LOG_THROW(false, storm::exceptions::MissingLibraryException, "Writing archives is not supported. Storm is compiled without LibArchive.");
158#endif
159 }
160
161#ifdef STORM_HAVE_LIBARCHIVE
    // Checks a libarchive return code and throws on error; the optional entry is
    // presumably used for cleanup and/or error reporting — implementation lives in the .cpp.
 165 void checkResult(auto resultCode, archive_entry* entry = nullptr) const;
 166
    // Custom deleter so the libarchive handle is released through the proper
    // libarchive teardown call (defined in the .cpp).
 167 struct ArchiveDeleter {
 168 void operator()(archive* arch) const noexcept;
 169 };
 170
    // Owning handle to the underlying libarchive writer.
 171 std::unique_ptr<archive, ArchiveDeleter> _archive;
 172#endif
 173
    // Size (in bytes) of the staging buffer used by the buffered addBinaryFile overload.
 174 static constexpr size_t BufferSize = 8192;
175};
176} // namespace io
177} // namespace storm
void addBinaryFile(std::filesystem::path const &archivePath, Range &&data)
Add a file to the archive using a binary encoding of the provided data.
void addBinaryFile(std::filesystem::path const &archivePath, Range const &data)
Add a file to the archive using a binary encoding of the provided data.
void addTextFile(std::filesystem::path const &archivePath, std::string const &data)
Add a text file to the archive.
void addFile(std::filesystem::path const &archivePath, char const *data, std::size_t const size)
Add a file to the archive.
void addDirectory(std::filesystem::path const &archivePath)
Adds a (sub-) directory to the archive.
A bit vector that is internally represented as a vector of 64-bit values.
Definition BitVector.h:16
#define STORM_LOG_WARN_COND(cond, message)
Definition macros.h:38
#define STORM_LOG_THROW(cond, exception, message)
Definition macros.h:30
storage::BitVector BitVector
T byteSwap(T const t)
Swaps the byte representation of the given value, (i.e., swaps endianness) Taken from https://en....