// NOTE(review): extraction artifact — original source line numbers are fused into
// the text below ("32concept", "34template", ...). The `template<typename R>`
// header of the first concept and the name of the second concept (original
// lines 35-36) fall in a gap and are not visible in this chunk.
// Concept: input ranges of arithmetic elements, i.e. element types that can be
// serialized as raw bytes.
32concept ArchiveWritableRange = std::ranges::input_range<R> && std::is_arithmetic_v<std::ranges::range_value_t<R>>;
// Second concept (name in gap): rejects bool element types (std::vector<bool>
// is a packed proxy with no addressable element storage) and requires either
// native byte order or single-byte elements — presumably this gates the direct,
// non-byteswapping addBinaryFile overload below. TODO confirm against the
// missing lines.
34template<
typename R, std::endian Endianness>
37 !std::is_same_v<std::ranges::range_value_t<R>,
bool> && (Endianness == std::endian::native ||
sizeof(std::ranges::range_value_t<R>) == 1);
/// Adds a directory entry at the given path inside the archive.
/// Declaration only; the definition is not part of this chunk.
53 void addDirectory(std::filesystem::path
const& archivePath);
/// Writes `size` bytes starting at `data` as a regular file stored under
/// `archivePath` inside the archive (see its use by addBinaryFile below).
/// Declaration only; the definition is not part of this chunk.
60 void addFile(std::filesystem::path
const& archivePath,
char const* data, std::size_t
const size);
/// Writes the range's elements as one binary archive file, forwarding the
/// range's underlying storage directly to addFile without buffering.
/// Requires .data()/.size() on the range (contiguous storage). No byte
/// swapping happens here, so the Endianness parameter is presumably restricted
/// by the concept above to native order or single-byte elements — the
/// constraining line falls in a gap; TODO confirm.
67 template<detail::ArchiveWritableRange Range, std::endian Endianness = std::endian::little>
69 void addBinaryFile(std::filesystem::path
const& archivePath, Range&& data) {
// Total payload size in bytes: element count times element width.
70 auto const numBytes = data.size() *
sizeof(std::ranges::range_value_t<Range>);
// Reinterpret the contiguous element storage as a plain byte buffer and
// hand it to the raw-bytes writer. (The function's closing brace, original
// line 72, falls in a gap after this chunk's visible lines.)
71 addFile(archivePath,
reinterpret_cast<char const*
>(data.data()), numBytes);
/// Chunked overload: copies elements through a fixed-size staging buffer so
/// their byte order can be converted when the requested Endianness differs
/// from native (see the `if constexpr` below), then streams the chunks into
/// the archive via addFileFromChunks. NOTE(review): original lines 86, 93-100
/// and 103-104 — including the element copy/byteswap loop body and the
/// lambda's return statement — fall in gaps and are not visible here.
79 template<detail::ArchiveWritableRange Range, std::endian Endianness = std::endian::little>
81 void addBinaryFile(std::filesystem::path
const& archivePath, Range
const& data) {
82 using T = std::ranges::range_value_t<Range>;
// Total number of payload bytes announced for the archive entry.
83 auto const numBytes = data.size() *
sizeof(T);
// The staging buffer must hold a whole number of elements.
84 static_assert(BufferSize %
sizeof(T) == 0,
"Buffer size must be a multiple of sizeof(T).");
// Staging buffer, sized in elements rather than bytes.
85 std::vector<T> buffer(BufferSize /
sizeof(T));
// Index (in elements) of the first element of the next chunk to produce.
87 uint64_t startOfChunk = 0;
// Producer callback handed to addFileFromChunks; each call fills the staging
// buffer with the next slice of `data` (byte-swapped if needed) and returns
// it as a char span. An empty span signals exhaustion (see addFileFromChunks'
// loop condition).
88 auto getNextChunk = [&data, &buffer, &startOfChunk]() {
89 auto const endOfChunk = std::min<uint64_t>(startOfChunk + buffer.size(), data.size());
90 auto bufferIt = buffer.begin();
91 for (uint64_t i = startOfChunk; i < endOfChunk; ++i) {
// Swap bytes only when the target order differs from native and the
// element is wider than one byte; the swap body itself is in the gap
// noted above.
92 if constexpr (std::endian::native != Endianness &&
sizeof(T) > 1) {
// View the filled portion of the staging buffer as raw chars for writing.
101 std::span<const char> chunk(
reinterpret_cast<const char*
>(buffer.data()), (endOfChunk - startOfChunk) *
sizeof(T));
// Advance so the next invocation produces the following slice.
102 startOfChunk = endOfChunk;
105 addFileFromChunks(archivePath, getNextChunk, numBytes);
/// Writes the given string as a text file stored under `archivePath` inside
/// the archive. Declaration only; the definition is not part of this chunk.
120 void addTextFile(std::filesystem::path
const& archivePath, std::string
const& data);
/// Streams a file of `size` bytes into the archive, repeatedly pulling data
/// from the `getNextChunk` callback until it yields an empty chunk or `size`
/// bytes have been written. Only functional when Storm is built with
/// LibArchive; otherwise throws MissingLibraryException.
/// NOTE(review): original lines 134-135, 140-141, 143-144, 149-150, 153-154,
/// 156 and 158-160 — including the `template<typename F>` header, the
/// `bytesWritten` accumulation, and the `#else`/`#endif` of the feature
/// guard — fall in gaps and are not visible here.
130 void addFileFromChunks(std::string
const& archivePath, F getNextChunk,
size_t const size) {
131#ifdef STORM_HAVE_LIBARCHIVE
// libarchive entries are allocated manually and freed below.
132 archive_entry* entry = archive_entry_new();
133 STORM_LOG_THROW(entry, storm::exceptions::FileIoException,
"Failed to create archive entry.");
// Describe the entry: path, announced size, regular file, permissive mode.
136 archive_entry_set_pathname(entry, archivePath.c_str());
137 archive_entry_set_size(entry, size);
138 archive_entry_set_filetype(entry, AE_IFREG);
139 archive_entry_set_perm(entry, 0777);
// The entry header must be written before any data.
142 checkResult(archive_write_header(_archive.get(), entry), entry);
145 uint64_t bytesWritten = 0;
// Pull chunks until the producer is exhausted (empty chunk) or the announced
// size has been reached. (The line accumulating bytesWritten is in a gap.)
146 for (
auto chunk = getNextChunk(); chunk.size() > 0 && bytesWritten < size; chunk = getNextChunk()) {
147 auto res = archive_write_data(_archive.get(), chunk.data(), chunk.size());
148 checkResult(res, entry);
// Diagnose producers that delivered fewer bytes than announced.
151 STORM_LOG_WARN_COND(bytesWritten == size,
"When writing file '" << archivePath <<
"' to archive, " << bytesWritten <<
" bytes were written but " << size
152 <<
" bytes were expected.");
155 archive_entry_free(entry);
// Built without LibArchive: writing archives is unsupported.
157 STORM_LOG_THROW(
false, storm::exceptions::MissingLibraryException,
"Writing archives is not supported. Storm is compiled without LibArchive.");
161#ifdef STORM_HAVE_LIBARCHIVE
// Checks a libarchive result code; presumably throws FileIoException on
// failure, attaching entry information when given. Declaration only; the
// definition is not part of this chunk — TODO confirm error behavior there.
165 void checkResult(
auto resultCode, archive_entry* entry =
nullptr)
const;
// Custom deleter so the unique_ptr below releases the libarchive handle
// through libarchive's own teardown routine.
167 struct ArchiveDeleter {
168 void operator()(archive* arch)
const noexcept;
// (The struct's closing brace, original lines 169-170, falls in a gap.)
// Owning handle to the underlying libarchive writer.
171 std::unique_ptr<archive, ArchiveDeleter> _archive;
// Size in bytes of the staging buffer used by the chunked addBinaryFile;
// static_assert there requires it to be a multiple of sizeof(element).
174 static constexpr size_t BufferSize = 8192;