// NOTE(review): removed stray "Newer" / "Older" navigation text at the top of
// this file — web-extraction residue, not part of the source.
#include "FileIO.hpp"

#include <algorithm>
#include <array>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <vector>
/// Reads and parses the file header ("<cols> <rows>\n").
/// @param path  path of the input file
/// @return      parsed header (global size and header length in bytes)
/// Aborts via MpiReportErrorAbort if the header cannot be parsed.
HeaderInfo FileIO::ReadHeader(const std::string& path) {
  // Read the first HeaderBufSize bytes; the whole header is assumed to fit.
  MPI_File fh;
  MPI_File_open(MPI_COMM_SELF, path.c_str(),
                MPI_MODE_RDONLY | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &fh);
  std::array<char, HeaderBufSize> buf{};
  MPI_File_read_all(fh, buf.data(), buf.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&fh);

  // Parse from a copy of the buffer. The previous implementation called
  // pubsetbuf() on the istringstream's stringbuf, whose effect is
  // implementation-defined (a no-op on some standard libraries, e.g. MSVC),
  // which would leave the stream empty. Copying the bytes is portable.
  std::istringstream input(std::string(buf.data(), buf.size()));

  std::size_t noCols{};
  std::size_t noRows{};
  input >> noCols >> noRows;
  // Also check the stream state: on a parse failure the extracted values
  // are value-initialized, but being explicit costs nothing.
  if (!input || noCols < 1 || noRows < 1) {
    MpiReportErrorAbort("File header corrupt");
  }
  std::string dummy; // skip the rest of the header line (the line break)
  std::getline(input, dummy);
  const auto headerLength = input.tellg();
  if (headerLength < 0) { // tellg() returns -1 on failure
    MpiReportErrorAbort("File header corrupt");
  }
  return {
      {noCols, noRows},
      static_cast<std::size_t>(headerLength) // fits: at most HeaderBufSize
  };
}
/// Writes the file header ("<cols> <rows>\n") to the start of the file.
/// Only the master rank performs the write; all other ranks return at once.
/// @param header  header to serialize (only GlobalSize is written)
/// @param path    path of the output file (created if missing)
/// @param env     MPI environment, used to identify the master rank
void FileIO::WriteHeader(const HeaderInfo& header, const std::string& path,
                         const MpiEnvironment& env) {
  if (!env.isMaster()) return;

  // Compose the header line in memory first, then write it in one call.
  std::ostringstream stream;
  stream << header.GlobalSize.Cols << ' ' << header.GlobalSize.Rows << '\n';
  const std::string text = stream.str();

  MPI_File fh;
  MPI_File_open(MPI_COMM_SELF, path.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE,
                MPI_INFO_NULL, &fh);
  MPI_File_write(fh, text.data(), text.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&fh);
}
/// Computes the per-rank tile size from the global size and the process grid.
/// Integer division: the global extent is assumed to divide evenly by the
/// grid in both dimensions — TODO confirm callers guarantee this.
Size FileIO::GetTileSize(Size globalSize, Size gridSize) {
  return {globalSize.Cols / gridSize.Cols, globalSize.Rows / gridSize.Rows};
}
/// Constructs a Tile: this rank's view of one tile of the global matrix file.
/// NOTE(review): the parameter list was truncated in this revision of the
/// file; `rank` and `buf` are restored here from their use in the member
/// initializers (_rank(rank), _buf(buf)). Confirm the exact parameter types
/// against the declaration in FileIO.hpp.
FileIO::Tile::Tile(const std::string& path, HeaderInfo header, Size gridSize,
                   std::size_t rank, char* buf)
    : _path(path), _headerLength(header.HeaderLength),
      _srcSize(header.GlobalSize), _gridSize(gridSize), _rank(rank), _buf(buf),
      _tileSize(FileIO::GetTileSize(header.GlobalSize, gridSize)), //
      _tileX(rank % gridSize.Cols), // column of this tile in the grid
      _tileY(rank / gridSize.Cols), // row of this tile in the grid
      // File-side datatype: one tile inside the global matrix; each file row
      // is GlobalSize.Cols + LF bytes wide (trailing line feed per row).
      _tileType(
          MpiSubarray({{header.GlobalSize.Rows, _tileSize.Rows, 0},
                       {header.GlobalSize.Cols + LF, _tileSize.Cols, 0}})),
      // Memory-side datatype: the interior of a buffer with a 1-cell halo.
      _bufType(MpiSubarray({{_tileSize.Rows + 2, _tileSize.Rows, 1},
                            {_tileSize.Cols + 2, _tileSize.Cols, 1}})),
      // Byte offset of this tile's first cell within the file.
      _displ(header.HeaderLength +
             (header.GlobalSize.Cols + LF) * _tileSize.Rows * _tileY +
             _tileSize.Cols * _tileX) {}
void FileIO::Tile::Read() {
MPI_File file;
MPI_File_open(MPI_COMM_WORLD, _path.c_str(),
MPI_MODE_RDONLY | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &file);
MPI_File_set_view(file, _displ, MPI_CHAR, _tileType.type(), "native",
MPI_INFO_NULL);
MPI_File_read_all(file, _buf, 1, _bufType.type(), MPI_STATUS_IGNORE);
MPI_File_close(&file);
}
void FileIO::Tile::Write() const {
MPI_File file;
MPI_File_open(MPI_COMM_WORLD, _path.c_str(),
MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file);
MPI_File_set_view(file, _displ, MPI_CHAR, _tileType.type(), "native",
MPI_INFO_NULL);
MPI_File_write_all(file, _buf, 1, _bufType.type(), MPI_STATUS_IGNORE);
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
/// fix line feeds
// this is done with an collective call, but only the rightmost
// ranks actually write line feeds
// are we a rightMost tile?
const auto rightMost = _tileX == _gridSize.Cols - 1;
const auto bottomMost = _tileY == _gridSize.Rows - 1;
const auto noLfNeeded = rightMost ? //
(bottomMost ? //
_tileSize.Rows - 1 //
: _tileSize.Rows) //
: 0; //
const auto lfType = MpiSubarray( // subsize must be > 0
{{_srcSize.Rows, std::max<std::size_t>(noLfNeeded, 1), 0},
{_srcSize.Cols + LF, 1, 0}});
const std::vector<char> lfs(noLfNeeded, '\n');
const auto lfDisp = _headerLength +
(_srcSize.Cols + LF) * _tileSize.Rows * _tileY +
_srcSize.Cols;
MPI_File_set_view(file, lfDisp, MPI_CHAR, lfType.type(), "native",
MPI_INFO_NULL);
// lfs is empty for non-rightmost ranks
MPI_File_write_all(file, lfs.data(), lfs.size(), MPI_CHAR,
MPI_STATUS_IGNORE);
MPI_File_close(&file);
}