// Destructor: frees the metadata array, each per-column (or per-row) data
// buffer, and the arrays of start and end pointers.
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
SparseMatrix<T, indexT, compressionLevel, columnMajor>::~SparseMatrix() {

  if (metadata != nullptr) {
    delete[] metadata;
  }

  if (data != nullptr) {
    for (size_t i = 0; i < outerDim; i++) {
      if (data[i] != nullptr) {
        free(data[i]);
      }
    }
    free(data);
  }

  if (endPointers != nullptr) {
    free(endPointers);
  }
}
// Constructor for an empty matrix with num_rows rows and num_cols columns.
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
// ...

  assert(num_rows > 0 && num_cols > 0 &&
         "The number of rows and columns must be greater than 0");

  // Map the dimensions onto inner and outer dimensions.
  if constexpr (columnMajor) {
    // ...
  }
  // ...

  index_t = sizeof(indexT);

  // Allocate the arrays of per-column data pointers and end pointers.
  try {
    data = (void **)malloc(outerDim * sizeof(void *));
    endPointers = (void **)malloc(outerDim * sizeof(void *));
  } catch (const std::exception &e) {
    std::cerr << e.what() << '\n';
  }

  for (size_t i = 0; i < outerDim; i++) {
    // ...
    endPointers[i] = nullptr;
  }

  // Record the matrix metadata.
  metadata = new uint32_t[NUM_META_DATA];
  metadata[0] = compressionLevel;
  metadata[1] = innerDim;
  metadata[2] = outerDim;
  // ...
  metadata[5] = index_t;
  // ...
}
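
// Usage sketch (illustrative, not part of the original source). It assumes
// the IVSparse namespace, an umbrella header spelled "IVSparse/IVSparse.hpp",
// and that compression level 3 selects the IVCSC format implemented here.
//
//   #include "IVSparse/IVSparse.hpp"
//
//   // An empty 100 x 50 IVCSC matrix of doubles with 32-bit indices; every
//   // column starts out with null data and end pointers.
//   IVSparse::SparseMatrix<double, uint32_t, 3> A(100, 50);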
// Constructor from an Eigen::SparseMatrix.
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
// ...

  // An empty input produces an empty IVSparse matrix.
  if (mat.nonZeros() == 0) {
    // ...
    endPointers = nullptr;
    // ...
  }

  // Eigen must be in compressed form before its raw CSC arrays are read.
  mat.makeCompressed();

  numRows = mat.rows();
  numCols = mat.cols();

  outerDim = columnMajor ? numCols : numRows;
  innerDim = columnMajor ? numRows : numCols;

  nnz = mat.nonZeros();

  // Compress Eigen's CSC arrays into the IVCSC format.
  compressCSC(mat.valuePtr(), mat.innerIndexPtr(), mat.outerIndexPtr());
}
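
// Usage sketch (illustrative, not part of the original source); header and
// namespace names are assumed as above. It shows the Eigen-based constructor,
// which compresses Eigen's CSC arrays via compressCSC().
//
//   #include <Eigen/Sparse>
//   #include "IVSparse/IVSparse.hpp"
//
//   Eigen::SparseMatrix<double> eigenMat(100, 50);
//   // ... fill eigenMat, e.g. with setFromTriplets() ...
//   IVSparse::SparseMatrix<double, uint32_t, 3> A(eigenMat);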
// Second Eigen-based constructor overload.
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
// ...

  // An empty input produces an empty IVSparse matrix.
  if (mat.nonZeros() == 0) {
    // ...
    endPointers = nullptr;
    // ...
  }

  // Eigen must be in compressed form before its raw CSC arrays are read.
  mat.makeCompressed();

  numRows = mat.rows();
  numCols = mat.cols();
  // ...
  nnz = mat.nonZeros();

  // Compress Eigen's CSC arrays into the IVCSC format.
  compressCSC(mat.valuePtr(), mat.innerIndexPtr(), mat.outerIndexPtr());
}
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
// ...
// Conversion constructor from a SparseMatrix with a different compression
// level.
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
template <uint8_t otherCompressionLevel>
// ...

  // If the compression levels match, no format conversion is needed.
  if constexpr (otherCompressionLevel == compressionLevel) {
    // ...
  }

  // Otherwise convert the lower-level input (level 1 or 2) to IVCSC.
  if constexpr (otherCompressionLevel == 1) {
    temp = other.toIVCSC();
  } else if constexpr (otherCompressionLevel == 2) {
    temp = other.toIVCSC();
  }

  // ...

#ifdef IVSPARSE_DEBUG
  // ...
#endif
}
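
// Usage sketch (illustrative, not part of the original source). It assumes
// that levels 1 and 2 name the other IVSparse formats, so constructing a
// level-3 matrix from a level-2 matrix routes through toIVCSC() as above.
//
//   IVSparse::SparseMatrix<double, uint32_t, 2> B = /* ... */;
//   IVSparse::SparseMatrix<double, uint32_t, 3> A(B);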
// Constructor from raw CSC-style arrays: values, inner indices, and outer
// pointers.
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
template <typename T2, typename indexT2>
SparseMatrix<T, indexT, compressionLevel, columnMajor>::SparseMatrix(
    T2 *vals, indexT2 *innerIndices, indexT2 *outerPtr, uint32_t num_rows,
    uint32_t num_cols, uint32_t nnz) {

#ifdef IVSPARSE_DEBUG
  assert(num_rows > 0 && num_cols > 0 &&
         "The number of rows and columns must be greater than 0");
  assert(nnz > 0 && "The number of non-zero elements must be greater than 0");
#endif

  // An empty input produces an empty IVSparse matrix.
  if (nnz == 0) [[unlikely]] {
    // ...
    endPointers = nullptr;
    // ...
  }

  // ...

  // Compress the CSC arrays into the IVCSC format.
  compressCSC(vals, innerIndices, outerPtr);
}
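
// Usage sketch (illustrative, not part of the original source); namespace and
// header names are assumed as above. The arrays describe the 3 x 3 matrix
// [[1,0,0],[0,2,0],[0,2,3]] in CSC form with nnz = 4.
//
//   double vals[]       = {1.0, 2.0, 2.0, 3.0};
//   uint32_t innerIdx[] = {0, 1, 2, 2};
//   uint32_t outerPtr[] = {0, 1, 3, 4};
//
//   IVSparse::SparseMatrix<double, uint32_t, 3> A(vals, innerIdx, outerPtr,
//                                                 3, 3, 4);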
// Constructor from a COO-style list of (row, column, value) tuples.
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
template <typename T2, typename indexT2>
SparseMatrix<T, indexT, compressionLevel, columnMajor>::SparseMatrix(
    std::vector<std::tuple<indexT2, indexT2, T2>> &entries, uint32_t num_rows,
    uint32_t num_cols, uint32_t nnz) {

#ifdef IVSPARSE_DEBUG
  assert(num_rows > 0 && num_cols > 0 &&
         "The number of rows and columns must be greater than 0");
  assert(nnz > 0 && "The number of non-zero elements must be greater than 0");
#endif

  // An empty input produces an empty IVSparse matrix.
  if (nnz == 0) [[unlikely]] {
    // ...
  }
  // ...
  index_t = sizeof(indexT);

  // Record the matrix metadata.
  metadata = new uint32_t[NUM_META_DATA];
  metadata[0] = compressionLevel;
  metadata[1] = innerDim;
  metadata[2] = outerDim;
  // ...
  metadata[5] = index_t;

  // Allocate the arrays of per-column data pointers and end pointers.
  try {
    data = (void **)malloc(outerDim * sizeof(void *));
    endPointers = (void **)malloc(outerDim * sizeof(void *));
  } catch (std::bad_alloc &e) {
    std::cerr << "Error: Could not allocate memory for IVSparse matrix"
              << std::endl;
    // ...
  }

  for (size_t i = 0; i < outerDim; i++) {
    // ...
    endPointers[i] = nullptr;
  }
  // Sort the entries by column (outer index), then by row within each column.
  std::sort(entries.begin(), entries.end(),
            [](const std::tuple<indexT2, indexT2, T2> &a,
               const std::tuple<indexT2, indexT2, T2> &b) {
              if (std::get<1>(a) == std::get<1>(b)) {
                return std::get<0>(a) < std::get<0>(b);
              }
              return std::get<1>(a) < std::get<1>(b);
            });

  // For each column, group the row indices by value. Each per-value vector
  // keeps bookkeeping in its first two slots: [0] the largest first index or
  // delta seen so far (replaced by its byte width below) and [1] the most
  // recent row; the remaining slots hold the first row followed by the
  // positive deltas between consecutive rows.
  std::map<T2, std::vector<indexT2>> maps[outerDim];

  for (size_t i = 0; i < nnz; i++) {

    indexT2 row = std::get<0>(entries[i]);
    indexT2 col = std::get<1>(entries[i]);
    T2 val = std::get<2>(entries[i]);

    if (maps[col].find(val) != maps[col].end()) {
      // Value already present in this column: append the delta from the
      // previous row, update the last-row slot, and track the maximum.
      maps[col][val].push_back(row - maps[col][val][1]);
      maps[col][val][1] = row;
      if (maps[col][val][maps[col][val].size() - 1] > maps[col][val][0])
        maps[col][val][0] = maps[col][val][maps[col][val].size() - 1];
    } else {
      // First occurrence of this value in the column.
      maps[col][val] = std::vector<indexT2>{row};
      maps[col][val].push_back(row);
      maps[col][val].push_back(row);
    }
  }
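
  // Worked example (illustrative, not part of the original source): if column
  // 4 holds the value 7.0 at rows 2, 5, and 9, the loop above leaves
  // maps[4][7.0] == {4, 9, 2, 3, 4}. Slot [0] = 4 is the largest of the first
  // row (2) and the deltas (3, 4); slot [1] = 9 is the last row seen; the
  // remaining entries are the first row 2 followed by the deltas 5 - 2 = 3 and
  // 9 - 5 = 4. Below, slot [0] becomes byteWidth(4), so the run is stored as
  // the value, one width byte, the encoded indices, and a delimiter.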
  // Pack each column: compute its byte size, allocate the buffer, then write
  // each unique value followed by its index byte width, its indices, and a
  // delimiter.
#ifdef IVSPARSE_HAS_OPENMP
#pragma omp parallel for
#endif
  for (uint32_t i = 0; i < outerDim; i++) {
    size_t outerByteSize = 0;

    // Replace each per-value maximum index/delta with its byte width and
    // total the bytes needed for this column.
    for (auto &pair : maps[i]) {
      pair.second[0] = byteWidth(pair.second[0]);
      outerByteSize += sizeof(T) + 1 +
                       (pair.second[0] * (pair.second.size() - 2)) +
                       pair.second[0];
    }

    // An empty column stores no data.
    if (outerByteSize == 0) {
      // ...
      endPointers[i] = nullptr;
      // ...
    }

    try {
      data[i] = malloc(outerByteSize);
    } catch (std::bad_alloc &e) {
      std::cout << "Error: " << e.what() << std::endl;
    }

    // Write the column into its buffer.
    void *helpPtr = data[i];

    for (auto &pair : maps[i]) {
      // The unique value.
      *(T *)helpPtr = (T)pair.first;
      helpPtr = (T *)helpPtr + 1;

      // The byte width used for this value's indices.
      *(uint8_t *)helpPtr = (uint8_t)pair.second[0];
      helpPtr = (uint8_t *)helpPtr + 1;

      // The first row and the positive deltas; slots 0 and 1 are bookkeeping.
      for (size_t k = 0; k < pair.second.size(); k++) {
        if (k == 0 || k == 1) {
          continue;
        }
        switch (pair.second[0]) {
          case 1:
            *(uint8_t *)helpPtr = (uint8_t)pair.second[k];
            helpPtr = (uint8_t *)helpPtr + 1;
            break;
          case 2:
            *(uint16_t *)helpPtr = (uint16_t)pair.second[k];
            helpPtr = (uint16_t *)helpPtr + 1;
            break;
          case 4:
            *(uint32_t *)helpPtr = (uint32_t)pair.second[k];
            helpPtr = (uint32_t *)helpPtr + 1;
            break;
          case 8:
            *(uint64_t *)helpPtr = (uint64_t)pair.second[k];
            helpPtr = (uint64_t *)helpPtr + 1;
            break;
        }
      }

      // A delimiter of the same byte width terminates this value's run.
      switch (pair.second[0]) {
        case 1:
          *(uint8_t *)helpPtr = (uint8_t)DELIM;
          helpPtr = (uint8_t *)helpPtr + 1;
          break;
        case 2:
          *(uint16_t *)helpPtr = (uint16_t)DELIM;
          helpPtr = (uint16_t *)helpPtr + 1;
          break;
        case 4:
          *(uint32_t *)helpPtr = (uint32_t)DELIM;
          helpPtr = (uint32_t *)helpPtr + 1;
          break;
        case 8:
          *(uint64_t *)helpPtr = (uint64_t)DELIM;
          helpPtr = (uint64_t *)helpPtr + 1;
          break;
      }
    }

    // Mark the end of this column's data.
    endPointers[i] = helpPtr;
  }
}
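
// Usage sketch (illustrative, not part of the original source); namespace and
// header names are assumed as above. It shows the COO-style constructor on a
// 4 x 4 matrix with three non-zeros.
//
//   std::vector<std::tuple<uint32_t, uint32_t, double>> entries = {
//       {0, 0, 1.5}, {2, 1, -2.0}, {3, 3, 4.0}};
//   IVSparse::SparseMatrix<double, uint32_t, 3> A(entries, 4, 4, 3);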
// Constructor from a single IVSparse vector (one column or row of a matrix).
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
// ...

  index_t = sizeof(indexT);

  // A single-vector matrix needs exactly one data pointer and one end pointer.
  try {
    data = (void **)malloc(sizeof(void *));
    endPointers = (void **)malloc(sizeof(void *));
  } catch (std::bad_alloc &e) {
    throw std::bad_alloc();
  }

  // If the vector holds no data the single column stays empty.
  // ...
  endPointers[0] = nullptr;
  // ...

  // Otherwise the vector's bytes are copied and the end pointer is set just
  // past them.
  try {
    // ...
    endPointers[0] = (char *)data[0] + vec.byteSize();
  } catch (std::bad_alloc &e) {
    throw std::bad_alloc();
  }

  // Record the matrix metadata.
  metadata = new uint32_t[NUM_META_DATA];
  metadata[0] = compressionLevel;
  metadata[1] = innerDim;
  metadata[2] = outerDim;
  // ...
  metadata[5] = index_t;

#ifdef IVSPARSE_DEBUG
  // ...
#endif
}
// Constructor from a collection of IVSparse vectors.
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
// ...

  // Vectors after the first are added one at a time.
  for (size_t i = 1; i < vecs.size(); i++) {
    // ...
  }

#ifdef IVSPARSE_DEBUG
  // ...
#endif
}
// Constructor that reads a matrix from a binary IVSparse file.
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
// ...

  FILE *fp = fopen(filename, "rb");

#ifdef IVSPARSE_DEBUG
  if (fp == nullptr) {
    throw std::runtime_error("Error: Could not open file");
  }
#endif

  // Read the metadata block first and restore the dimensions from it.
  metadata = new uint32_t[NUM_META_DATA];
  fread(metadata, sizeof(uint32_t), NUM_META_DATA, fp);

  // ...
  innerDim = metadata[1];
  outerDim = metadata[2];
  // ...
  index_t = metadata[5];

  numRows = columnMajor ? innerDim : outerDim;
  numCols = columnMajor ? outerDim : innerDim;

#ifdef IVSPARSE_DEBUG
  // The file's compression level must match this matrix type's level.
  if (metadata[0] != compressionLevel) {
    // ...
    throw std::runtime_error(
        "Error: Compression level of file does not match compression level of "
        /* ... */);
  }
#endif

  // Allocate the arrays of per-column data pointers and end pointers.
  try {
    data = (void **)malloc(outerDim * sizeof(void *));
    endPointers = (void **)malloc(outerDim * sizeof(void *));
  } catch (std::bad_alloc &e) {
    std::cerr << "Error: Could not allocate memory for IVSparse matrix"
              << std::endl;
    // ...
  }

  // Read each column's byte size and allocate its buffer.
  for (size_t i = 0; i < outerDim; i++) {
    uint64_t size;
    fread(&size, sizeof(uint64_t), 1, fp);

    // Columns stored with zero bytes keep null pointers.
    // ...
    endPointers[i] = nullptr;
    // ...

    try {
      data[i] = malloc(size);
      endPointers[i] = (char *)data[i] + size;
    } catch (std::bad_alloc &e) {
      throw std::bad_alloc();
    }
  }

  // Read each column's packed data.
  for (size_t i = 0; i < outerDim; i++) {
    fread(data[i], 1, (uint8_t *)endPointers[i] - (uint8_t *)data[i], fp);
  }

  // ...

#ifdef IVSPARSE_DEBUG
  // ...
#endif
}
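
// Usage sketch (illustrative, not part of the original source). It assumes a
// matrix previously serialized by IVSparse (for example by a matching write()
// member, hypothetical here) in the file "matrix.ivcsc".
//
//   IVSparse::SparseMatrix<double, uint32_t, 3> A("matrix.ivcsc");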
// Constructor from an array of per-column maps from value to index list
// (used internally).
template <typename T, typename indexT, uint8_t compressionLevel, bool columnMajor>
SparseMatrix<T, indexT, compressionLevel, columnMajor>::SparseMatrix(
    std::unordered_map<T, std::vector<indexT>> maps[], uint32_t num_rows,
    uint32_t num_cols) {

  // Map the dimensions onto inner and outer dimensions.
  if constexpr (columnMajor) {
    // ...
  }
  // ...

  index_t = sizeof(indexT);

  // Allocate the arrays of per-column data pointers and end pointers.
  try {
    data = (void **)malloc(outerDim * sizeof(void *));
    endPointers = (void **)malloc(outerDim * sizeof(void *));
  } catch (const std::exception &e) {
    std::cerr << e.what() << '\n';
  }

  for (size_t i = 0; i < outerDim; i++) {
    // ...
    endPointers[i] = nullptr;
  }

  // Pack each column. Each per-value vector stores its indices followed by a
  // final element holding the byte width used to encode them.
#ifdef IVSPARSE_HAS_OPENMP
#pragma omp parallel for
#endif
  for (size_t i = 0; i < outerDim; i++) {

    // An empty column stores no data.
    if (maps[i].empty()) [[unlikely]] {
      // ...
      endPointers[i] = nullptr;
      // ...
    }

    // Total the bytes needed for this column.
    size_t byteSize = 0;
    for (auto &val : maps[i]) {
      byteSize += sizeof(T) + 1 +
                  (val.second[val.second.size() - 1] * (val.second.size() - 1) +
                   val.second[val.second.size() - 1]);
    }

    try {
      data[i] = malloc(byteSize);
    } catch (const std::exception &e) {
      std::cerr << e.what() << '\n';
    }

    endPointers[i] = (char *)data[i] + byteSize;

    // Write the column into its buffer.
    void *helpPtr = data[i];

    for (auto &val : maps[i]) {
      nnz += val.second.size() - 1;

      // The unique value followed by the index byte width.
      *(T *)helpPtr = val.first;
      helpPtr = (char *)helpPtr + sizeof(T);
      *(uint8_t *)helpPtr = (uint8_t)val.second[val.second.size() - 1];
      helpPtr = (uint8_t *)helpPtr + 1;

      // The indices; the final element is the byte width and is not written.
      for (size_t k = 0; k < val.second.size(); k++) {
        if (k == val.second.size() - 1)
          break;

        switch (val.second[val.second.size() - 1]) {
          case 1:
            *(uint8_t *)helpPtr = (uint8_t)val.second[k];
            helpPtr = (uint8_t *)helpPtr + 1;
            break;
          case 2:
            *(uint16_t *)helpPtr = (uint16_t)val.second[k];
            helpPtr = (uint16_t *)helpPtr + 1;
            break;
          case 4:
            *(uint32_t *)helpPtr = (uint32_t)val.second[k];
            helpPtr = (uint32_t *)helpPtr + 1;
            break;
          case 8:
            *(uint64_t *)helpPtr = (uint64_t)val.second[k];
            helpPtr = (uint64_t *)helpPtr + 1;
            break;
        }
      }

      // A delimiter of the same byte width terminates this value's run.
      switch (val.second[val.second.size() - 1]) {
        case 1:
          *(uint8_t *)helpPtr = (uint8_t)DELIM;
          helpPtr = (uint8_t *)helpPtr + 1;
          break;
        case 2:
          *(uint16_t *)helpPtr = (uint16_t)DELIM;
          helpPtr = (uint16_t *)helpPtr + 1;
          break;
        case 4:
          *(uint32_t *)helpPtr = (uint32_t)DELIM;
          helpPtr = (uint32_t *)helpPtr + 1;
          break;
        case 8:
          *(uint64_t *)helpPtr = (uint64_t)DELIM;
          helpPtr = (uint64_t *)helpPtr + 1;
          break;
      }
    }
  }

  // Record the matrix metadata.
  metadata = new uint32_t[NUM_META_DATA];
  metadata[0] = compressionLevel;
  metadata[1] = innerDim;
  metadata[2] = outerDim;
  // ...
  metadata[5] = index_t;

#ifdef IVSPARSE_DEBUG
  // ...
#endif
}