#ifndef __B2VECTOR_COMPRESSED_H__
#define __B2VECTOR_COMPRESSED_H__

#include <algorithm>
#include <limits>
#include <utility>
#include <vector>

#include "b2linear_algebra_def.H"

namespace b2000 {
namespace b2linalg {

struct Vcompressed_scale_constref;

struct Vcompressed {
    typedef Vcompressed base;
    typedef Vcompressed_scale_constref const_base;
    typedef Vcompressed copy;
};
template <typename T>
class Vector<T, Vcompressed> {
public:
    Vector(size_t s_ = 0) : s(s_) {}

    Vector(const Vector& v_) : s(v_.s), index(v_.index), v(v_.v) {}

    template <typename T1>
    Vector(const Vector<T1, Vcompressed>& v_)
        : s(v_.s), index(v_.index), v(v_.v.begin(), v_.v.end()) {}
    template <typename T1>
    Vector& operator=(const Vector<T1, Vcompressed>& v_) {
        s = v_.s;
        index = v_.index;
        v.assign(v_.v.begin(), v_.v.end());
        return *this;
    }
    Vector& operator=(const Vector<T, Vdense>& v_) {
        s = v_.size();
        // First pass: count the nonzero entries.
        size_t nb_nz = 0;
        for (typename Vector<T, Vdense>::const_iterator i = v_.begin(); i != v_.end(); ++i) {
            if (*i != T(0)) { ++nb_nz; }
        }
        index.resize(nb_nz);
        v.resize(nb_nz);
        // Second pass: store the position and value of each nonzero entry.
        nb_nz = 0;
        for (typename Vector<T, Vdense>::const_iterator i = v_.begin(); i != v_.end(); ++i) {
            if (*i != T(0)) {
                index[nb_nz] = i - v_.begin();
                v[nb_nz++] = *i;
            }
        }
        return *this;
    }
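    // A usage sketch for the dense-to-compressed assignment above
    // (hypothetical values; Vector<T, Vdense> is the dense specialization,
    // assumed element-assignable):
    //
    //   Vector<double, Vdense> d(5);
    //   d[1] = 2.0;
    //   d[4] = -1.0;
    //   Vector<double, Vcompressed> c;
    //   c = d;  // index = {1, 4}, value = {2.0, -1.0}, c.size() == 5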
    size_t size() const { return s; }

    size_t get_nb_nonzero() const { return index.size(); }

    bool is_null() const { return this == &null; }

    void resize(size_t s_, size_t snn) {
        s = s_;
        index.resize(snn);
        v.resize(snn);
    }

    void get_value(size_t*& index_, T*& value_) {
        index_ = &index[0];
        value_ = &v[0];
    }

    auto& get_index() const { return index; }
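    // A compressed vector can also be filled in place through the raw
    // accessors; a sketch with hypothetical sizes and values (the index
    // list must be kept sorted, as the product operators below rely on it):
    //
    //   Vector<double, Vcompressed> c;
    //   c.resize(100, 2);          // logical size 100, two nonzero slots
    //   size_t* idx;
    //   double* val;
    //   c.get_value(idx, val);
    //   idx[0] = 3;  val[0] = 1.5;
    //   idx[1] = 42; val[1] = -2.5;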
    Vector& operator=(
        const Vector<T, MVProd<Mcompressed_col_st_constref, Vcompressed_scale_constref> >& v_) {
        // If the product aliases this vector's storage, detach the old
        // storage first so the source data stays alive while the result
        // is rebuilt.
        std::vector<size_t> vtmp_index;
        std::vector<T> vtmp_value;
        if (!v.empty() && v_.v.v == &v[0]) {
            vtmp_index.swap(index);
            vtmp_value.swap(v);
        }
        resize(v_.size(), 0);
        // Sparse accumulator: tmp_value accumulates the result, tmp_index
        // threads a linked list through the touched positions, with
        // end_list_flag as the list terminator.
        const size_t end_list_flag = v_.m.size1();
        std::vector<size_t> tmp_index(end_list_flag, std::numeric_limits<size_t>::max());
        std::vector<T> tmp_value(end_list_flag, T(0));
        size_t end_list = end_list_flag;
        T scale = v_.scale * v_.m.scale * v_.v.scale;
        const size_t* b_rowind_begin = v_.v.index;
        const size_t* const b_rowind_end = b_rowind_begin + v_.v.snn;
        const T* b_value_begin = v_.v.v;
        while (b_rowind_begin != b_rowind_end) {
            const size_t* a_rowind_begin = v_.m.index + v_.m.si[*b_rowind_begin];
            const size_t* const a_rowind_end = v_.m.index + v_.m.si[*b_rowind_begin + 1];
            const T* a_value_begin = v_.m.m + v_.m.si[*b_rowind_begin++];
            const T b_value_begin_v = *b_value_begin++;
            while (a_rowind_begin != a_rowind_end) {
                tmp_value[*a_rowind_begin] += *a_value_begin++ * b_value_begin_v;
                // First touch of this row: link it into the list.
                if (tmp_index[*a_rowind_begin] == std::numeric_limits<size_t>::max()) {
                    tmp_index[*a_rowind_begin] = end_list;
                    end_list = *a_rowind_begin;
                }
                ++a_rowind_begin;
            }
        }
        // Collect the touched positions, reset the workspace, sort by
        // index, and store the scaled result.
        std::vector<std::pair<size_t, T> > res_tmp;
        while (end_list != end_list_flag) {
            res_tmp.push_back(std::pair<size_t, T>(end_list, tmp_value[end_list]));
            const size_t end_list_next = tmp_index[end_list];
            tmp_index[end_list] = std::numeric_limits<size_t>::max();
            tmp_value[end_list] = T(0);
            end_list = end_list_next;
        }
        std::sort(res_tmp.begin(), res_tmp.end(), CompareFirstOfPair());
        index.reserve(res_tmp.size());
        v.reserve(res_tmp.size());
        for (typename std::vector<std::pair<size_t, T> >::iterator i = res_tmp.begin();
             i != res_tmp.end(); ++i) {
            index.push_back(i->first);
            v.push_back(i->second * scale);
        }
        return *this;
    }
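    // The assignment above is a linked-list sparse accumulator (as in
    // Gilbert-Peierls style sparse products). A worked sketch: for a 3x3
    // CSC matrix A whose column 1 holds {(row 0, 2.0), (row 2, 5.0)} and
    // b = {(1, 3.0)}, visiting b's single nonzero scatters 6.0 into
    // tmp_value[0] and 15.0 into tmp_value[2], linking rows 0 and 2 into
    // the list; only those two slots are read back, sorted, and stored,
    // so the cost depends on the nonzeros touched, not on size1().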
    Vector& operator=(
        const Vector<
            T, MVProd<Mstructured_constref<Mpacked_st_constref>, Vcompressed_scale_constref> >&
            v_) {
        // The result takes the sparsity pattern of the structured matrix.
        resize(v_.size(), v_.m.m.s);
        std::fill(v.begin(), v.end(), T(0));
        const T scale = v_.scale * v_.m.scale * v_.v.scale;
        // Merge the sorted index list of the matrix structure with the
        // sorted nonzero indices of the vector; only matching indices
        // contribute a column update.
        const size_t* rowind1 = v_.m.index;
        const size_t* rowind2 = v_.v.index;
        size_t i1 = 0;
        const size_t i1_end = v_.m.m.s;
        size_t i2 = 0;
        const size_t i2_end = v_.v.snn;
        if (i1_end != 0 && i2_end != 0) {
            for (;;) {
                if (rowind1[i1] < rowind2[i2]) {
                    if (++i1 == i1_end) { break; }
                } else if (rowind1[i1] > rowind2[i2]) {
                    if (++i2 == i2_end) { break; }
                } else {
                    const T v2 = v_.v.v[i2] * scale;
                    for (size_t i = 0; i != v.size(); ++i) { v[i] += v_.m.m(i, i1) * v2; }
                    if (++i1 == i1_end) { break; }
                    if (++i2 == i2_end) { break; }
                }
            }
        }
        index.assign(v_.m.index, v_.m.index + v.size());
        return *this;
    }
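    // The loop above is a two-pointer merge of the two sorted index lists:
    // O(i1_end + i2_end) comparisons, with one column update per matching
    // index. E.g. structure indices {2, 5, 9} against vector nonzeros at
    // {5, 7} trigger exactly one update, for index 5.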
    Vector& operator=(
        const Vector<
            T, MVProd<Mstructured_constref<Mpacked_st_constref>, Vincrement_scale_constref> >&
            v_) {
        resize(v_.size(), v_.m.m.s);
        index.assign(v_.m.index, v_.m.index + v.size());
        // Gather the entries of the increment vector addressed by the
        // matrix structure into a dense vector, then multiply.
        Vector<T> vb(v_.m.m.s);
        for (size_t i = 0; i != vb.size(); ++i) { vb[i] = v_.v[index[i]]; }
        Vector<T, Vincrement_ref> va(index.size(), &v[0]);
        va = v_.scale * (v_.m.m * vb);
        return *this;
    }
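    // An increment (strided) vector has every entry addressed by the
    // structure, so no index merge is needed: the product reduces to a
    // gather into the dense vector vb followed by a packed matrix-vector
    // product, written back through the Vincrement_ref view without an
    // extra copy of the result.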
    T operator[](size_t i) const {
        const std::vector<size_t>::const_iterator ii = std::find(index.begin(), index.end(), i);
        if (ii == index.end()) { return 0; }
        return v[ii - index.begin()];
    }
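    // Element access is a linear search over the nonzero indices, O(nnz)
    // per call; when visiting all entries, iterate over get_index() and
    // get_value() instead of calling operator[] in a loop.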
    static Vector null;

    friend logging::Logger& operator<<(logging::Logger& l, const Vector& v) {
        l << "compressed vector ";
        l.write(v.index.size(), &v.index[0], 1, "index");
        l.write(v.v.size(), &v.v[0], 1, "value");
        return l;
    }

private:
    // All Vector specializations may access the internal representation.
    template <typename T1, typename STORAGE1>
    friend class Vector;

    size_t s;
    std::vector<size_t> index;
    std::vector<T> v;
};

template <typename T>
Vector<T, Vcompressed> Vector<T, Vcompressed>::null;
struct Vcompressed_scale_constref {
    typedef Vcompressed_scale_constref base;
    typedef Vcompressed_scale_constref const_base;
    typedef Vcompressed copy;
};

template <typename T>
class Vector<T, Vcompressed_scale_constref> {
public:
    Vector() : s(0), snn(0), scale(1), index(0), v(0) {}

    Vector(const Vector& v_) : s(v_.s), snn(v_.snn), scale(v_.scale), index(v_.index), v(v_.v) {}

    Vector(const Vector<T, Vcompressed>& v_)
        : s(v_.s), snn(v_.index.size()), scale(1), index(&v_.index[0]), v(&v_.v[0]) {}

    Vector(size_t s_, size_t snn_, const size_t* index_, const T* value_, T scale_)
        : s(s_), snn(snn_), scale(scale_), index(index_), v(value_) {}
    size_t size() const { return s; }

    size_t get_nb_nonzero() const { return snn; }

    bool is_null() const { return this == &null; }

    T operator[](size_t i) const {
        const size_t* ii = std::find(index, index + snn, i);
        if (ii == index + snn) { return 0; }
        return scale * v[ii - index];
    }
    Vector& operator*=(T scale_) {
        scale *= scale_;
        return *this;
    }

    Vector operator*(T scale_) const { return Vector(s, snn, index, v, scale * scale_); }
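    // Scaling this view is lazy: no data is copied, only the scale factor
    // changes. A sketch with hypothetical values:
    //
    //   Vector<double, Vcompressed> c;                    // filled elsewhere
    //   Vector<double, Vcompressed_scale_constref> view(c);
    //   view *= 2.0;                  // same arrays, scale is now 2
    //   double x = (view * 3.0)[i];   // 6.0 * c[i]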
    static Vector null;

    // Non-owning view: aliases external index and value arrays and carries
    // a multiplicative scale factor.
    size_t s;
    size_t snn;
    T scale;
    const size_t* index;
    const T* v;
};

template <typename T>
Vector<T, Vcompressed_scale_constref> Vector<T, Vcompressed_scale_constref>::null;

}  // namespace b2linalg
}  // namespace b2000

#endif  // __B2VECTOR_COMPRESSED_H__