OR-Tools  8.2
sparse_vector.h
Go to the documentation of this file.
1 // Copyright 2010-2018 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
14 // Classes to represent sparse vectors.
15 //
16 // The following are very good references for terminology, data structures,
17 // and algorithms:
18 //
19 // I.S. Duff, A.M. Erisman and J.K. Reid, "Direct Methods for Sparse Matrices",
20 // Clarendon, Oxford, UK, 1987, ISBN 0-19-853421-3,
21 // http://www.amazon.com/dp/0198534213.
22 //
23 //
24 // T.A. Davis, "Direct methods for Sparse Linear Systems", SIAM, Philadelphia,
25 // 2006, ISBN-13: 978-0-898716-13, http://www.amazon.com/dp/0898716136.
26 //
27 //
28 // Both books also contain a wealth of references.
29 
30 #ifndef OR_TOOLS_LP_DATA_SPARSE_VECTOR_H_
31 #define OR_TOOLS_LP_DATA_SPARSE_VECTOR_H_
32 
33 #include <algorithm>
34 #include <cstring>
35 #include <memory>
36 #include <string>
37 
38 #include "absl/strings/str_format.h"
40 #include "ortools/base/logging.h" // for CHECK*
45 
46 namespace operations_research {
47 namespace glop {
48 
49 template <typename IndexType>
50 class SparseVectorEntry;
51 
52 // --------------------------------------------------------
53 // SparseVector
54 // --------------------------------------------------------
55 // This class allows one to store a vector while taking advantage of its sparsity.
56 // Space complexity is in O(num_entries).
57 // In the current implementation, entries are stored in a first-in order (order
58 // of SetCoefficient() calls) when they are added; then the "cleaning" process
59 // sorts them by index (and duplicates are removed: the last entry takes
60 // precedence).
61 // Many methods assume that the entries are sorted by index and without
62 // duplicates, and DCHECK() that.
63 //
64 // Default copy construction is fully supported.
65 //
66 // This class uses strong integer types (i.e. no implicit cast to/from other
67 // integer types) for both:
68 // - the index of entries (eg. SparseVector<RowIndex> is a SparseColumn,
69 // see ./sparse_column.h).
70 // - the *internal* indices of entries in the internal storage, which is an
71 // entirely different type: EntryType.
72 // This class can be extended with a custom iterator/entry type for the
73 // iterator-based API. This can be used to extend the interface with additional
74 // methods for the entries returned by the iterators; for an example of such
75 // extension, see SparseColumnEntry in sparse_column.h. The custom entries and
76 // iterators should be derived from SparseVectorEntry and SparseVectorIterator,
77 // or at least provide the same public and protected interface.
78 //
79 // TODO(user): un-expose this type to client; by getting rid of the
80 // index-based APIs and leveraging iterator-based APIs; if possible.
// NOTE(review): this text was extracted from a doxygen HTML listing. Original
// header lines that carried hyperlink anchors are MISSING from the numbered
// text below (e.g. the EntryIndex/Fractional/DenseVector typedefs near line
// 87, the default constructor, and several method declaration lines such as
// SetCoefficient, DeleteEntry, RemoveNearZeroEntries, MultiplyByConstant,
// LookUpCoefficient, and the index_/coefficient_ pointer members near lines
// 384-385). Confirm every gap against the upstream sparse_vector.h before
// relying on this copy.
81 template <typename IndexType,
82  typename IteratorType = VectorIterator<SparseVectorEntry<IndexType>>>
83 class SparseVector {
84  public:
85  typedef IndexType Index;
86 
// NOTE(review): lines 87-89 missing here — presumably the DenseVector /
// EntryIndex typedefs used throughout this class. TODO confirm upstream.
89 
90  using Iterator = IteratorType;
91  using Entry = typename Iterator::Entry;
92 
// NOTE(review): line 93 missing — presumably the default constructor
// declaration (its definition appears in the implementation section below).
94 
95  // NOTE(user): STL uses the expensive copy constructor when relocating
96  // elements of a vector, unless the move constructor exists *and* it is marked
97  // as noexcept. However, the noexcept annotation is banned by the style guide,
98  // and the only way to get it is by using the default move constructor and
99  // assignment operator generated by the compiler.
100  SparseVector(const SparseVector& other);
101 #if !defined(_MSC_VER)
102  SparseVector(SparseVector&& other) = default;
103 #endif
104 
// NOTE(review): line 105 missing — presumably the copy-assignment operator
// declaration (defined below; it delegates to PopulateFromSparseVector).
106 #if !defined(_MSC_VER)
107  SparseVector& operator=(SparseVector&& other) = default;
108 #endif
109 
110  // Read-only API for a given SparseVector entry. The typical way for a
111  // client to use this is to use the natural range iteration defined by the
112  // Iterator class below:
113  // SparseVector<int> v;
114  // ...
115  // for (const SparseVector<int>::Entry e : v) {
116  // LOG(INFO) << "Index: " << e.index() << ", Coeff: " << e.coefficient();
117  // }
118  //
119  // Note that this can only be used when the vector has no duplicates.
120  //
121  // Note(user): using either "const SparseVector<int>::Entry&" or
122  // "const SparseVector<int>::Entry" yields the exact same performance on the
123  // netlib, thus we recommend to use the latter version, for consistency.
124  Iterator begin() const;
125  Iterator end() const;
126 
127  // Clears the vector, i.e. removes all entries.
128  void Clear();
129 
130  // Clears the vector and releases the memory it uses.
// NOTE(review): line 131 missing — presumably the ClearAndRelease()
// declaration (its definition appears below and resets buffer_).
132 
133  // Reserve the underlying storage for the given number of entries.
134  void Reserve(EntryIndex new_capacity);
135 
136  // Returns true if the vector is empty.
137  bool IsEmpty() const;
138 
139  // Cleans the vector, i.e. removes zero-values entries, removes duplicates
140  // entries and sorts remaining entries in increasing index order.
141  // Runs in O(num_entries * log(num_entries)).
142  void CleanUp();
143 
144  // Returns true if the entries of this SparseVector are in strictly increasing
145  // index order and if the vector contains no duplicates nor zero coefficients.
146  // Runs in O(num_entries). Although declared const, it updates the mutable
147  // may_contain_duplicates_ flag (see the member comment near line 387).
148  bool IsCleanedUp() const;
149 
150  // Swaps the content of this sparse vector with the one passed as argument.
151  // Works in O(1).
152  void Swap(SparseVector* other);
153 
154  // Populates the current vector from sparse_vector.
155  // Runs in O(num_entries).
156  void PopulateFromSparseVector(const SparseVector& sparse_vector);
157 
158  // Populates the current vector from dense_vector.
159  // Runs in O(num_indices_in_dense_vector).
160  void PopulateFromDenseVector(const DenseVector& dense_vector);
161 
162  // Appends all entries from sparse_vector to the current vector; the indices
163  // of the appended entries are increased by offset. If the current vector
164  // already has a value at an index changed by this method, this value is
165  // overwritten with the value from sparse_vector.
166  // Note that while offset may be negative itself, the indices of all entries
167  // after applying the offset must be non-negative.
168  void AppendEntriesWithOffset(const SparseVector& sparse_vector, Index offset);
169 
170  // Returns true when the vector contains no duplicates. Runs in
171  // O(max_index + num_entries), max_index being the largest index in entry.
172  // This method allocates (and deletes) a Boolean array of size max_index.
173  // Note that we use a mutable Boolean to make subsequent calls run in O(1).
174  bool CheckNoDuplicates() const;
175 
176  // Same as CheckNoDuplicates() except it uses a reusable boolean vector
177  // to make the code more efficient. Runs in O(num_entries).
178  // Note that boolean_vector should be initialized to false before calling this
179  // method; It will remain equal to false after calls to CheckNoDuplicates().
180  // Note that we use a mutable Boolean to make subsequent calls run in O(1).
181  bool CheckNoDuplicates(StrictITIVector<Index, bool>* boolean_vector) const;
182 
183  // Defines the coefficient at index, i.e. vector[index] = value;
// NOTE(review): line 184 missing — presumably SetCoefficient(Index, Fractional)
// (its definition appears below; it appends and marks possible duplicates).
185 
186  // Removes an entry from the vector if present. The order of the other entries
187  // is preserved. Runs in O(num_entries).
// NOTE(review): line 188 missing — presumably DeleteEntry(Index).
189 
190  // Sets to 0.0 (i.e. remove) all entries whose fabs() is lower or equal to
191  // the given threshold.
// NOTE(review): line 192 missing — presumably RemoveNearZeroEntries(Fractional).
193 
194  // Same as RemoveNearZeroEntries, but the entry magnitude of each row is
195  // multiplied by weights[row] before being compared with threshold.
// NOTE(review): line 196 missing — first line of the
// RemoveNearZeroEntriesWithWeights declaration.
197  const DenseVector& weights);
198 
199  // Moves the entry with given Index to the first position in the vector. If
200  // the entry is not present, nothing happens.
// NOTE(review): line 201 missing — presumably MoveEntryToFirstPosition(Index).
202 
203  // Moves the entry with given Index to the last position in the vector. If
204  // the entry is not present, nothing happens.
// NOTE(review): line 205 missing — presumably MoveEntryToLastPosition(Index).
206 
207  // Multiplies all entries by factor.
208  // i.e. entry.coefficient *= factor.
// NOTE(review): line 209 missing — presumably MultiplyByConstant(Fractional).
210 
211  // Multiplies all entries by its corresponding factor,
212  // i.e. entry.coefficient *= factors[entry.index].
213  void ComponentWiseMultiply(const DenseVector& factors);
214 
215  // Divides all entries by factor.
216  // i.e. entry.coefficient /= factor.
// NOTE(review): line 217 missing — presumably DivideByConstant(Fractional).
218 
219  // Divides all entries by its corresponding factor,
220  // i.e. entry.coefficient /= factors[entry.index].
221  void ComponentWiseDivide(const DenseVector& factors);
222 
223  // Populates a dense vector from the sparse vector.
224  // Runs in O(num_indices) as the dense vector values have to be reset to 0.0.
225  void CopyToDenseVector(Index num_indices, DenseVector* dense_vector) const;
226 
227  // Populates a dense vector from the permuted sparse vector.
228  // Runs in O(num_indices) as the dense vector values have to be reset to 0.0.
// NOTE(review): line 229 missing — first line of the
// PermutedCopyToDenseVector declaration.
230  Index num_indices,
231  DenseVector* dense_vector) const;
232 
233  // Performs the operation dense_vector += multiplier * this.
234  // This is known as multiply-accumulate or (fused) multiply-add.
// NOTE(review): line 235 missing — first line of the AddMultipleToDenseVector
// declaration.
236  DenseVector* dense_vector) const;
237 
238  // WARNING: BOTH vectors (the current and the destination) MUST be "clean",
239  // i.e. sorted and without duplicates.
240  // Performs the operation accumulator_vector += multiplier * this, removing
241  // a given index which must be in both vectors, and pruning new entries whose
242  // absolute value are under the given drop_tolerance.
// NOTE(review): line 243 missing — first line of the
// AddMultipleToSparseVectorAndDeleteCommonIndex declaration.
244  Fractional multiplier, Index removed_common_index,
245  Fractional drop_tolerance, SparseVector* accumulator_vector) const;
246 
247  // Same as AddMultipleToSparseVectorAndDeleteCommonIndex() but instead of
248  // deleting the common index, leave it unchanged.
// NOTE(review): line 249 missing — first line of the
// AddMultipleToSparseVectorAndIgnoreCommonIndex declaration.
250  Fractional multiplier, Index removed_common_index,
251  Fractional drop_tolerance, SparseVector* accumulator_vector) const;
252 
253  // Applies the index permutation to all entries: index = index_perm[index];
254  void ApplyIndexPermutation(const IndexPermutation& index_perm);
255 
256  // Same as ApplyIndexPermutation but deletes the index if index_perm[index]
257  // is negative.
// NOTE(review): line 258 missing — presumably
// ApplyPartialIndexPermutation(const IndexPermutation&). TODO confirm.
259 
260  // Removes the entries for which index_perm[index] is non-negative and appends
261  // them to output. Note that the index of the entries are NOT permuted.
262  void MoveTaggedEntriesTo(const IndexPermutation& index_perm,
263  SparseVector* output);
264 
265  // Returns the coefficient at position index.
266  // Call with care: runs in O(number-of-entries) as entries may not be sorted.
// NOTE(review): line 267 missing — presumably LookUpCoefficient(Index) const
// (it is called by the DCHECKs in AddMultipleToSparseVectorInternal below).
268 
269  // Note this method can only be used when the vector has no duplicates.
270  EntryIndex num_entries() const {
// NOTE(review): line 271 missing — presumably a DCHECK(CheckNoDuplicates()).
272  return EntryIndex(num_entries_);
273  }
274 
275  // Returns the first entry's index and coefficient; note that 'first' doesn't
276  // mean 'entry with the smallest index'.
277  // Runs in O(1).
278  // Note this method can only be used when the vector has no duplicates.
// NOTE(review): lines 279-280 missing — presumably the GetFirstIndex()
// signature and a DCHECK. TODO confirm upstream.
281  return GetIndex(EntryIndex(0));
282  }
// NOTE(review): lines 283-284 missing — presumably the GetFirstCoefficient()
// signature and a DCHECK.
285  return GetCoefficient(EntryIndex(0));
286  }
287 
288  // Like GetFirst*, but for the last entry.
289  Index GetLastIndex() const {
// NOTE(review): line 290 missing — presumably a DCHECK.
291  return GetIndex(num_entries() - 1);
292  }
// NOTE(review): lines 293-294 missing — presumably the GetLastCoefficient()
// signature and a DCHECK.
295  return GetCoefficient(num_entries() - 1);
296  }
297 
298  // Allows looping over the entry indices like this:
299  // for (const EntryIndex i : sparse_vector.AllEntryIndices()) { ... }
300  // TODO(user): consider removing this, in favor of the natural range
301  // iteration.
// NOTE(review): line 302 missing — presumably the AllEntryIndices() signature
// returning ::util::IntegerRange<EntryIndex>.
303  return ::util::IntegerRange<EntryIndex>(EntryIndex(0), num_entries_);
304  }
305 
306  // Returns true if this vector is exactly equal to the given one, i.e. all its
307  // index indices and coefficients appear in the same order and are equal.
308  bool IsEqualTo(const SparseVector& other) const;
309 
310  // An exhaustive, pretty-printed listing of the entries, in their
311  // internal order. a.DebugString() == b.DebugString() iff a.IsEqualTo(b).
312  std::string DebugString() const;
313 
314  protected:
315  // Adds a new entry to the sparse vector, growing the internal buffer if
316  // needed. It does not set may_contain_duplicates_ to true.
// NOTE(review): line 317 missing — presumably the
// AddEntry(Index index, Fractional value) signature.
318  DCHECK_GE(index, 0);
319  // Grow the internal storage if there is no space left for the new entry. We
320  // increase the size to max(4, 1.5*current capacity).
// NOTE(review): the comment above says 1.5x but the code below doubles the
// capacity — the comment appears stale. TODO confirm against upstream.
321  if (num_entries_ == capacity_) {
322  // Reserve(capacity_ == 0 ? EntryIndex(4)
323  // : EntryIndex(2 * capacity_.value()));
324  Reserve(capacity_ == 0 ? EntryIndex(4)
325  : EntryIndex(2 * capacity_.value()));
// NOTE(review): line 326 missing here.
327  }
328  const EntryIndex new_entry_index = num_entries_;
329  ++num_entries_;
330  MutableIndex(new_entry_index) = index;
331  MutableCoefficient(new_entry_index) = value;
332  }
333 
334  // Resizes the sparse vector to a smaller size, without re-allocating the
335  // internal storage.
336  void ResizeDown(EntryIndex new_size) {
337  DCHECK_GE(new_size, 0);
338  DCHECK_LE(new_size, num_entries_);
339  num_entries_ = new_size;
340  }
341 
342  // Read-only access to the indices and coefficients of the entries of the
343  // sparse vector.
344  Index GetIndex(EntryIndex i) const {
345  DCHECK_GE(i, 0);
// NOTE(review): line 346 missing — presumably DCHECK_LT(i, num_entries_).
347  return index_[i.value()];
348  }
349  Fractional GetCoefficient(EntryIndex i) const {
350  DCHECK_GE(i, 0);
// NOTE(review): line 351 missing — presumably DCHECK_LT(i, num_entries_).
352  return coefficient_[i.value()];
353  }
354 
355  // Mutable access to the indices and coefficients of the entries of the sparse
356  // vector.
357  Index& MutableIndex(EntryIndex i) {
358  DCHECK_GE(i, 0);
// NOTE(review): line 359 missing — presumably DCHECK_LT(i, num_entries_).
360  return index_[i.value()];
361  }
// NOTE(review): lines 362 and 364 missing — presumably the
// Fractional& MutableCoefficient(EntryIndex i) signature and a DCHECK_LT.
363  DCHECK_GE(i, 0);
365  return coefficient_[i.value()];
366  }
367 
368  // The internal storage of the sparse vector. Both the indices and the
369  // coefficients are stored in the same buffer; the first
370  // sizeof(Index)*capacity_ bytes are used for storing the indices, the
371  // following sizeof(Fractional)*capacity_ bytes contain the values. This
372  // representation ensures that for small vectors, both the indices and the
373  // coefficients are in the same page/cache line.
374  // We use a single buffer for both arrays. The amount of data copied during
375  // relocations is the same in both cases, and it is much smaller than the cost
376  // of an additional allocation - especially when the vectors are small.
377  // Moreover, using two separate vectors/buffers would mean that even small
378  // vectors would be spread across at least two different cache lines.
379  std::unique_ptr<char[]> buffer_;
380  EntryIndex num_entries_;
381  EntryIndex capacity_;
382 
383  // Pointers to the first elements of the index and coefficient arrays.
// NOTE(review): lines 384-385 missing — presumably
// Index* index_; and Fractional* coefficient_; (they are assigned in
// Reserve() and ClearAndRelease() below).
386 
387  // This is here to speed up the CheckNoDuplicates() methods and is mutable
388  // so we can perform checks on const argument.
// NOTE(review): line 389 missing — presumably
// mutable bool may_contain_duplicates_; (set throughout the implementation).
390 
391  private:
392  // Actual implementation of AddMultipleToSparseVectorAndDeleteCommonIndex()
393  // and AddMultipleToSparseVectorAndIgnoreCommonIndex() which is shared.
394  void AddMultipleToSparseVectorInternal(
395  bool delete_common_index, Fractional multiplier, Index common_index,
396  Fractional drop_tolerance, SparseVector* accumulator_vector) const;
397 };
398 
399 // --------------------------------------------------------
400 // SparseVectorEntry
401 // --------------------------------------------------------
402 
403 // A reference-like class that points to a certain element of a sparse data
404 // structure that stores its elements in two parallel arrays. The main purpose
405 // of the entry class is to support implementation of iterator objects over the
406 // sparse data structure.
407 // Note that the entry object does not own the data, and it is valid only as
408 // long as the underlying sparse data structure; it may also be invalidated if
409 // the underlying sparse data structure is modified.
410 template <typename IndexType>
// NOTE(review): line 411 missing from this extracted listing — presumably
// "class SparseVectorEntry {" (the forward declaration appears near the top
// of the file). TODO confirm upstream.
412  public:
413  using Index = IndexType;
414 
415  Index index() const { return index_[i_.value()]; }
416  Fractional coefficient() const { return coefficient_[i_.value()]; }
417 
418  protected:
419  // Creates the sparse vector entry from the given base pointers and the index.
420  // We accept the low-level data structures rather than a SparseVector
421  // reference to make it possible to use the SparseVectorEntry and
422  // SparseVectorIterator classes also for other data structures using the same
423  // internal data representation.
424  // Note that the constructor is intentionally made protected, so that the
425  // entry can be created only as a part of the construction of an iterator over
426  // a sparse data structure.
// NOTE(review): line 427 missing — the first line of the constructor
// signature; given the initializer list below it presumably takes
// (const Index* indices, const Fractional* coefficients, EntryIndex i).
428  EntryIndex i)
429  : i_(i), index_(indices), coefficient_(coefficients) {}
430 
431  // The index of the sparse vector entry represented by this object.
432  EntryIndex i_;
433  // The index and coefficient arrays of the sparse vector.
434  // NOTE(user): Keeping directly the index and the base pointers gives the
435  // best performance with a tiny margin of the options:
436  // 1. keep the base pointers and an index of the current entry,
437  // 2. keep pointers to the current index and the current coefficient and
438  // increment both when moving the iterator.
439  // 3. keep a pointer to the sparse vector object and the index of the current
440  // entry.
441  const Index* index_;
// NOTE(review): line 442 missing — presumably
// "const Fractional* coefficient_;" (it is read by coefficient() above and
// set in the constructor initializer list).
443 };
444 
// NOTE(review): throughout this implementation section the function signature
// lines are missing from the extracted listing (they carried doxygen link
// anchors). Each definition below is labeled with the declaration it
// presumably implements — confirm against the upstream sparse_vector.h.
445 template <typename IndexType, typename IteratorType>
// Presumably SparseVector<...>::begin() const (line 446 missing): iterator to
// the first stored entry.
447  return Iterator(this->index_, this->coefficient_, EntryIndex(0));
448 }
449 
450 template <typename IndexType, typename IteratorType>
// Presumably SparseVector<...>::end() const (line 451 missing): one-past-the-
// last entry, using num_entries_ as the sentinel position.
452  return Iterator(this->index_, this->coefficient_, num_entries_);
453 }
454 
455 // --------------------------------------------------------
456 // SparseVector implementation
457 // --------------------------------------------------------
458 template <typename IndexType, typename IteratorType>
// Presumably the default constructor (line 459 missing): empty vector, no
// allocated storage.
460  : num_entries_(0),
461  capacity_(0),
462  index_(nullptr),
463  coefficient_(nullptr),
464  may_contain_duplicates_(false) {}
465 
466 template <typename IndexType, typename IteratorType>
// Presumably the copy constructor (line 467 missing); delegates to
// PopulateFromSparseVector, declared at line 100 of the class.
468  PopulateFromSparseVector(other);
469 }
470 
471 template <typename IndexType, typename IteratorType>
// Presumably the copy-assignment operator (lines 472-473 missing); same
// delegation as the copy constructor.
474  PopulateFromSparseVector(other);
475  return *this;
476 }
477 
478 template <typename IndexType, typename IteratorType>
// Presumably Clear() (line 479 missing): drops all entries but keeps the
// allocated buffer for reuse.
480  num_entries_ = EntryIndex(0);
481  may_contain_duplicates_ = false;
482 }
483 
484 template <typename IndexType, typename IteratorType>
// Presumably ClearAndRelease() (line 485 missing): also frees the backing
// buffer and nulls the array pointers.
486  capacity_ = EntryIndex(0);
487  num_entries_ = EntryIndex(0);
488  index_ = nullptr;
489  coefficient_ = nullptr;
490  buffer_.reset();
491  may_contain_duplicates_ = false;
492 }
493 
494 template <typename IndexType, typename IteratorType>
495 void SparseVector<IndexType, IteratorType>::Reserve(EntryIndex new_capacity) {
496  if (new_capacity <= capacity_) return;
497  // Round up the capacity to a multiple of four. This way, the start of the
498  // coefficient array will be aligned to 16-bytes, provided that the buffer
499  // used for storing the data is aligned in that way.
500  if (new_capacity.value() & 3) {
501  new_capacity += EntryIndex(4 - (new_capacity.value() & 3));
502  }
503 
504  const size_t index_buffer_size = new_capacity.value() * sizeof(Index);
505  const size_t value_buffer_size = new_capacity.value() * sizeof(Fractional);
506  const size_t new_buffer_size = index_buffer_size + value_buffer_size;
507  std::unique_ptr<char[]> new_buffer(new char[new_buffer_size]);
508  IndexType* const new_index = reinterpret_cast<Index*>(new_buffer.get());
509  Fractional* const new_coefficient =
510  reinterpret_cast<Fractional*>(new_index + new_capacity.value());
511 
512  // Avoid copying the data if the vector is empty.
513  if (num_entries_ > 0) {
514  // NOTE(user): We use memmove instead of std::copy, because the latter
515  // leads to naive copying code when used with strong ints (a loop that
516  // copies a single 32-bit value in each iteration), and as of 06/2016,
517  // memmove is 3-4x faster on Haswell.
518  std::memmove(new_index, index_, sizeof(IndexType) * num_entries_.value());
519  std::memmove(new_coefficient, coefficient_,
520  sizeof(Fractional) * num_entries_.value());
521  }
522  std::swap(buffer_, new_buffer);
523  index_ = new_index;
524  coefficient_ = new_coefficient;
525  capacity_ = new_capacity;
526 }
527 
// NOTE(review): signature lines are missing below (doxygen anchors);
// labels are inferred from the class declarations — confirm upstream.
528 template <typename IndexType, typename IteratorType>
// Presumably IsEmpty() const (line 529 missing).
530  return num_entries_ == EntryIndex(0);
531 }
532 
533 template <typename IndexType, typename IteratorType>
// Presumably Swap(SparseVector* other) (line 534 missing): O(1) exchange of
// every member, including the array base pointers.
535  std::swap(buffer_, other->buffer_);
536  std::swap(num_entries_, other->num_entries_);
537  std::swap(capacity_, other->capacity_);
538  std::swap(may_contain_duplicates_, other->may_contain_duplicates_);
539  std::swap(index_, other->index_);
540  std::swap(coefficient_, other->coefficient_);
541 }
542 
543 template <typename IndexType, typename IteratorType>
// Presumably CleanUp() (line 544 missing): sorts entries by index, drops
// zeros, and keeps only the LAST duplicate of each index (stable sort + the
// "same index as next?" test below).
545  // TODO(user): Implement in-place sorting of the entries and cleanup. The
546  // current version converts the data to an array-of-pairs representation that
547  // can be sorted easily with std::stable_sort, and then converts the sorted
548  // data back to the struct-of-arrays implementation.
549  // The current version is ~20% slower than the in-place sort on the
550  // array-of-struct representation. It is not visible on GLOP benchmarks, but
551  // it increases peak memory usage by ~8%.
552  // Implementing in-place sort will require either implementing a custom
553  // sorting code, or custom iterators that abstract away the internal
554  // representation.
555  std::vector<std::pair<Index, Fractional>> entries;
556  entries.reserve(num_entries_.value());
557  for (EntryIndex i(0); i < num_entries_; ++i) {
558  entries.emplace_back(GetIndex(i), GetCoefficient(i));
559  }
// Stable sort keeps insertion order among equal indices, so the last
// SetCoefficient() for an index wins in the dedup pass below.
560  std::stable_sort(
561  entries.begin(), entries.end(),
562  [](const std::pair<Index, Fractional>& a,
563  const std::pair<Index, Fractional>& b) { return a.first < b.first; });
564 
565  EntryIndex new_size(0);
566  for (int i = 0; i < num_entries_; ++i) {
567  const std::pair<Index, Fractional> entry = entries[i];
568  if (entry.second == 0.0) continue;
// Keep an entry only if it is the last one with its index.
569  if (i + 1 == num_entries_ || entry.first != entries[i + 1].first) {
570  MutableIndex(new_size) = entry.first;
571  MutableCoefficient(new_size) = entry.second;
572  ++new_size;
573  }
574  }
575  ResizeDown(new_size);
576  may_contain_duplicates_ = false;
577 }
578 
// NOTE(review): signature lines are missing below (doxygen anchors);
// labels are inferred from the class declarations — confirm upstream.
579 template <typename IndexType, typename IteratorType>
// Presumably IsCleanedUp() const (line 580 missing): verifies strictly
// increasing indices and no zero coefficients; refreshes the mutable
// duplicate flag as a side effect.
581  Index previous_index(-1);
582  for (const EntryIndex i : AllEntryIndices()) {
583  const Index index = GetIndex(i);
584  if (index <= previous_index || GetCoefficient(i) == 0.0) return false;
585  previous_index = index;
586  }
587  may_contain_duplicates_ = false;
588  return true;
589 }
590 
591 template <typename IndexType, typename IteratorType>
// Presumably PopulateFromSparseVector (line 592, the first half of the
// signature, is missing).
593  const SparseVector& sparse_vector) {
594  // Clear the sparse vector before reserving the new capacity. If we didn't do
595  // this, Reserve would have to copy the current contents of the vector if it
596  // allocated a new buffer. This would be wasteful, since we overwrite it in
597  // the next step anyway.
598  Clear();
599  Reserve(sparse_vector.capacity_);
600  // NOTE(user): Using a single memmove would be slightly faster, but it
601  // would not work correctly if this already had a greater capacity than
602  // sparse_vector, because the coefficient_ pointer would be positioned
603  // incorrectly.
604  std::memmove(index_, sparse_vector.index_,
605  sizeof(Index) * sparse_vector.num_entries_.value());
606  std::memmove(coefficient_, sparse_vector.coefficient_,
607  sizeof(Fractional) * sparse_vector.num_entries_.value());
608  num_entries_ = sparse_vector.num_entries_;
609  may_contain_duplicates_ = sparse_vector.may_contain_duplicates_;
610 }
611 
612 template <typename IndexType, typename IteratorType>
// Presumably PopulateFromDenseVector (line 613 missing): keeps only the
// nonzero positions; the result is sorted by construction, hence the flag
// reset at the end.
614  const DenseVector& dense_vector) {
615  Clear();
616  const Index num_indices(dense_vector.size());
617  for (Index index(0); index < num_indices; ++index) {
618  if (dense_vector[index] != 0.0) {
619  SetCoefficient(index, dense_vector[index]);
620  }
621  }
622  may_contain_duplicates_ = false;
623 }
624 
625 template <typename IndexType, typename IteratorType>
// Presumably AppendEntriesWithOffset (line 626 missing): blind append of
// shifted entries, so duplicates may now exist.
627  const SparseVector& sparse_vector, Index offset) {
628  for (const EntryIndex i : sparse_vector.AllEntryIndices()) {
629  const Index new_index = offset + sparse_vector.GetIndex(i);
630  DCHECK_GE(new_index, 0);
631  AddEntry(new_index, sparse_vector.GetCoefficient(i));
632  }
633  may_contain_duplicates_ = true;
634 }
635 
// NOTE(review): signature lines are missing below (doxygen anchors);
// labels are inferred from the class declarations — confirm upstream.
636 template <typename IndexType, typename IteratorType>
// Presumably the CheckNoDuplicates overload taking a reusable scratch vector
// (line 637 missing). boolean_vector must be all-false on entry and is
// restored to all-false before returning.
638  StrictITIVector<IndexType, bool>* boolean_vector) const {
639  RETURN_VALUE_IF_NULL(boolean_vector, false);
640  // Note(user): Using num_entries() or any function that calls
641  // CheckNoDuplicates() again will cause an infinite loop!
642  if (!may_contain_duplicates_ || num_entries_ <= 1) return true;
643 
644  // Update size if needed.
645  const Index max_index =
646  *std::max_element(index_, index_ + num_entries_.value());
647  if (boolean_vector->size() <= max_index) {
648  boolean_vector->resize(max_index + 1, false);
649  }
650 
651  may_contain_duplicates_ = false;
652  for (const EntryIndex i : AllEntryIndices()) {
653  const Index index = GetIndex(i);
654  if ((*boolean_vector)[index]) {
655  may_contain_duplicates_ = true;
656  break;
657  }
658  (*boolean_vector)[index] = true;
659  }
660 
661  // Reset boolean_vector to false.
662  for (const EntryIndex i : AllEntryIndices()) {
663  (*boolean_vector)[GetIndex(i)] = false;
664  }
665  return !may_contain_duplicates_;
666 }
667 
668 template <typename IndexType, typename IteratorType>
// Presumably the no-argument CheckNoDuplicates() const (line 669 missing):
// allocates a temporary scratch vector and delegates to the overload above.
670  // Using num_entries() or any function that will call CheckNoDuplicates()
671  // again will cause an infinite loop!
672  if (!may_contain_duplicates_ || num_entries_ <= 1) return true;
673  StrictITIVector<Index, bool> boolean_vector;
674  return CheckNoDuplicates(&boolean_vector);
675 }
676 
677 // Do not filter out zero values, as a zero value can be added to reset a
678 // previous value. Zero values and duplicates will be removed by CleanUp.
679 template <typename IndexType, typename IteratorType>
// Presumably SetCoefficient (line 680 missing): appends unconditionally and
// marks the vector as possibly containing duplicates.
681  Fractional value) {
682  AddEntry(index, value);
683  may_contain_duplicates_ = true;
684 }
685 
// NOTE(review): signature lines are missing below (doxygen anchors);
// labels are inferred from the class declarations — confirm upstream.
686 template <typename IndexType, typename IteratorType>
// Presumably DeleteEntry(Index index) (line 687 missing): linear search,
// then shift the tail of both parallel arrays left by one slot.
688  DCHECK(CheckNoDuplicates());
689  EntryIndex i(0);
690  const EntryIndex end(num_entries());
691  while (i < end && GetIndex(i) != index) {
692  ++i;
693  }
694  if (i == end) return;
695  const int num_moved_entries = (num_entries_ - i).value() - 1;
696  std::memmove(index_ + i.value(), index_ + i.value() + 1,
697  sizeof(Index) * num_moved_entries);
698  std::memmove(coefficient_ + i.value(), coefficient_ + i.value() + 1,
699  sizeof(Fractional) * num_moved_entries);
700  --num_entries_;
701 }
702 
703 template <typename IndexType, typename IteratorType>
// Presumably RemoveNearZeroEntries (line 704 missing): compacts in place,
// keeping only entries with |coefficient| strictly above threshold.
705  Fractional threshold) {
706  DCHECK(CheckNoDuplicates());
707  EntryIndex new_index(0);
708  for (const EntryIndex i : AllEntryIndices()) {
709  const Fractional magnitude = fabs(GetCoefficient(i));
710  if (magnitude > threshold) {
711  MutableIndex(new_index) = GetIndex(i);
712  MutableCoefficient(new_index) = GetCoefficient(i);
713  ++new_index;
714  }
715  }
716  ResizeDown(new_index);
717 }
718 
719 template <typename IndexType, typename IteratorType>
// Presumably RemoveNearZeroEntriesWithWeights (line 720 missing): same
// compaction, but each magnitude is scaled by weights[entry index] first.
721  Fractional threshold, const DenseVector& weights) {
722  DCHECK(CheckNoDuplicates());
723  EntryIndex new_index(0);
724  for (const EntryIndex i : AllEntryIndices()) {
725  if (fabs(GetCoefficient(i)) * weights[GetIndex(i)] > threshold) {
726  MutableIndex(new_index) = GetIndex(i);
727  MutableCoefficient(new_index) = GetCoefficient(i);
728  ++new_index;
729  }
730  }
731  ResizeDown(new_index);
732 }
733 
734 template <typename IndexType, typename IteratorType>
// Presumably MoveEntryToFirstPosition (line 735 missing): swaps the matching
// entry with slot 0; no-op when the index is absent.
736  Index index) {
737  DCHECK(CheckNoDuplicates());
738  for (const EntryIndex i : AllEntryIndices()) {
739  if (GetIndex(i) == index) {
740  std::swap(MutableIndex(EntryIndex(0)), MutableIndex(i));
741  std::swap(MutableCoefficient(EntryIndex(0)), MutableCoefficient(i));
742  return;
743  }
744  }
745 }
746 
747 template <typename IndexType, typename IteratorType>
// Presumably MoveEntryToLastPosition (line 748 missing): swaps the matching
// entry with the last slot; no-op when the index is absent.
749  Index index) {
750  DCHECK(CheckNoDuplicates());
751  const EntryIndex last_entry = num_entries() - 1;
752  for (const EntryIndex i : AllEntryIndices()) {
753  if (GetIndex(i) == index) {
754  std::swap(MutableIndex(last_entry), MutableIndex(i));
755  std::swap(MutableCoefficient(last_entry), MutableCoefficient(i));
756  return;
757  }
758  }
759 }
760 
// NOTE(review): signature lines are missing below (doxygen anchors);
// labels are inferred from the class declarations — confirm upstream.
761 template <typename IndexType, typename IteratorType>
// Presumably MultiplyByConstant (line 762 missing): coefficient *= factor for
// every entry.
763  Fractional factor) {
764  for (const EntryIndex i : AllEntryIndices()) {
765  MutableCoefficient(i) *= factor;
766  }
767 }
768 
769 template <typename IndexType, typename IteratorType>
// Presumably ComponentWiseMultiply (line 770 missing): coefficient *=
// factors[entry index].
771  const DenseVector& factors) {
772  for (const EntryIndex i : AllEntryIndices()) {
773  MutableCoefficient(i) *= factors[GetIndex(i)];
774  }
775 }
776 
777 template <typename IndexType, typename IteratorType>
// Presumably DivideByConstant (line 778 missing): coefficient /= factor for
// every entry.
779  Fractional factor) {
780  for (const EntryIndex i : AllEntryIndices()) {
781  MutableCoefficient(i) /= factor;
782  }
783 }
784 
785 template <typename IndexType, typename IteratorType>
// Presumably ComponentWiseDivide (line 786 missing): coefficient /=
// factors[entry index].
787  const DenseVector& factors) {
788  for (const EntryIndex i : AllEntryIndices()) {
789  MutableCoefficient(i) /= factors[GetIndex(i)];
790  }
791 }
792 
793 template <typename IndexType, typename IteratorType>
// Presumably CopyToDenseVector (line 794 missing): zero-fills the dense
// output then scatters the stored entries into it.
795  Index num_indices, DenseVector* dense_vector) const {
796  RETURN_IF_NULL(dense_vector);
797  dense_vector->AssignToZero(num_indices);
798  for (const EntryIndex i : AllEntryIndices()) {
799  (*dense_vector)[GetIndex(i)] = GetCoefficient(i);
800  }
801 }
802 
803 template <typename IndexType, typename IteratorType>
// Presumably PermutedCopyToDenseVector (line 804 missing): same scatter, but
// each destination position is permuted through index_perm.
805  const IndexPermutation& index_perm, Index num_indices,
806  DenseVector* dense_vector) const {
807  RETURN_IF_NULL(dense_vector);
808  dense_vector->AssignToZero(num_indices);
809  for (const EntryIndex i : AllEntryIndices()) {
810  (*dense_vector)[index_perm[GetIndex(i)]] = GetCoefficient(i);
811  }
812 }
813 
814 template <typename IndexType, typename IteratorType>
// Presumably AddMultipleToDenseVector (line 815 missing): dense_vector +=
// multiplier * this; early-out when the multiplier is exactly zero.
816  Fractional multiplier, DenseVector* dense_vector) const {
817  RETURN_IF_NULL(dense_vector);
818  if (multiplier == 0.0) return;
819  for (const EntryIndex i : AllEntryIndices()) {
820  (*dense_vector)[GetIndex(i)] += multiplier * GetCoefficient(i);
821  }
822 }
823 
// NOTE(review): signature lines are missing below (doxygen anchors);
// labels are inferred from the class declarations — confirm upstream.
824 template <typename IndexType, typename IteratorType>
// Presumably AddMultipleToSparseVectorAndDeleteCommonIndex (lines 825-826
// missing): thin wrapper that delegates with delete_common_index = true.
827  Fractional multiplier, Index removed_common_index,
828  Fractional drop_tolerance, SparseVector* accumulator_vector) const {
829  AddMultipleToSparseVectorInternal(true, multiplier, removed_common_index,
830  drop_tolerance, accumulator_vector);
831 }
832 
833 template <typename IndexType, typename IteratorType>
// Presumably AddMultipleToSparseVectorAndIgnoreCommonIndex (lines 834-835
// missing): same delegation with delete_common_index = false.
836  Fractional multiplier, Index removed_common_index,
837  Fractional drop_tolerance, SparseVector* accumulator_vector) const {
838  AddMultipleToSparseVectorInternal(false, multiplier, removed_common_index,
839  drop_tolerance, accumulator_vector);
840 }
841 
842 template <typename IndexType, typename IteratorType>
844  bool delete_common_index, Fractional multiplier, Index common_index,
845  Fractional drop_tolerance, SparseVector* accumulator_vector) const {
846  // DCHECK that the input is correct.
847  DCHECK(IsCleanedUp());
848  DCHECK(accumulator_vector->IsCleanedUp());
849  DCHECK(CheckNoDuplicates());
850  DCHECK(accumulator_vector->CheckNoDuplicates());
851  DCHECK_NE(0.0, LookUpCoefficient(common_index));
852  DCHECK_NE(0.0, accumulator_vector->LookUpCoefficient(common_index));
853 
854  // Implementation notes: we create a temporary SparseVector "c" to hold the
855  // result. We call "a" the first vector (i.e. the current object, which will
856  // be multiplied by "multiplier"), and "b" the second vector (which will be
857  // swapped with "c" at the end to hold the result).
858  // We incrementally build c as: a * multiplier + b.
859  const SparseVector& a = *this;
860  const SparseVector& b = *accumulator_vector;
861  SparseVector c;
862  EntryIndex ia(0); // Index in the vector "a"
863  EntryIndex ib(0); // ... and "b"
864  EntryIndex ic(0); // ... and "c"
865  const EntryIndex size_a = a.num_entries();
866  const EntryIndex size_b = b.num_entries();
867  const int size_adjustment = delete_common_index ? -2 : 0;
868  const EntryIndex new_size_upper_bound = size_a + size_b + size_adjustment;
869  c.Reserve(new_size_upper_bound);
870  c.num_entries_ = new_size_upper_bound;
871  while ((ia < size_a) && (ib < size_b)) {
872  const Index index_a = a.GetIndex(ia);
873  const Index index_b = b.GetIndex(ib);
874  // Benchmarks done by fdid@ in 2012 showed that it was faster to put the
875  // "if" clauses in that specific order.
876  if (index_a == index_b) {
877  if (index_a != common_index) {
878  const Fractional a_coeff_mul = multiplier * a.GetCoefficient(ia);
879  const Fractional b_coeff = b.GetCoefficient(ib);
880  const Fractional sum = a_coeff_mul + b_coeff;
881 
882  // We do not want to leave near-zero entries.
883  // TODO(user): expose the tolerance used here.
884  if (std::abs(sum) > drop_tolerance) {
885  c.MutableIndex(ic) = index_a;
886  c.MutableCoefficient(ic) = sum;
887  ++ic;
888  }
889  } else if (!delete_common_index) {
890  c.MutableIndex(ic) = b.GetIndex(ib);
891  c.MutableCoefficient(ic) = b.GetCoefficient(ib);
892  ++ic;
893  }
894  ++ia;
895  ++ib;
896  } else if (index_a < index_b) {
897  c.MutableIndex(ic) = index_a;
898  c.MutableCoefficient(ic) = multiplier * a.GetCoefficient(ia);
899  ++ia;
900  ++ic;
901  } else { // index_b < index_a
902  c.MutableIndex(ic) = b.GetIndex(ib);
903  c.MutableCoefficient(ic) = b.GetCoefficient(ib);
904  ++ib;
905  ++ic;
906  }
907  }
908  while (ia < size_a) {
909  c.MutableIndex(ic) = a.GetIndex(ia);
910  c.MutableCoefficient(ic) = multiplier * a.GetCoefficient(ia);
911  ++ia;
912  ++ic;
913  }
914  while (ib < size_b) {
915  c.MutableIndex(ic) = b.GetIndex(ib);
916  c.MutableCoefficient(ic) = b.GetCoefficient(ib);
917  ++ib;
918  ++ic;
919  }
920  c.ResizeDown(ic);
921  c.may_contain_duplicates_ = false;
922  c.Swap(accumulator_vector);
923 }
924 
925 template <typename IndexType, typename IteratorType>
927  const IndexPermutation& index_perm) {
928  for (const EntryIndex i : AllEntryIndices()) {
929  MutableIndex(i) = index_perm[GetIndex(i)];
930  }
931 }
932 
933 template <typename IndexType, typename IteratorType>
935  const IndexPermutation& index_perm) {
936  EntryIndex new_index(0);
937  for (const EntryIndex i : AllEntryIndices()) {
938  const Index index = GetIndex(i);
939  if (index_perm[index] >= 0) {
940  MutableIndex(new_index) = index_perm[index];
941  MutableCoefficient(new_index) = GetCoefficient(i);
942  ++new_index;
943  }
944  }
945  ResizeDown(new_index);
946 }
947 
948 template <typename IndexType, typename IteratorType>
950  const IndexPermutation& index_perm, SparseVector* output) {
951  // Note that this function is called many times, so performance does matter
952  // and it is why we optimized the "nothing to do" case.
953  const EntryIndex end(num_entries_);
954  EntryIndex i(0);
955  while (true) {
956  if (i >= end) return; // "nothing to do" case.
957  if (index_perm[GetIndex(i)] >= 0) break;
958  ++i;
959  }
960  output->AddEntry(GetIndex(i), GetCoefficient(i));
961  for (EntryIndex j(i + 1); j < end; ++j) {
962  if (index_perm[GetIndex(j)] < 0) {
963  MutableIndex(i) = GetIndex(j);
964  MutableCoefficient(i) = GetCoefficient(j);
965  ++i;
966  } else {
967  output->AddEntry(GetIndex(j), GetCoefficient(j));
968  }
969  }
970  ResizeDown(i);
971 
972  // TODO(user): In the way we use this function, we know that will not
973  // happen, but it is better to be careful so we can check that properly in
974  // debug mode.
975  output->may_contain_duplicates_ = true;
976 }
977 
978 template <typename IndexType, typename IteratorType>
980  Index index) const {
981  Fractional value(0.0);
982  for (const EntryIndex i : AllEntryIndices()) {
983  if (GetIndex(i) == index) {
984  // Keep in mind the vector may contains several entries with the same
985  // index. In such a case the last one is returned.
986  // TODO(user): investigate whether an optimized version of
987  // LookUpCoefficient for "clean" columns yields speed-ups.
988  value = GetCoefficient(i);
989  }
990  }
991  return value;
992 }
993 
994 template <typename IndexType, typename IteratorType>
996  const SparseVector& other) const {
997  // We do not take into account the mutable value may_contain_duplicates_.
998  if (num_entries() != other.num_entries()) return false;
999  for (const EntryIndex i : AllEntryIndices()) {
1000  if (GetIndex(i) != other.GetIndex(i)) return false;
1001  if (GetCoefficient(i) != other.GetCoefficient(i)) return false;
1002  }
1003  return true;
1004 }
1005 
1006 template <typename IndexType, typename IteratorType>
1008  std::string s;
1009  for (const EntryIndex i : AllEntryIndices()) {
1010  if (i != 0) s += ", ";
1011  absl::StrAppendFormat(&s, "[%d]=%g", GetIndex(i).value(),
1012  GetCoefficient(i));
1013  }
1014  return s;
1015 }
1016 
1017 } // namespace glop
1018 } // namespace operations_research
1019 
1020 #endif // OR_TOOLS_LP_DATA_SPARSE_VECTOR_H_
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:887
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:886
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:889
#define DCHECK_LT(val1, val2)
Definition: base/logging.h:888
#define DCHECK(condition)
Definition: base/logging.h:884
EntryIndex i_
Index index() const
IndexType Index
Fractional coefficient() const
const Fractional * coefficient_
SparseVectorEntry(const Index *indices, const Fractional *coefficients, EntryIndex i)
const Index * index_
void ComponentWiseMultiply(const DenseVector &factors)
void PopulateFromDenseVector(const DenseVector &dense_vector)
Fractional & MutableCoefficient(EntryIndex i)
void MoveTaggedEntriesTo(const IndexPermutation &index_perm, SparseVector *output)
void ApplyIndexPermutation(const IndexPermutation &index_perm)
Index GetIndex(EntryIndex i) const
void CopyToDenseVector(Index num_indices, DenseVector *dense_vector) const
Fractional LookUpCoefficient(Index index) const
SparseVector & operator=(const SparseVector &other)
::util::IntegerRange< EntryIndex > AllEntryIndices() const
void AddMultipleToDenseVector(Fractional multiplier, DenseVector *dense_vector) const
void ResizeDown(EntryIndex new_size)
void RemoveNearZeroEntriesWithWeights(Fractional threshold, const DenseVector &weights)
void PermutedCopyToDenseVector(const IndexPermutation &index_perm, Index num_indices, DenseVector *dense_vector) const
void AddMultipleToSparseVectorAndDeleteCommonIndex(Fractional multiplier, Index removed_common_index, Fractional drop_tolerance, SparseVector *accumulator_vector) const
void DivideByConstant(Fractional factor)
void MultiplyByConstant(Fractional factor)
SparseVector(SparseVector &&other)=default
void ComponentWiseDivide(const DenseVector &factors)
void ApplyPartialIndexPermutation(const IndexPermutation &index_perm)
SparseVector & operator=(SparseVector &&other)=default
void AddMultipleToSparseVectorAndIgnoreCommonIndex(Fractional multiplier, Index removed_common_index, Fractional drop_tolerance, SparseVector *accumulator_vector) const
void RemoveNearZeroEntries(Fractional threshold)
bool CheckNoDuplicates(StrictITIVector< Index, bool > *boolean_vector) const
void AddEntry(Index index, Fractional value)
bool IsEqualTo(const SparseVector &other) const
void Reserve(EntryIndex new_capacity)
void AppendEntriesWithOffset(const SparseVector &sparse_vector, Index offset)
Fractional GetCoefficient(EntryIndex i) const
void SetCoefficient(Index index, Fractional value)
void PopulateFromSparseVector(const SparseVector &sparse_vector)
StrictITIVector< Index, Fractional > DenseVector
Definition: sparse_vector.h:87
SparseVector(const SparseVector &other)
int64 value
IntegerValue GetCoefficient(const IntegerVariable var, const LinearExpression &expr)
The vehicle routing library lets one model and solve generic vehicle routing problems ranging from th...
int index
Definition: pack.cc:508
EntryIndex num_entries
#define RETURN_IF_NULL(x)
Definition: return_macros.h:20
#define RETURN_VALUE_IF_NULL(x, v)
Definition: return_macros.h:26
std::vector< double > coefficients