eigen.h revision 12391:ceeca8b41e4b
/*
    pybind11/eigen.h: Transparent conversion for dense and sparse Eigen matrices

    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "numpy.h"

#if defined(__INTEL_COMPILER)
#  pragma warning(disable: 1682) // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem)
#elif defined(__GNUG__) || defined(__clang__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wconversion"
#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#  if __GNUC__ >= 7
#    pragma GCC diagnostic ignored "-Wint-in-bool-context"
#  endif
#endif

#include <Eigen/Core>
#include <Eigen/SparseCore>

#if defined(_MSC_VER)
#  pragma warning(push)
#  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
#endif
// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit
// move constructors that break things.  We could detect this and explicitly copy, but an extra copy
// of matrices seems highly undesirable.
static_assert(EIGEN_VERSION_AT_LEAST(3,2,7), "Eigen support in pybind11 requires Eigen >= 3.2.7");

NAMESPACE_BEGIN(PYBIND11_NAMESPACE)

// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename MatrixType> using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
template <typename MatrixType> using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;
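//
// Example (illustrative sketch, not part of the original header; assumes <pybind11/eigen.h> and
// `namespace py = pybind11;`, and the function/module names are hypothetical): because EigenDRef
// accepts fully dynamic strides, a bound function can modify a numpy array in place even when the
// array is a non-contiguous slice.
//
//     void scale_by_2(py::EigenDRef<Eigen::MatrixXd> m) { m *= 2; }
//
//     PYBIND11_MODULE(example, mod) {
//         mod.def("scale_by_2", &scale_by_2);  // works on e.g. a[::2, ::2] without copying
//     }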

NAMESPACE_BEGIN(detail)

#if EIGEN_VERSION_AT_LEAST(3,3,0)
using EigenIndex = Eigen::Index;
#else
using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;
#endif

// Matches Eigen::Map, Eigen::Ref, blocks, etc:
template <typename T> using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>, std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;
template <typename T> using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;
template <typename T> using is_eigen_dense_plain = all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;
template <typename T> using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;
// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above.  This
// basically covers anything that can be assigned to a dense matrix but that doesn't have a typical
// matrix data layout that can be copied from its .data().  For example, DiagonalMatrix and
// SelfAdjointView fall into this category.
template <typename T> using is_eigen_other = all_of<
    is_template_base_of<Eigen::EigenBase, T>,
    negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>
>;
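//
// Illustrative sanity checks (sketch only, not compiled as part of this header) showing how a few
// common Eigen types are classified by the traits above:
//
//     static_assert(is_eigen_dense_plain<Eigen::MatrixXd>::value, "plain dense storage");
//     static_assert(is_eigen_dense_map<Eigen::Ref<Eigen::MatrixXd>>::value, "map-like (Map/Ref/Block)");
//     static_assert(!is_eigen_mutable_map<Eigen::Map<const Eigen::MatrixXd>>::value, "const maps are read-only");
//     static_assert(is_eigen_sparse<Eigen::SparseMatrix<double>>::value, "sparse");
//     static_assert(is_eigen_other<Eigen::DiagonalMatrix<double, 3>>::value, "EigenBase, but no dense .data()");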

// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):
template <bool EigenRowMajor> struct EigenConformable {
    bool conformable = false;
    EigenIndex rows = 0, cols = 0;
    EigenDStride stride{0, 0};      // Only valid if negativestrides is false!
    bool negativestrides = false;   // If true, do not use stride!

    EigenConformable(bool fits = false) : conformable{fits} {}
    // Matrix type:
    EigenConformable(EigenIndex r, EigenIndex c,
            EigenIndex rstride, EigenIndex cstride) :
        conformable{true}, rows{r}, cols{c} {
        // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity. http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747
        if (rstride < 0 || cstride < 0) {
            negativestrides = true;
        } else {
            stride = {EigenRowMajor ? rstride : cstride /* outer stride */,
                      EigenRowMajor ? cstride : rstride /* inner stride */ };
        }
    }
    // Vector type:
    EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)
        : EigenConformable(r, c, r == 1 ? c*stride : stride, c == 1 ? r : r*stride) {}

    template <typename props> bool stride_compatible() const {
        // To have compatible strides, we need (on both dimensions) one of fully dynamic strides,
        // matching strides, or a dimension size of 1 (in which case the stride value is irrelevant)
        return
            !negativestrides &&
            (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner() ||
                (EigenRowMajor ? cols : rows) == 1) &&
            (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer() ||
                (EigenRowMajor ? rows : cols) == 1);
    }
    operator bool() const { return conformable; }
};
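//
// Worked example (informational comment only): a C-contiguous float64 numpy array of shape (3, 4)
// has byte strides (32, 8), i.e. element strides rstride = 4, cstride = 1 after dividing by
// sizeof(double).  For a row-major Eigen target this becomes outer = 4, inner = 1; for a
// column-major target it becomes outer = 1, inner = 4, which stride_compatible() then checks
// against the target type's compile-time stride requirements.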

template <typename Type> struct eigen_extract_stride { using type = Type; };
template <typename PlainObjectType, int MapOptions, typename StrideType>
struct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> { using type = StrideType; };
template <typename PlainObjectType, int Options, typename StrideType>
struct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> { using type = StrideType; };

// Helper struct for extracting information from an Eigen type
template <typename Type_> struct EigenProps {
    using Type = Type_;
    using Scalar = typename Type::Scalar;
    using StrideType = typename eigen_extract_stride<Type>::type;
    static constexpr EigenIndex
        rows = Type::RowsAtCompileTime,
        cols = Type::ColsAtCompileTime,
        size = Type::SizeAtCompileTime;
    static constexpr bool
        row_major = Type::IsRowMajor,
        vector = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1
        fixed_rows = rows != Eigen::Dynamic,
        fixed_cols = cols != Eigen::Dynamic,
        fixed = size != Eigen::Dynamic, // Fully-fixed size
        dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size

    template <EigenIndex i, EigenIndex ifzero> using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;
    static constexpr EigenIndex inner_stride = if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,
                                outer_stride = if_zero<StrideType::OuterStrideAtCompileTime,
                                                       vector ? size : row_major ? cols : rows>::value;
    static constexpr bool dynamic_stride = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;
    static constexpr bool requires_row_major = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;
    static constexpr bool requires_col_major = !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;

    // Takes an input array and determines whether we can make it fit into the Eigen type.  If
    // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector
    // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).
    static EigenConformable<row_major> conformable(const array &a) {
        const auto dims = a.ndim();
        if (dims < 1 || dims > 2)
            return false;

        if (dims == 2) { // Matrix type: require exact match (or dynamic)

            EigenIndex
                np_rows = a.shape(0),
                np_cols = a.shape(1),
                np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
                np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
            if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols))
                return false;

            return {np_rows, np_cols, np_rstride, np_cstride};
        }

        // Otherwise we're storing an n-vector.  Only one of the strides will be used, but whichever
        // is used, we want the (single) numpy stride value.
        const EigenIndex n = a.shape(0),
              stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));

        if (vector) { // Eigen type is a compile-time vector
            if (fixed && size != n)
                return false; // Vector size mismatch
            return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride};
        }
        else if (fixed) {
            // The type has a fixed size, but is not a vector: abort
            return false;
        }
        else if (fixed_cols) {
            // Since this isn't a vector, cols must be != 1.  We allow this only if it exactly
            // equals the number of elements (rows is Dynamic, and so 1 row is allowed).
            if (cols != n) return false;
            return {1, n, stride};
        }
        else {
            // Otherwise it's either fully dynamic, or column dynamic; both become a column vector
            if (fixed_rows && rows != n) return false;
            return {n, 1, stride};
        }
    }

    static PYBIND11_DESCR descriptor() {
        constexpr bool show_writeable = is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;
        constexpr bool show_order = is_eigen_dense_map<Type>::value;
        constexpr bool show_c_contiguous = show_order && requires_row_major;
        constexpr bool show_f_contiguous = !show_c_contiguous && show_order && requires_col_major;

        return type_descr(_("numpy.ndarray[") + npy_format_descriptor<Scalar>::name() +
            _("[")  + _<fixed_rows>(_<(size_t) rows>(), _("m")) +
            _(", ") + _<fixed_cols>(_<(size_t) cols>(), _("n")) +
            _("]") +
            // For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to be
            // satisfied: writeable=True (for a mutable reference), and, depending on the map's stride
            // options, possibly f_contiguous or c_contiguous.  We include them in the descriptor output
            // to provide some hint as to why a TypeError is occurring (otherwise it can be confusing to
            // see that a function accepts a 'numpy.ndarray[float64[3,2]]' and an error message that you
            // *gave* a numpy.ndarray of the right type and dimensions).
            _<show_writeable>(", flags.writeable", "") +
            _<show_c_contiguous>(", flags.c_contiguous", "") +
            _<show_f_contiguous>(", flags.f_contiguous", "") +
            _("]")
        );
    }
};
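//
// For instance (informational comment only), a plain Eigen::MatrixXd produces a signature of
// something like "numpy.ndarray[float64[m, n]]", while a mutable Eigen::Ref<Eigen::MatrixXd>
// (whose default stride requires column-contiguous data) produces something like
// "numpy.ndarray[float64[m, n], flags.writeable, flags.f_contiguous]".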

// Casts an Eigen type to numpy array.  If given a base, the numpy array references the src data,
// otherwise it'll make a copy.  writeable lets you turn off the writeable flag for the array.
template <typename props> handle eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
    constexpr ssize_t elem_size = sizeof(typename props::Scalar);
    array a;
    if (props::vector)
        a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base);
    else
        a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() },
                  src.data(), base);

    if (!writeable)
        array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;

    return a.release();
}

// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that
// references the Eigen object's data with `base` as the python-registered base class (if omitted,
// the base will be set to None, and lifetime management is up to the caller).  The numpy array is
// non-writeable if the given type is const.
template <typename props, typename Type>
handle eigen_ref_array(Type &src, handle parent = none()) {
    // none here is to get past array's should-we-copy detection, which currently always
    // copies when there is no base.  Setting the base to None should be harmless.
    return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);
}

// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a numpy
// array that references the encapsulated data with a python-side reference to the capsule to tie
// its destruction to that of any dependent python objects.  Const-ness is determined by whether or
// not the Type of the pointer given is const.
template <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>
handle eigen_encapsulate(Type *src) {
    capsule base(src, [](void *o) { delete static_cast<Type *>(o); });
    return eigen_ref_array<props>(*src, base);
}

// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense
// types.
template<typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
    using Scalar = typename Type::Scalar;
    using props = EigenProps<Type>;

    bool load(handle src, bool convert) {
        // If we're in no-convert mode, only load if given an array of the correct type
        if (!convert && !isinstance<array_t<Scalar>>(src))
            return false;

        // Coerce into an array, but don't do type conversion yet; the copy below handles it.
        auto buf = array::ensure(src);

        if (!buf)
            return false;

        auto dims = buf.ndim();
        if (dims < 1 || dims > 2)
            return false;

        auto fits = props::conformable(buf);
        if (!fits)
            return false;

        // Allocate the new type, then build a numpy reference into it
        value = Type(fits.rows, fits.cols);
        auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
        if (dims == 1) ref = ref.squeeze();

        int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());

        if (result < 0) { // Copy failed!
            PyErr_Clear();
            return false;
        }

        return true;
    }

private:

    // Cast implementation
    template <typename CType>
    static handle cast_impl(CType *src, return_value_policy policy, handle parent) {
        switch (policy) {
            case return_value_policy::take_ownership:
            case return_value_policy::automatic:
                return eigen_encapsulate<props>(src);
            case return_value_policy::move:
                return eigen_encapsulate<props>(new CType(std::move(*src)));
            case return_value_policy::copy:
                return eigen_array_cast<props>(*src);
            case return_value_policy::reference:
            case return_value_policy::automatic_reference:
                return eigen_ref_array<props>(*src);
            case return_value_policy::reference_internal:
                return eigen_ref_array<props>(*src, parent);
            default:
                throw cast_error("unhandled return_value_policy: should not happen!");
        };
    }

public:

    // Normal returned non-reference, non-const value:
    static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {
        return cast_impl(&src, return_value_policy::move, parent);
    }
    // If you return a non-reference const, we mark the numpy array readonly:
    static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {
        return cast_impl(&src, return_value_policy::move, parent);
    }
    // lvalue reference return; default (automatic) becomes copy
    static handle cast(Type &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
            policy = return_value_policy::copy;
        return cast_impl(&src, policy, parent);
    }
    // const lvalue reference return; default (automatic) becomes copy
    static handle cast(const Type &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
            policy = return_value_policy::copy;
        return cast(&src, policy, parent);
    }
    // non-const pointer return
    static handle cast(Type *src, return_value_policy policy, handle parent) {
        return cast_impl(src, policy, parent);
    }
    // const pointer return
    static handle cast(const Type *src, return_value_policy policy, handle parent) {
        return cast_impl(src, policy, parent);
    }

    static PYBIND11_DESCR name() { return props::descriptor(); }

    operator Type*() { return &value; }
    operator Type&() { return value; }
    operator Type&&() && { return std::move(value); }
    template <typename T> using cast_op_type = movable_cast_op_type<T>;

private:
    Type value;
};
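//
// Example (illustrative sketch, not part of the original header; assumes <pybind11/eigen.h> and
// `namespace py = pybind11;`, with hypothetical function/module names): a bound function returning
// a plain dense type goes through the caster above; a by-value return is moved into a heap matrix
// owned by a capsule, so the resulting ndarray owns its data.
//
//     Eigen::MatrixXd make_ones(int r, int c) { return Eigen::MatrixXd::Ones(r, c); }
//
//     PYBIND11_MODULE(example, mod) {
//         mod.def("make_ones", &make_ones);  // returns a new, writeable ndarray
//         mod.def("make_ones_copy", &make_ones,
//                 py::return_value_policy::copy);  // explicit copy also works
//     }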

// Eigen Ref/Map classes have slightly different policy requirements, meaning we don't want to force
// `move` when a Ref/Map rvalue is returned; we treat Ref<> sort of like a pointer (we care about
// the underlying data, not the outer shell).
template <typename Return>
struct return_value_policy_override<Return, enable_if_t<is_eigen_dense_map<Return>::value>> {
    static return_value_policy policy(return_value_policy p) { return p; }
};

// Base class for casting reference/map/block/etc. objects back to python.
template <typename MapType> struct eigen_map_caster {
private:
    using props = EigenProps<MapType>;

public:

    // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has
    // to stay around), but we'll allow it under the assumption that you know what you're doing (and
    // have an appropriate keep_alive in place).  We return a numpy array pointing directly at the
    // ref's data (the numpy array ends up read-only if the ref was to a const matrix type).  Note
    // that this means you need to ensure you don't destroy the object in some other way (e.g. with
    // an appropriate keep_alive, or with a reference to a statically allocated matrix).
    static handle cast(const MapType &src, return_value_policy policy, handle parent) {
        switch (policy) {
            case return_value_policy::copy:
                return eigen_array_cast<props>(src);
            case return_value_policy::reference_internal:
                return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);
            case return_value_policy::reference:
            case return_value_policy::automatic:
            case return_value_policy::automatic_reference:
                return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);
            default:
                // move, take_ownership don't make any sense for a ref/map:
                pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type");
        }
    }

    static PYBIND11_DESCR name() { return props::descriptor(); }

    // Explicitly delete these: we do not support python -> C++ conversion for these types (i.e. they
    // can be return types but not bound arguments).  We still provide them (explicitly deleted) so
    // that you end up here if you try anyway.
    bool load(handle, bool) = delete;
    operator MapType() = delete;
    template <typename> using cast_op_type = MapType;
};
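//
// Example (illustrative sketch, not part of the original header; the class, function, and module
// names are hypothetical): returning a Ref/Map/Block exposes the existing data as a numpy array,
// so the C++ object owning that data must be kept alive, e.g. via
// return_value_policy::reference_internal.
//
//     struct Holder {
//         Eigen::MatrixXd data{Eigen::MatrixXd::Zero(4, 4)};
//         Eigen::Ref<Eigen::MatrixXd> view() { return data; }
//     };
//
//     PYBIND11_MODULE(example, mod) {
//         py::class_<Holder>(mod, "Holder")
//             .def(py::init<>())
//             .def("view", &Holder::view, py::return_value_policy::reference_internal);
//     }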

// We can return any map-like object (but can only load Refs, specialized next):
template <typename Type> struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>>
    : eigen_map_caster<Type> {};

// Loader for Ref<...> arguments.  See the documentation for info on how to make this work without
// copying (it requires some extra effort in many cases).
template <typename PlainObjectType, typename StrideType>
struct type_caster<
    Eigen::Ref<PlainObjectType, 0, StrideType>,
    enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>
> : public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {
private:
    using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;
    using props = EigenProps<Type>;
    using Scalar = typename props::Scalar;
    using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;
    using Array = array_t<Scalar, array::forcecast |
                ((props::row_major ? props::inner_stride : props::outer_stride) == 1 ? array::c_style :
                 (props::row_major ? props::outer_stride : props::inner_stride) == 1 ? array::f_style : 0)>;
    static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;
    // Delay construction (these have no default constructor)
    std::unique_ptr<MapType> map;
    std::unique_ptr<Type> ref;
    // Our array.  When possible, this is just a numpy array pointing to the source data, but
    // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an incompatible
    // layout, or is an array of a type that needs to be converted).  Using a numpy temporary
    // (rather than an Eigen temporary) saves an extra copy when we need both type conversion and
    // storage order conversion.  (Note that we refuse to use this temporary copy when loading an
    // argument for a Ref<M> with M non-const, i.e. a read-write reference).
    Array copy_or_ref;
public:
    bool load(handle src, bool convert) {
        // First check whether what we have is already an array of the right type.  If not, we can't
        // avoid a copy (because the copy is also going to do type conversion).
        bool need_copy = !isinstance<Array>(src);

        EigenConformable<props::row_major> fits;
        if (!need_copy) {
            // We don't need a converting copy, but we also need to check whether the strides are
            // compatible with the Ref's stride requirements
            Array aref = reinterpret_borrow<Array>(src);

            if (aref && (!need_writeable || aref.writeable())) {
                fits = props::conformable(aref);
                if (!fits) return false; // Incompatible dimensions
                if (!fits.template stride_compatible<props>())
                    need_copy = true;
                else
                    copy_or_ref = std::move(aref);
            }
            else {
                need_copy = true;
            }
        }

        if (need_copy) {
            // We need to copy: if we need a mutable reference, or we're not supposed to convert
            // (either because we're in the no-convert overload pass, or because we're explicitly
            // instructed not to copy via `py::arg().noconvert()`), we have to fail loading.
            if (!convert || need_writeable) return false;

            Array copy = Array::ensure(src);
            if (!copy) return false;
            fits = props::conformable(copy);
            if (!fits || !fits.template stride_compatible<props>())
                return false;
            copy_or_ref = std::move(copy);
            loader_life_support::add_patient(copy_or_ref);
        }

        ref.reset();
        map.reset(new MapType(data(copy_or_ref), fits.rows, fits.cols, make_stride(fits.stride.outer(), fits.stride.inner())));
        ref.reset(new Type(*map));

        return true;
    }

    operator Type*() { return ref.get(); }
    operator Type&() { return *ref; }
    template <typename _T> using cast_op_type = pybind11::detail::cast_op_type<_T>;

private:
    template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>
    Scalar *data(Array &a) { return a.mutable_data(); }

    template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>
    const Scalar *data(Array &a) { return a.data(); }

    // Attempt to figure out a constructor of `Stride` that will work.
    // If both strides are fixed, use a default constructor:
    template <typename S> using stride_ctor_default = bool_constant<
        S::InnerStrideAtCompileTime != Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic &&
        std::is_default_constructible<S>::value>;
    // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like
    // Eigen::Stride, and use it:
    template <typename S> using stride_ctor_dual = bool_constant<
        !stride_ctor_default<S>::value && std::is_constructible<S, EigenIndex, EigenIndex>::value>;
    // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use
    // it (passing whichever stride is dynamic).
    template <typename S> using stride_ctor_outer = bool_constant<
        !any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value &&
        S::OuterStrideAtCompileTime == Eigen::Dynamic && S::InnerStrideAtCompileTime != Eigen::Dynamic &&
        std::is_constructible<S, EigenIndex>::value>;
    template <typename S> using stride_ctor_inner = bool_constant<
        !any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value &&
        S::InnerStrideAtCompileTime == Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic &&
        std::is_constructible<S, EigenIndex>::value>;

    template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>
    static S make_stride(EigenIndex, EigenIndex) { return S(); }
    template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 0>
    static S make_stride(EigenIndex outer, EigenIndex inner) { return S(outer, inner); }
    template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>
    static S make_stride(EigenIndex outer, EigenIndex) { return S(outer); }
    template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>
    static S make_stride(EigenIndex, EigenIndex inner) { return S(inner); }

};
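//
// Example (illustrative sketch, not part of the original header; the function and module names are
// hypothetical): accepting Eigen::Ref<const MatrixXd> lets compatible numpy arrays be passed
// without copying (incompatible input is converted/copied instead), while a mutable Ref refuses to
// copy and therefore only accepts writeable arrays whose dtype and layout already match.
//
//     double sum_of(const Eigen::Ref<const Eigen::MatrixXd> &m) { return m.sum(); }
//     void fill_with(Eigen::Ref<Eigen::MatrixXd> m, double v) { m.setConstant(v); }
//
//     PYBIND11_MODULE(example, mod) {
//         mod.def("sum_of", &sum_of);       // never requires writeable input; copies only if needed
//         mod.def("fill_with", &fill_with); // requires a writeable, compatible array; modifies in place
//     }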

// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not
// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).
// load() is not supported, but we can cast them into the python domain by first copying to a
// regular Eigen::Matrix, then casting that.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {
protected:
    using Matrix = Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;
    using props = EigenProps<Matrix>;
public:
    static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
        handle h = eigen_encapsulate<props>(new Matrix(src));
        return h;
    }
    static handle cast(const Type *src, return_value_policy policy, handle parent) { return cast(*src, policy, parent); }

    static PYBIND11_DESCR name() { return props::descriptor(); }

    // Explicitly delete these: we do not support python -> C++ conversion for these types (i.e. they
    // can be return types but not bound arguments).  We still provide them (explicitly deleted) so
    // that you end up here if you try anyway.
    bool load(handle, bool) = delete;
    operator Type() = delete;
    template <typename> using cast_op_type = Type;
};
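//
// Example (illustrative sketch, not part of the original header; the function and module names are
// hypothetical): returning a DiagonalMatrix goes through the caster above, which materializes it
// as a regular dense matrix before handing it to Python.
//
//     Eigen::DiagonalMatrix<double, 3> unit_scaling() {
//         return Eigen::DiagonalMatrix<double, 3>(1.0, 1.0, 1.0);
//     }
//
//     PYBIND11_MODULE(example, mod) {
//         mod.def("unit_scaling", &unit_scaling);  // arrives in Python as a 3x3 float64 ndarray
//     }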

template<typename Type>
struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
    typedef typename Type::Scalar Scalar;
    typedef remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())> StorageIndex;
    typedef typename Type::Index Index;
    static constexpr bool rowMajor = Type::IsRowMajor;

    bool load(handle src, bool) {
        if (!src)
            return false;

        auto obj = reinterpret_borrow<object>(src);
        object sparse_module = module::import("scipy.sparse");
        object matrix_type = sparse_module.attr(
            rowMajor ? "csr_matrix" : "csc_matrix");

        if (!obj.get_type().is(matrix_type)) {
            try {
                obj = matrix_type(obj);
            } catch (const error_already_set &) {
                return false;
            }
        }

        auto values = array_t<Scalar>((object) obj.attr("data"));
        auto innerIndices = array_t<StorageIndex>((object) obj.attr("indices"));
        auto outerIndices = array_t<StorageIndex>((object) obj.attr("indptr"));
        auto shape = pybind11::tuple((pybind11::object) obj.attr("shape"));
        auto nnz = obj.attr("nnz").cast<Index>();

        if (!values || !innerIndices || !outerIndices)
            return false;

        value = Eigen::MappedSparseMatrix<Scalar, Type::Flags, StorageIndex>(
            shape[0].cast<Index>(), shape[1].cast<Index>(), nnz,
            outerIndices.mutable_data(), innerIndices.mutable_data(), values.mutable_data());

        return true;
    }

    static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
        const_cast<Type&>(src).makeCompressed();

        object matrix_type = module::import("scipy.sparse").attr(
            rowMajor ? "csr_matrix" : "csc_matrix");

        array data(src.nonZeros(), src.valuePtr());
        array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());
        array innerIndices(src.nonZeros(), src.innerIndexPtr());

        return matrix_type(
            std::make_tuple(data, innerIndices, outerIndices),
            std::make_pair(src.rows(), src.cols())
        ).release();
    }

    PYBIND11_TYPE_CASTER(Type, _<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[", "scipy.sparse.csc_matrix[")
            + npy_format_descriptor<Scalar>::name() + _("]"));
};
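//
// Example (illustrative sketch, not part of the original header; the function and module names are
// hypothetical): sparse Eigen matrices are exchanged with scipy.sparse -- a column-major
// SparseMatrix maps to scipy.sparse.csc_matrix, a row-major one to csr_matrix.
//
//     Eigen::SparseMatrix<double> sparse_identity(int n) {
//         Eigen::SparseMatrix<double> m(n, n);
//         m.setIdentity();
//         return m;
//     }
//
//     PYBIND11_MODULE(example, mod) {
//         mod.def("sparse_identity", &sparse_identity);  // returns a scipy.sparse.csc_matrix
//     }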

NAMESPACE_END(detail)
NAMESPACE_END(PYBIND11_NAMESPACE)

#if defined(__GNUG__) || defined(__clang__)
#  pragma GCC diagnostic pop
#elif defined(_MSC_VER)
#  pragma warning(pop)
#endif