[DO NOT MERGE - Debug] Windows segfault debug #1621


Closed · wants to merge 38 commits

Commits (38)
ca89f78
Initial implementation
Dec 14, 2019
de964ba
Merge branch 'develop' into feature/vec_gen_design
Dec 15, 2019
6b54c52
Add forwarding, rev & fwd versions
Dec 16, 2019
eebfd95
Merge branch 'develop' into feature/vec_gen_design
Dec 17, 2019
2b085d1
Add autodiff tests, remove arr versions
Dec 17, 2019
ac3d5e3
Nested testing
Dec 24, 2019
77f3a59
Fix tests, update doc
Dec 26, 2019
b3a1132
Tidy doc
Dec 27, 2019
27d1b1f
Merge branch 'develop' into feature/vec_gen_design
Dec 27, 2019
a4b83a7
Cpplint
Dec 27, 2019
d97c963
Tidy missing doc
Dec 29, 2019
461f0b0
log_softmax doc errors
Dec 29, 2019
cd0a362
Merge branch 'develop' into feature/vec_gen_design
Dec 29, 2019
9fa5124
[Jenkins] auto-formatting by clang-format version 5.0.0-3~16.04.1 (ta…
stan-buildbot Dec 29, 2019
cba3a30
Fix failing test
Dec 30, 2019
e5a6b28
Merge develop
Dec 30, 2019
5a934fe
Revert head replacement
Dec 31, 2019
b7b2171
Merge branch 'develop' into feature/vec_gen_design
Dec 31, 2019
8ebea40
[Jenkins] auto-formatting by clang-format version 5.0.0-3~16.04.1 (ta…
stan-buildbot Dec 31, 2019
a73be6d
Merge commit '426ad8fe5a2858b9d367aade1b25a631ac5e97e8' into merge_af…
rok-cesnovar Jan 5, 2020
8b5cc7f
Merge commit 'd7eb73884e5fad18eaf323760e4625317e1c4c91' into merge_af…
rok-cesnovar Jan 5, 2020
df34056
Merge commit '2b2f7ddff32c12e1e253a6179bf81c1845962306' into merge_af…
rok-cesnovar Jan 5, 2020
8a7017a
Merge commit '731b5f8cf6566db4f13a06851d56cc9e54029146' into merge_af…
rok-cesnovar Jan 5, 2020
8214c93
Merge branch 'develop' into merge_after_flatten
rok-cesnovar Jan 5, 2020
d776eac
merge conflicts fix
rok-cesnovar Jan 5, 2020
09c4004
[Jenkins] auto-formatting by clang-format version 5.0.0-3~16.04.1 (ta…
stan-buildbot Jan 5, 2020
a0eb3df
fix header guard
rok-cesnovar Jan 5, 2020
4e83afb
remove include
rok-cesnovar Jan 5, 2020
2e4f6b1
Merge branch 'develop' into feature/vec_gen_design
Jan 10, 2020
febffbe
Address review comments
Jan 11, 2020
67e23b8
Merge branch 'develop' into feature/vec_gen_design
Jan 11, 2020
477cf9f
[Jenkins] auto-formatting by clang-format version 5.0.0-3~16.04.1 (ta…
stan-buildbot Jan 11, 2020
02afdb9
Fix merge error
Jan 11, 2020
8d8539a
cpplint
Jan 11, 2020
1fce5ac
Merge branch 'develop' into feature/vec_gen_design
Jan 13, 2020
f3b3286
Address comments
Jan 13, 2020
ebd9051
remove steps for debug
rok-cesnovar Jan 16, 2020
95a9372
Merge remote-tracking branch 'andrjohns/feature/vec_gen_design' into …
rok-cesnovar Jan 16, 2020
175 changes: 0 additions & 175 deletions Jenkinsfile
@@ -115,181 +115,6 @@ pipeline {
}
}
}
stage('Linting & Doc checks') {
agent any
steps {
script {
deleteDir()
retry(3) { checkout scm }
sh "git clean -xffd"
stash 'MathSetup'
sh "echo CXX=${env.CXX} -Werror > make/local"
sh "echo BOOST_PARALLEL_JOBS=${env.PARALLEL} >> make/local"
parallel(
CppLint: { sh "make cpplint" },
Dependencies: { sh """#!/bin/bash
set -o pipefail
make test-math-dependencies 2>&1 | tee dependencies.log""" } ,
Documentation: { sh "make doxygen" },
)
}
}
post {
always {
recordIssues enabledForFailure: true, tools:
[cppLint(),
groovyScript(parserId: 'mathDependencies', pattern: '**/dependencies.log')]
deleteDir()
}
}
}
stage('Headers checks') {
parallel {
stage('Headers check') {
agent any
steps {
deleteDir()
unstash 'MathSetup'
sh "echo CXX=${env.CXX} -Werror > make/local"
sh "make -j${env.PARALLEL} test-headers"
}
post { always { deleteDir() } }
}
stage('Headers check with OpenCL') {
agent { label "gpu" }
steps {
deleteDir()
unstash 'MathSetup'
sh "echo CXX=${env.CXX} -Werror > make/local"
sh "echo STAN_OPENCL=true>> make/local"
sh "echo OPENCL_PLATFORM_ID=0>> make/local"
sh "echo OPENCL_DEVICE_ID=${OPENCL_DEVICE_ID}>> make/local"
sh "make -j${env.PARALLEL} test-headers"
}
post { always { deleteDir() } }
}
}
}
stage('Always-run tests part 1') {
parallel {
stage('Linux Unit with MPI') {
agent { label 'linux && mpi' }
steps {
deleteDir()
unstash 'MathSetup'
sh "echo CXX=${MPICXX} >> make/local"
sh "echo CXX_TYPE=gcc >> make/local"
sh "echo STAN_MPI=true >> make/local"
runTests("test/unit")
}
post { always { retry(3) { deleteDir() } } }
}
stage('Full unit with GPU') {
agent { label "gpu" }
steps {
deleteDir()
unstash 'MathSetup'
sh "echo CXX=${env.CXX} -Werror > make/local"
sh "echo STAN_OPENCL=true>> make/local"
sh "echo OPENCL_PLATFORM_ID=0>> make/local"
sh "echo OPENCL_DEVICE_ID=${OPENCL_DEVICE_ID}>> make/local"
runTests("test/unit")
}
post { always { retry(3) { deleteDir() } } }
}
}
}
stage('Always-run tests part 2') {
parallel {
stage('Distribution tests') {
agent { label "distribution-tests" }
steps {
deleteDir()
unstash 'MathSetup'
sh """
echo CXX=${env.CXX} > make/local
echo O=0 >> make/local
echo N_TESTS=${env.N_TESTS} >> make/local
"""
script {
if (params.withRowVector || isBranch('develop') || isBranch('master')) {
sh "echo CXXFLAGS+=-DSTAN_TEST_ROW_VECTORS >> make/local"
}
}
sh "./runTests.py -j${env.PARALLEL} test/prob > dist.log 2>&1"
}
post {
always {
script { zip zipFile: "dist.log.zip", archive: true, glob: 'dist.log' }
retry(3) { deleteDir() }
}
failure {
echo "Distribution tests failed. Check out dist.log.zip artifact for test logs."
}
}
}
stage('Threading tests') {
agent any
steps {
deleteDir()
unstash 'MathSetup'
sh "echo CXX=${env.CXX} -Werror > make/local"
sh "echo CPPFLAGS+=-DSTAN_THREADS >> make/local"
runTests("test/unit -f thread")
sh "find . -name *_test.xml | xargs rm"
runTests("test/unit -f map_rect")
}
post { always { retry(3) { deleteDir() } } }
}
stage('Windows Headers & Unit') {
agent { label 'windows' }
steps {
deleteDirWin()
unstash 'MathSetup'
bat "mingw32-make -j${env.PARALLEL} test-headers"
runTestsWin("test/unit")
}
}
stage('Windows Threading') {
agent { label 'windows' }
steps {
deleteDirWin()
unstash 'MathSetup'
bat "echo CXX=${env.CXX} -Werror > make/local"
bat "echo CXXFLAGS+=-DSTAN_THREADS >> make/local"
runTestsWin("test/unit -f thread")
runTestsWin("test/unit -f map_rect")
}
}
}
}
stage('Additional merge tests') {
when { anyOf { branch 'develop'; branch 'master' } }
parallel {
stage('Linux Unit with Threading') {
agent { label 'linux' }
steps {
deleteDir()
unstash 'MathSetup'
sh "echo CXX=${GCC} >> make/local"
sh "echo CXXFLAGS=-DSTAN_THREADS >> make/local"
runTests("test/unit")
}
post { always { retry(3) { deleteDir() } } }
}
stage('Mac Unit with Threading') {
agent { label 'osx' }
steps {
deleteDir()
unstash 'MathSetup'
sh "echo CC=${env.CXX} -Werror > make/local"
sh "echo CXXFLAGS+=-DSTAN_THREADS >> make/local"
runTests("test/unit")
}
post { always { retry(3) { deleteDir() } } }
}
}
}
stage('Upstream tests') {
when { expression { env.BRANCH_NAME ==~ /PR-\d+/ } }
steps {
66 changes: 35 additions & 31 deletions stan/math/fwd/fun/log_softmax.hpp
@@ -6,44 +6,48 @@
#include <stan/math/prim/fun/Eigen.hpp>
#include <stan/math/prim/fun/log_softmax.hpp>
#include <stan/math/prim/fun/softmax.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_vector_unary.hpp>

namespace stan {
namespace math {

template <typename T>
inline Eigen::Matrix<fvar<T>, Eigen::Dynamic, 1> log_softmax(
const Eigen::Matrix<fvar<T>, Eigen::Dynamic, 1>& alpha) {
using Eigen::Dynamic;
using Eigen::Matrix;

Matrix<T, Dynamic, 1> alpha_t(alpha.size());
for (int k = 0; k < alpha.size(); ++k) {
alpha_t(k) = alpha(k).val_;
}

Matrix<T, Dynamic, 1> softmax_alpha_t = softmax(alpha_t);
Matrix<T, Dynamic, 1> log_softmax_alpha_t = log_softmax(alpha_t);

Matrix<fvar<T>, Dynamic, 1> log_softmax_alpha(alpha.size());
for (int k = 0; k < alpha.size(); ++k) {
log_softmax_alpha(k).val_ = log_softmax_alpha_t(k);
log_softmax_alpha(k).d_ = 0;
}

for (int m = 0; m < alpha.size(); ++m) {
T negative_alpha_m_d_times_softmax_alpha_t_m
= -alpha(m).d_ * softmax_alpha_t(m);
for (int k = 0; k < alpha.size(); ++k) {
if (m == k) {
log_softmax_alpha(k).d_
+= alpha(m).d_ + negative_alpha_m_d_times_softmax_alpha_t_m;
} else {
log_softmax_alpha(k).d_ += negative_alpha_m_d_times_softmax_alpha_t_m;
/**
* Return the log softmax of the specified vector or container of vectors.
*
* @tparam T Type of input vector or matrix.
* @param[in] x Unconstrained input vector.
* @return Log softmax of the input.
* @throw std::domain_error If the input vector is size 0.
*/
template <typename T, require_t<is_fvar<scalar_type_t<T>>>...>
inline auto log_softmax(const T& x) {
return apply_vector_unary<T>::apply(x, [&](const auto& alpha) {
using T_fvar = value_type_t<decltype(alpha)>;
using T_fvar_inner = typename T_fvar::Scalar;

Eigen::Matrix<T_fvar_inner, -1, 1> alpha_t = alpha.val();
Eigen::Matrix<T_fvar_inner, -1, 1> softmax_alpha_t = softmax(alpha_t);

Eigen::Matrix<T_fvar, -1, 1> log_softmax_alpha(alpha.size());
log_softmax_alpha.val() = log_softmax(alpha_t);
log_softmax_alpha.d().setZero();

for (int m = 0; m < alpha.size(); ++m) {
T_fvar_inner negative_alpha_m_d_times_softmax_alpha_t_m
= -alpha(m).d_ * softmax_alpha_t(m);
for (int k = 0; k < alpha.size(); ++k) {
if (m == k) {
log_softmax_alpha(k).d_
+= alpha(m).d_ + negative_alpha_m_d_times_softmax_alpha_t_m;
} else {
log_softmax_alpha(k).d_ += negative_alpha_m_d_times_softmax_alpha_t_m;
}
}
}
}

return log_softmax_alpha;
return log_softmax_alpha;
});
}

} // namespace math
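A note on the math (a sketch for the reader, not part of the diff): the double loop above accumulates the analytic Jacobian-vector product of log-softmax,

\f$ \frac{\partial}{\partial \alpha_m} \log \mathrm{softmax}(\alpha)_k = \delta_{mk} - \mathrm{softmax}(\alpha)_m \f$

so each output tangent is \f$ \dot{d}_k = \sum_m \dot{\alpha}_m \left( \delta_{mk} - \mathrm{softmax}(\alpha)_m \right) \f$: the `m == k` branch contributes the extra \f$ \dot{\alpha}_m \f$ term, and every iteration subtracts \f$ \dot{\alpha}_m \, \mathrm{softmax}(\alpha)_m \f$.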
55 changes: 27 additions & 28 deletions stan/math/fwd/fun/log_sum_exp.hpp
@@ -6,6 +6,7 @@
#include <stan/math/prim/fun/Eigen.hpp>
#include <stan/math/prim/fun/constants.hpp>
#include <stan/math/prim/fun/log_sum_exp.hpp>
#include <stan/math/prim/vectorize/apply_vector_unary.hpp>
#include <cmath>
#include <vector>

@@ -31,37 +32,35 @@ inline fvar<T> log_sum_exp(double x1, const fvar<T>& x2) {

template <typename T>
inline fvar<T> log_sum_exp(const fvar<T>& x1, double x2) {
using std::exp;
if (x2 == NEGATIVE_INFTY) {
return fvar<T>(x1.val_, x1.d_);
}
return fvar<T>(log_sum_exp(x1.val_, x2), x1.d_ / (1 + exp(x2 - x1.val_)));
}

template <typename T>
fvar<T> log_sum_exp(const std::vector<fvar<T> >& v) {
using std::exp;
std::vector<T> vals(v.size());
for (size_t i = 0; i < v.size(); ++i) {
vals[i] = v[i].val_;
}
T deriv(0.0);
T denominator(0.0);
for (size_t i = 0; i < v.size(); ++i) {
T exp_vi = exp(vals[i]);
denominator += exp_vi;
deriv += v[i].d_ * exp_vi;
}
return fvar<T>(log_sum_exp(vals), deriv / denominator);
return log_sum_exp(x2, x1);
}

template <typename T, int R, int C>
fvar<T> log_sum_exp(const Eigen::Matrix<fvar<T>, R, C>& v) {
Eigen::Matrix<T, R, C> vals = v.val();
Eigen::Matrix<T, R, C> exp_vals = vals.array().exp();
/**
* Return the log of the sum of the exponentiated values of the specified
* matrix of values. The matrix may be a full matrix, a vector,
* a row vector, or a container of these.
*
* The function is defined as follows to prevent overflow in exponential
* calculations.
*
* \f$\log \sum_{n=1}^N \exp(x_n) = \max(x) + \log \sum_{n=1}^N \exp(x_n -
* \max(x))\f$.
*
* @tparam T Type of input vector or matrix.
* @param[in] x Matrix of specified values.
* @return The log of the sum of the exponentiated vector values.
*/
template <typename T, require_t<is_fvar<scalar_type_t<T>>>...>
inline auto log_sum_exp(const T& x) {
return apply_vector_unary<T>::reduce(x, [&](const auto& v) {
using T_fvar_inner = typename value_type_t<decltype(v)>::Scalar;
using mat_type = Eigen::Matrix<T_fvar_inner, -1, -1>;
mat_type vals = v.val();
mat_type exp_vals = vals.array().exp();

return fvar<T>(log_sum_exp(vals),
v.d().cwiseProduct(exp_vals).sum() / exp_vals.sum());
return fvar<T_fvar_inner>(
log_sum_exp(vals), v.d().cwiseProduct(exp_vals).sum() / exp_vals.sum());
});
}

} // namespace math
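The overflow-avoidance identity quoted in the doc comment can be illustrated on plain doubles. A minimal sketch, assuming only Eigen and the standard library; stable_log_sum_exp is a hypothetical helper name, not part of this PR:

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// Max-shift trick: log(sum(exp(x))) == max(x) + log(sum(exp(x - max(x)))).
// Shifting by max(x) keeps every exponential in (0, 1], so nothing overflows.
inline double stable_log_sum_exp(const Eigen::VectorXd& x) {
  const double m = x.maxCoeff();
  return m + std::log((x.array() - m).exp().sum());
}

int main() {
  Eigen::VectorXd x(3);
  x << 1000.0, 1001.0, 1002.0;  // exp(1000) alone would overflow a double
  std::cout << stable_log_sum_exp(x) << "\n";  // finite: ~1002.4076
}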
20 changes: 10 additions & 10 deletions stan/math/prim/fun/log_softmax.hpp
@@ -4,6 +4,7 @@
#include <stan/math/prim/err.hpp>
#include <stan/math/prim/fun/Eigen.hpp>
#include <stan/math/prim/fun/log_sum_exp.hpp>
#include <stan/math/prim/vectorize/apply_vector_unary.hpp>

namespace stan {
namespace math {
@@ -32,18 +33,17 @@ namespace math {
* \right.
* \f$
*
* @tparam T type of elements in the vector
* @param[in] v Vector to transform.
* @return Unit simplex result of the softmax transform of the vector.
* @tparam T Type of input vector to transform.
* @param[in] x Vector to transform.
* @return Log of the unit simplex result of the softmax transform of the vector.
*/
template <typename T>
inline Eigen::Matrix<T, Eigen::Dynamic, 1> log_softmax(
const Eigen::Matrix<T, Eigen::Dynamic, 1>& v) {
check_nonzero_size("log_softmax", "v", v);
return v.array() - log_sum_exp(v);
template <typename T, require_t<std::is_arithmetic<scalar_type_t<T>>>...>
inline auto log_softmax(const T& x) {
return apply_vector_unary<T>::apply(x, [&](const auto& v) {
check_nonzero_size("log_softmax", "v", v);
return (v.array() - log_sum_exp(v)).matrix();
});
}

} // namespace math
} // namespace stan

#endif
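A minimal usage sketch of the generalized signature. This is assumption-laden: it presumes this branch's headers, that stan/math/prim.hpp is the umbrella include after the repo flatten, and that apply_vector_unary maps std::vector inputs elementwise as the PR's design intends:

#include <stan/math/prim.hpp>
#include <iostream>
#include <vector>

int main() {
  Eigen::VectorXd v(3);
  v << 1.0, 2.0, 3.0;

  // Single vector: equivalent to v.array() - log_sum_exp(v).
  Eigen::VectorXd lsm = stan::math::log_softmax(v);

  // Container of vectors: the same call applies per element.
  std::vector<Eigen::VectorXd> vs{v, v};
  std::vector<Eigen::VectorXd> lsms = stan::math::log_softmax(vs);

  std::cout << lsm.transpose() << "\n";   // exp of these entries sums to 1
  std::cout << lsms[1].transpose() << "\n";
}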