diff --git a/inst/include/glmmr/calculator.hpp b/inst/include/glmmr/calculator.hpp
index 77cc5eb..328eb78 100644
--- a/inst/include/glmmr/calculator.hpp
+++ b/inst/include/glmmr/calculator.hpp
@@ -28,8 +28,6 @@ class calculator {
     calculator& operator= (const glmmr::calculator& calc);
     VectorXd linear_predictor(const dblvec& parameters,const MatrixXd& data);
-    VectorXd first_derivative(int i,const dblvec& parameters,const MatrixXd& data,double extraData = 0);
-    MatrixXd second_derivative(int i,const dblvec& parameters,const MatrixXd& data, double extraData = 0);
     MatrixXd jacobian(const dblvec& parameters, const MatrixXd& data);
     MatrixXd jacobian(const dblvec& parameters,const MatrixXd& data,const VectorXd& extraData);
     MatrixXd jacobian(const dblvec& parameters,const MatrixXd& data,const MatrixXd& extraData);
@@ -39,8 +37,6 @@ class calculator {
 }
-
-
 inline VectorXd glmmr::calculator::linear_predictor(const dblvec& parameters,
                                                     const MatrixXd& data){
   int n = data.rows();
@@ -56,7 +52,6 @@ inline glmmr::calculator& glmmr::calculator::operator= (const glmmr::calculator&
   instructions = calc.instructions;
   indexes = calc.indexes;
   parameter_names = calc.parameter_names;
-  //var_par = calc.var_par;
   variance.conservativeResize(calc.variance.size());
   variance = calc.variance;
   data_count = calc.data_count;
@@ -65,41 +60,17 @@ inline glmmr::calculator& glmmr::calculator::operator= (const glmmr::calculator&
   return *this;
 };
 
-inline VectorXd glmmr::calculator::first_derivative(int i,
-                                                    const dblvec& parameters,
-                                                    const MatrixXd& data,
-                                                    double extraData){
-  dblvec out = calculate(i,parameters,data,0,1,extraData);
-  VectorXd d = Map<VectorXd>(out.data()+1, out.size()-1);
-  return d;
-};
-
-inline MatrixXd glmmr::calculator::second_derivative(int i,
-                                                     const dblvec& parameters,
-                                                     const MatrixXd& data,
-                                                     double extraData){
-  dblvec out = calculate(i,parameters,data,0,2, extraData);
-  MatrixXd h(parameter_count, parameter_count);
-  int index_count = parameter_count+1;
-  for(int j = 0; j < parameter_count; j++){
-    for(int k = j; k < parameter_count; k++){
-      h(k,j) = out[index_count];
-      if(j != k) h(j,k) = h(k,j);
-      index_count++;
-    }
-  }
-  return h;
-};
-
 inline MatrixXd glmmr::calculator::jacobian(const dblvec& parameters,
                                             const MatrixXd& data){
   int n = data.rows();
   if(n==0)Rcpp::stop("No data initialised in calculator");
   MatrixXd J(n,parameter_count);
-  J.setZero();
 #pragma omp parallel for
   for(int i = 0; i<n; i++){
[...]
@@ ... @@
-      if(idx_iter >= indexes.size())Rcpp::stop("Index out of range: case 2 idx iter: "+std::to_string(idx_iter)+" versus "+std::to_string(indexes.size()));
+      if((unsigned)idx_iter >= indexes.size())Rcpp::stop("Index out of range: case 2 idx iter: "+std::to_string(idx_iter)+" versus "+std::to_string(indexes.size()));
       if(indexes[idx_iter] >= parameter_count)Rcpp::stop("Index out of range: case 2 indexes: "+std::to_string(indexes[idx_iter])+" versus "+std::to_string(parameter_count));
       stack.push(parameters[indexes[idx_iter]]);
diff --git a/inst/include/glmmr/covariance.hpp b/inst/include/glmmr/covariance.hpp
index d9c6c0a..95d539f 100644
--- a/inst/include/glmmr/covariance.hpp
+++ b/inst/include/glmmr/covariance.hpp
@@ -158,7 +158,7 @@ inline void glmmr::Covariance::parse(){
   dblvec3d re_temp_data_;
   // now process each step of the random effect terms
-  if(colnames_.size()!= data_.cols())Rcpp::stop("colnames length != data columns");
+  if(colnames_.size()!= (unsigned int)data_.cols())Rcpp::stop("colnames length != data columns");
   int nre = form_.re_.size();
@@ -208,7 +208,7 @@ inline void glmmr::Covariance::parse(){
     dblvec2d groups;
     dblvec vals;
    bool isgr;
-    int j,k,idx,zcol;
+    unsigned int j,k,idx,zcol;
     if(form_.z_[i].compare("1")==0){
       zcol = -1;
     } else {
@@ -261,9 +261,9 @@ inline void glmmr::Covariance::parse(){
       // for each group, create a new z data including the group
       int fn_var_counter = 0;
-      for(int m = 0; m < fnvars.size(); m++){
+      for(unsigned int m = 0; m < fnvars.size(); m++){
         intvec iter_fn_var_index;
-        for(int p = 0; p < fnvars[m].size(); p++){
+        for(unsigned int p = 0; p < fnvars[m].size(); p++){
           iter_fn_var_index.push_back(p + fn_var_counter);
         }
         fn_var_counter += fnvars[m].size();
@@ -344,15 +344,15 @@ inline void glmmr::Covariance::parse(){
   //now build the reverse polish notation
   int nvarfn;
-  for(int i =0; i < fn_.size(); i++){
+  for(unsigned int i =0; i < fn_.size(); i++){
[...]
     if(fn_[i].size() > 1){
-      for(int j = 0; j < (fn_[i].size()-1); j++){
+      for(unsigned int j = 0; j < (fn_[i].size()-1); j++){
         fn_instruct.push_back(5);
       }
     }
@@ -387,16 +387,16 @@ inline void glmmr::Covariance::parse(){
   //get the number of random effects
   Q_ = 0;
-  for(int i = 0; i < calc_.size(); i++){
+  for(unsigned int i = 0; i < calc_.size(); i++){
     Q_ += re_temp_data_[i].size();
   }
   re_count_.resize(form_.re_terms().size());
   std::fill(re_count_.begin(), re_count_.end(), 0);
-  for(int i = 0; i < calc_.size(); i++){
+  for(unsigned int i = 0; i < calc_.size(); i++){
     re_count_[re_order_[i]] += re_temp_data_[i].size();
     MatrixXd newredata(re_temp_data_[i].size(),re_temp_data_[i][0].size());
-    for(int j = 0; j < re_temp_data_[i].size(); j++){
-      for(int k = 0; k < re_temp_data_[i][0].size(); k++){
+    for(unsigned int j = 0; j < re_temp_data_[i].size(); j++){
+      for(unsigned int k = 0; k < re_temp_data_[i][0].size(); k++){
         newredata(j,k) = re_temp_data_[i][j][k];
       }
     }
@@ -410,8 +410,8 @@ inline void glmmr::Covariance::update_parameters_in_calculators(){
   if(par_for_calcs_.size()==0)par_for_calcs_.resize(B_);
   for(int i = 0; i < B_; i++){
     dblvec par_for_calc;
-    for(int j = 0; j < re_pars_[i].size(); j++){
-      for(int k = 0; k < re_pars_[i][j].size(); k++){
+    for(unsigned int j = 0; j < re_pars_[i].size(); j++){
+      for(unsigned int k = 0; k < re_pars_[i][j].size(); k++){
         par_for_calc.push_back(parameters_[re_pars_[i][j][k]]);
       }
     }
@@ -493,12 +493,12 @@ inline void glmmr::Covariance::update_parameters_extern(const dblvec& parameters
 inline void glmmr::Covariance::update_parameters(const ArrayXd& parameters){
   if(parameters_.size()==0){
-    for(int i = 0; i < parameters.size(); i++){
+    for(unsigned int i = 0; i < parameters.size(); i++){
       parameters_.push_back(parameters(i));
     }
     update_parameters_in_calculators();
   } else if(parameters_.size() == parameters.size()){
-    for(int i = 0; i < parameters.size(); i++){
+    for(unsigned int i = 0; i < parameters.size(); i++){
       parameters_[i] = parameters(i);
     }
     update_parameters_in_calculators();
@@ -820,8 +820,8 @@ inline MatrixXd glmmr::Covariance::D_sparse_builder(bool chol,
 inline bool glmmr::Covariance::any_group_re(){
   bool gr = false;
-  for(int i = 0; i < fn_.size(); i++){
-    for(int j = 0; j < fn_[i].size(); j++){
+  for(unsigned int i = 0; i < fn_.size(); i++){
+    for(unsigned int j = 0; j < fn_[i].size(); j++){
       if(fn_[i][j]=="gr"){
         gr = true;
         break;
@@ -839,7 +839,7 @@ inline strvec glmmr::Covariance::parameter_names(){
   //}
   //auto last = std::unique(parnames.begin(),parnames.end());
   //parnames.erase(last, parnames.end());
-  for(int i = 0; i < form_.re_.size(); i++){
+  for(unsigned int i = 0; i < form_.re_.size(); i++){
     for(int j = 0; j < B_; j++){
       if(re_order_[j]==i){
         parnames.insert(parnames.end(),calc_[j].parameter_names.begin(),calc_[j].parameter_names.end());
diff --git a/inst/include/glmmr/formula.hpp b/inst/include/glmmr/formula.hpp
index 00494d4..6dedf95 100644
--- a/inst/include/glmmr/formula.hpp
+++ b/inst/include/glmmr/formula.hpp
@@ -88,7 +88,7 @@ inline void glmmr::Formula::tokenise(){
     cursor++;
   }
   if(linear_predictor_.back()=='+')linear_predictor_.pop_back();
-  for(int i =0; i<
[...]
@@ ... @@ inline bool parse_formula(std::vector<char>& formula,
   if(col_idx != colnames.end()){
     str s1_parname = "b_" + s1_as_str;
     s1.push_back('*');
-    for(int j = 0; j < s1_parname.size(); j++){
+    for(unsigned int j = 0; j < s1_parname.size(); j++){
       s1.push_back(s1_parname[j]);
     }
   }
@@ -63,7 +63,7 @@ inline bool parse_formula(std::vector<char>& formula,
   if(col_idx != colnames.end()){
     str s2_parname = "b_" + s2_as_str;
     s2.push_back('*');
-    for(int j = 0; j < s2_parname.size(); j++){
+    for(unsigned int j = 0; j < s2_parname.size(); j++){
       s2.push_back(s2_parname[j]);
     }
   }
diff --git a/inst/include/glmmr/linearpredictor.hpp b/inst/include/glmmr/linearpredictor.hpp
index 9490aa6..824fc26 100644
--- a/inst/include/glmmr/linearpredictor.hpp
+++ b/inst/include/glmmr/linearpredictor.hpp
@@ -71,7 +71,7 @@ class LinearPredictor {
   };
   void update_parameters(const dblvec& parameters_){
-    if(parameters.size()!=P())Rcpp::stop("wrong number of parameters");
+    if(parameters.size()!=(unsigned)P())Rcpp::stop("wrong number of parameters");
     parameters = parameters_;
     if(!x_set){
       X_ = calc.jacobian(parameters,Xdata);
diff --git a/inst/include/glmmr/maths.h b/inst/include/glmmr/maths.h
index 259aebe..721c830 100644
--- a/inst/include/glmmr/maths.h
+++ b/inst/include/glmmr/maths.h
@@ -286,7 +286,7 @@ inline double log_likelihood(double y,
                              double mu,
                              double var_par,
                              int flink) {
-  double logl;
+  double logl = 0;
   switch (flink){
     case 1:
diff --git a/inst/include/glmmr/model.hpp b/inst/include/glmmr/model.hpp
index 1c9d7c0..75cfab0 100644
--- a/inst/include/glmmr/model.hpp
+++ b/inst/include/glmmr/model.hpp
@@ -53,19 +53,19 @@ inline void glmmr::Model::set_y(const VectorXd& y_){
 }
 
 inline void glmmr::Model::update_beta(const dblvec &beta_){
-  if(beta_.size()!=model.linear_predictor.P())Rcpp::stop("beta wrong length");
+  if(beta_.size()!=(unsigned)model.linear_predictor.P())Rcpp::stop("beta wrong length");
   model.linear_predictor.update_parameters(beta_);
 }
 
 inline void glmmr::Model::update_theta(const dblvec &theta_){
-  if(theta_.size()!=model.covariance.npar())Rcpp::stop("theta wrong length");
+  if(theta_.size()!=(unsigned)model.covariance.npar())Rcpp::stop("theta wrong length");
   model.covariance.update_parameters(theta_);
   re.ZL = model.covariance.ZL_sparse();
   re.zu_ = re.ZL*re.u_;
 }
 
 inline void glmmr::Model::update_u(const MatrixXd &u_){
-  if(u_.rows()!=model.covariance.Q())Rcpp::stop("u has wrong number of random effects");
+  if(u_.rows()!=(unsigned)model.covariance.Q())Rcpp::stop("u has wrong number of random effects");
   if(u_.cols()!=re.u(false).cols()){
     Rcpp::Rcout << "\nDifferent numbers of random effect samples";
     re.u_.conservativeResize(model.covariance.Q(),u_.cols());
diff --git a/inst/include/glmmr/modelmatrix.hpp b/inst/include/glmmr/modelmatrix.hpp
index 2c36e2d..7e24dfc 100644
--- a/inst/include/glmmr/modelmatrix.hpp
+++ b/inst/include/glmmr/modelmatrix.hpp
@@ -57,7 +57,7 @@ inline MatrixXd glmmr::ModelMatrix::information_matrix_by_block(int b){
 inline MatrixXd glmmr::ModelMatrix::information_matrix(){
   W.update();
   MatrixXd M = MatrixXd::Zero(model.linear_predictor.P(),model.linear_predictor.P());
-  for(int i = 0; i< sigma_blocks.size(); i++){
+  for(unsigned int i = 0; i< sigma_blocks.size(); i++){
     M += information_matrix_by_block(i);
   }
   return M;
@@ -126,7 +126,7 @@ inline MatrixXd glmmr::ModelMatrix::Sigma(bool inverse){
 
 inline MatrixXd glmmr::ModelMatrix::sigma_block(int b,
                                                 bool inverse){
-  if(b >= sigma_blocks.size())Rcpp::stop("Index out of range");
+  if((unsigned)b >= sigma_blocks.size())Rcpp::stop("Index out of range");
   sparse ZLs = submat_sparse(model.covariance.ZL_sparse(),sigma_blocks[b].RowIndexes);
   MatrixXd ZL = sparse_to_dense(ZLs,false);
   MatrixXd S = ZL * ZL.transpose();
diff --git a/inst/include/glmmr/modeloptim.hpp b/inst/include/glmmr/modeloptim.hpp
index 97ad7ea..c7f7ac9 100644
--- a/inst/include/glmmr/modeloptim.hpp
+++ b/inst/include/glmmr/modeloptim.hpp
@@ -154,7 +154,7 @@ inline void glmmr::ModelOptim::update_beta(const VectorXd &beta){
 }
 
 inline void glmmr::ModelOptim::update_theta(const dblvec &theta){
-  if(theta.size()!=model.covariance.npar())Rcpp::stop("theta wrong length");
+  if(theta.size()!=(unsigned)model.covariance.npar())Rcpp::stop("theta wrong length");
   model.covariance.update_parameters(theta);
   re.ZL = model.covariance.ZL_sparse();
   re.zu_ = re.ZL*re.u_;
@@ -575,7 +575,7 @@ inline ArrayXd glmmr::ModelOptim::optimum_weights(double N,
   Rcpp::Rcout << "\n### Preparing data ###";
   Rcpp::Rcout << "\nThere are " << SB.size() << " independent blocks and " << model.n() << " cells.";
   int maxprint = model.n() < 10 ? model.n() : 10;
-  for(int i = 0 ; i < SB.size(); i++){
+  for(unsigned int i = 0 ; i < SB.size(); i++){
     sparse ZLs = submat_sparse(model.covariance.ZL_sparse(),SB[i].RowIndexes);
     MatrixXd ZL = sparse_to_dense(ZLs,false);
     MatrixXd S = ZL * ZL.transpose();
@@ -596,7 +596,7 @@ inline ArrayXd glmmr::ModelOptim::optimum_weights(double N,
     Rcpp::Rcout << "\nIteration " << iter << "\n------------\nweights: [" << weights.segment(0,maxprint).transpose() << " ...]";
     //add check to remove weights that are below a certain threshold
     if((weights < 1e-8).any()){
-      for(int i = 0 ; i < SB.size(); i++){
+      for(unsigned int i = 0 ; i < SB.size(); i++){
        auto it = SB[i].RowIndexes.begin();
        while(it != SB[i].RowIndexes.end()){
          if(weights(*it) < 1e-8){
@@ -616,7 +616,7 @@ inline ArrayXd glmmr::ModelOptim::optimum_weights(double N,
     }
     M.setZero();
-    for(int i = 0 ; i < SB.size(); i++){
+    for(unsigned int i = 0 ; i < SB.size(); i++){
       Sigmas[i] = ZDZ[i];
       for(int j = 0; j < Sigmas[i].rows(); j++){
         Sigmas[i](j,j) += sigma_sq/(N*weights(SB[i].RowIndexes[j]));
@@ -635,7 +635,7 @@ inline ArrayXd glmmr::ModelOptim::optimum_weights(double N,
       for(int j = 0; j < M_row_sums.size(); j++){
         if(M_row_sums(j) == 0){
           Rcpp::Rcout << "\n Removing column " << fake_it;
-          for(int k = 0; k < Xs.size(); k++){
+          for(unsigned int k = 0; k < Xs.size(); k++){
            glmmr::Eigen_ext::removeColumn(Xs[k],fake_it);
          }
          glmmr::Eigen_ext::removeElement(Cvec,fake_it);
@@ -646,14 +646,14 @@ inline ArrayXd glmmr::ModelOptim::optimum_weights(double N,
       }
       M.conservativeResize(M.rows()-countZero,M.cols()-countZero);
       M.setZero();
-      for(int k = 0; k < SB.size(); k++){
+      for(unsigned int k = 0; k < SB.size(); k++){
        M += Xs[k].transpose() * Sigmas[k] * Xs[k];
      }
    }
    M = M.llt().solve(MatrixXd::Identity(M.rows(),M.cols()));
    VectorXd Mc = M*Cvec;
    weightsnew.setZero();
-    for(int i = 0 ; i < SB.size(); i++){
+    for(unsigned int i = 0 ; i < SB.size(); i++){
      block_size = SB[i].RowIndexes.size();
      holder.segment(0,block_size) = Sigmas[i] * Xs[i] * Mc;
      for(int j = 0; j < block_size; j++){
diff --git a/inst/include/glmmr/sparse.h b/inst/include/glmmr/sparse.h
index 81d40e7..7334e83 100644
--- a/inst/include/glmmr/sparse.h
+++ b/inst/include/glmmr/sparse.h
@@ -86,7 +86,7 @@ inline ArrayXd operator*(const sparse& A, const ArrayXd& B){
 // multiplication of sparse and diagonal of a vector
 inline sparse operator%(const sparse& A, const VectorXd& x){
   sparse Ax(A);
-  for(int i = 0; i < A.Ax.size(); i++){
+  for(unsigned int i = 0; i < A.Ax.size(); i++){
     Ax.Ax[i] *= x(Ax.Ai[i]);
   }
   return Ax;
@@ -96,7 +96,7 @@ inline sparse submat_sparse(const sparse& A, intvec rows){
   sparse B;
   B.n = rows.size();
   B.m = A.m;
-  for(int i = 0; i < rows.size(); i++){
+  for(unsigned int i = 0; i < rows.size(); i++){
     B.Ap.push_back(B.Ai.size());
     for(int j = A.Ap[rows[i]]; j < A.Ap[rows[i]+1]; j++){
       B.Ai.push_back(A.Ai[j]);
diff --git a/src/Makevars b/src/Makevars
index 1693392..c54f336 100644
--- a/src/Makevars
+++ b/src/Makevars
@@ -1,4 +1,4 @@
 CXX_STD = CXX17
-PKG_CPPFLAGS = -I../inst/include/
+PKG_CPPFLAGS = "-I../inst/include/"
 PKG_CXXFLAGS = $(SHLIB_OPENMP_CXXFLAGS)
 PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
\ No newline at end of file
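
Nearly all of the C++ changes above share one purpose: they match the signedness of the two sides of a comparison (a signed loop index or count against an unsigned `.size()` or dimension), so the headers compile cleanly under -Wsign-compare; the `double logl = 0;` change likewise silences -Wmaybe-uninitialized. A minimal standalone sketch of the pattern follows — illustrative code only, not part of glmmr:

#include <cstdio>
#include <vector>

int main(){
  std::vector<double> v{1.0, 2.0, 3.0};
  int idx = 5; // a signed index, as typically passed in from an R interface
  // Comparing int against std::vector::size() (an unsigned type) triggers
  // -Wsign-compare; casting the known-non-negative side, as the patch does,
  // silences the warning without changing behaviour.
  if((unsigned)idx >= v.size()){
    std::printf("Index out of range: %d versus %zu\n", idx, v.size());
  }
  // Alternatively, declare the counter unsigned so no cast is needed.
  for(unsigned int i = 0; i < v.size(); i++){
    std::printf("%g\n", v[i]);
  }
  return 0;
}

The cast is safe only when the signed value cannot be negative; where the index may legitimately be negative, the comparison should be guarded instead of cast.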