v0.4.4 Tidied up some compiler warnings
samuel-watson committed Jun 30, 2023
1 parent 1f110ff commit 574a076
Showing 11 changed files with 50 additions and 78 deletions.
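Most of the diff below fixes -Wsign-compare warnings: loop counters and indexes declared as (signed) int were compared against the unsigned values returned by std::vector::size() and Eigen's size(), rows(), and cols(). A minimal sketch of the warning and of the two fixes used in this commit (switching the index type, or casting one side); this is illustrative code, not from the repository:

#include <vector>

void sketch(const std::vector<double>& v, int idx) {
  for (int i = 0; i < v.size(); i++) {}          // warns: signed/unsigned comparison
  for (unsigned int i = 0; i < v.size(); i++) {} // fix 1: unsigned loop index
  if ((unsigned)idx >= v.size()) return;         // fix 2: explicit cast, as in calculator.hpp
}

The remaining changes initialise a variable the compiler flagged as possibly uninitialised (maths.h) and delete dead code (calculator.hpp).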
48 changes: 10 additions & 38 deletions inst/include/glmmr/calculator.hpp
@@ -28,8 +28,6 @@ class calculator {

calculator& operator= (const glmmr::calculator& calc);
VectorXd linear_predictor(const dblvec& parameters,const MatrixXd& data);
-VectorXd first_derivative(int i,const dblvec& parameters,const MatrixXd& data,double extraData = 0);
-MatrixXd second_derivative(int i,const dblvec& parameters,const MatrixXd& data, double extraData = 0);
MatrixXd jacobian(const dblvec& parameters, const MatrixXd& data);
MatrixXd jacobian(const dblvec& parameters,const MatrixXd& data,const VectorXd& extraData);
MatrixXd jacobian(const dblvec& parameters,const MatrixXd& data,const MatrixXd& extraData);
@@ -39,8 +37,6 @@ class calculator {

}



inline VectorXd glmmr::calculator::linear_predictor(const dblvec& parameters,
const MatrixXd& data){
int n = data.rows();
@@ -56,7 +52,6 @@ inline glmmr::calculator& glmmr::calculator::operator= (const glmmr::calculator&
instructions = calc.instructions;
indexes = calc.indexes;
parameter_names = calc.parameter_names;
-//var_par = calc.var_par;
variance.conservativeResize(calc.variance.size());
variance = calc.variance;
data_count = calc.data_count;
@@ -65,41 +60,17 @@ inline glmmr::calculator& glmmr::calculator::operator= (const glmmr::calculator&
return *this;
};

-inline VectorXd glmmr::calculator::first_derivative(int i,
-const dblvec& parameters,
-const MatrixXd& data,
-double extraData){
-dblvec out = calculate(i,parameters,data,0,1,extraData);
-VectorXd d = Map<VectorXd, Unaligned>(out.data()+1, out.size()-1);
-return d;
-};
-
-inline MatrixXd glmmr::calculator::second_derivative(int i,
-const dblvec& parameters,
-const MatrixXd& data,
-double extraData){
-dblvec out = calculate(i,parameters,data,0,2, extraData);
-MatrixXd h(parameter_count, parameter_count);
-int index_count = parameter_count+1;
-for(int j = 0; j < parameter_count; j++){
-for(int k = j; k < parameter_count; k++){
-h(k,j) = out[index_count];
-if(j != k) h(j,k) = h(k,j);
-index_count++;
-}
-}
-return h;
-};

inline MatrixXd glmmr::calculator::jacobian(const dblvec& parameters,
const MatrixXd& data){
int n = data.rows();
if(n==0)Rcpp::stop("No data initialised in calculator");
MatrixXd J(n,parameter_count);
J.setZero();
#pragma omp parallel for
for(int i = 0; i<n ; i++){
-J.row(i) = (first_derivative(i,parameters,data)).transpose();
+dblvec out = calculate(i,parameters,data,0,1,0);
+for(int j = 0; j<parameter_count; j++){
+J(i,j) = out[j+1];
+}
}
return J;
};
@@ -110,10 +81,12 @@ inline MatrixXd glmmr::calculator::jacobian(const dblvec& parameters,
int n = data.rows();
if(n==0)Rcpp::stop("No data initialised in calculator");
MatrixXd J(n,parameter_count);
J.setZero();
#pragma omp parallel for
for(int i = 0; i<n ; i++){
-J.row(i) = (first_derivative(i,parameters,data,extraData(i))).transpose();
+dblvec out = calculate(i,parameters,data,0,1,extraData(i));
+for(int j = 0; j<parameter_count; j++){
+J(i,j) = out[j+1];
+}
}
return J;
};
@@ -241,8 +214,7 @@ inline dblvec glmmr::calculator::calculate(const int i,
}
};


-for(int k = 0; k < instructions.size(); k++){
+for(unsigned int k = 0; k < instructions.size(); k++){
switch(instructions[k]){
case 0:
{
Expand Down Expand Up @@ -281,7 +253,7 @@ inline dblvec glmmr::calculator::calculate(const int i,
{
// push parameter
//DEBUG
-if(idx_iter >= indexes.size())Rcpp::stop("Index out of range: case 2 idx iter: "+std::to_string(idx_iter)+" versus "+std::to_string(indexes.size()));
+if((unsigned)idx_iter >= indexes.size())Rcpp::stop("Index out of range: case 2 idx iter: "+std::to_string(idx_iter)+" versus "+std::to_string(indexes.size()));
if(indexes[idx_iter] >= parameter_count)Rcpp::stop("Index out of range: case 2 indexes: "+std::to_string(indexes[idx_iter])+" versus "+std::to_string(parameter_count));

stack.push(parameters[indexes[idx_iter]]);
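A note on the calculator.hpp change above: the first_derivative and second_derivative helpers are deleted, and jacobian() now reads the derivative entries straight out of calculate(). Judging from the removed helper, calculate(i, parameters, data, 0, 1, extraData) returns a dblvec whose element 0 is the function value and whose next parameter_count elements are the first derivatives, hence J(i,j) = out[j+1]. A sketch of the equivalence, assuming that layout (Eigen types as in the diff; not repository code):

#include <Eigen/Dense>
#include <vector>

// What the removed helper did: skip out[0] and map the remaining entries.
Eigen::VectorXd first_derivative_sketch(const std::vector<double>& out) {
  return Eigen::Map<const Eigen::VectorXd>(out.data() + 1, out.size() - 1);
}

// What jacobian() does now: write the same entries directly into row i,
// skipping the temporary vector previously allocated for every data row.
void fill_row_sketch(Eigen::MatrixXd& J, int i, const std::vector<double>& out) {
  for (int j = 0; j < J.cols(); j++) J(i, j) = out[j + 1];
}

Inlining avoids constructing and copying a VectorXd per row inside the OpenMP loop, and it removes the last callers of the deleted helpers.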
40 changes: 20 additions & 20 deletions inst/include/glmmr/covariance.hpp
@@ -158,7 +158,7 @@ inline void glmmr::Covariance::parse(){
dblvec3d re_temp_data_;

// now process each step of the random effect terms
-if(colnames_.size()!= data_.cols())Rcpp::stop("colnames length != data columns");
+if(colnames_.size()!= (unsigned int)data_.cols())Rcpp::stop("colnames length != data columns");

int nre = form_.re_.size();

@@ -208,7 +208,7 @@ inline void glmmr::Covariance::parse(){
dblvec2d groups;
dblvec vals;
bool isgr;
-int j,k,idx,zcol;
+unsigned int j,k,idx,zcol;
if(form_.z_[i].compare("1")==0){
zcol = -1;
} else {
@@ -261,9 +261,9 @@ inline void glmmr::Covariance::parse(){
// for each group, create a new z data including the group

int fn_var_counter = 0;
-for(int m = 0; m < fnvars.size(); m++){
+for(unsigned int m = 0; m < fnvars.size(); m++){
intvec iter_fn_var_index;
-for(int p = 0; p < fnvars[m].size(); p++){
+for(unsigned int p = 0; p < fnvars[m].size(); p++){
iter_fn_var_index.push_back(p + fn_var_counter);
}
fn_var_counter += fnvars[m].size();
@@ -344,15 +344,15 @@ inline void glmmr::Covariance::parse(){

//now build the reverse polish notation
int nvarfn;
-for(int i =0; i<fn_.size();i++){
+for(unsigned int i =0; i<fn_.size();i++){
intvec fn_instruct;
intvec fn_par;
int minvalue = 100;
-for(int j = 0; j<fn_[i].size();j++){
+for(unsigned int j = 0; j<fn_[i].size();j++){
auto min_value_iterator = std::min_element(re_pars_[i][j].begin(),re_pars_[i][j].end());
if(*min_value_iterator < minvalue) minvalue = *min_value_iterator;
}
-for(int j = 0; j<fn_[i].size();j++){
+for(unsigned int j = 0; j<fn_[i].size();j++){
intvec A;
if(fn_[i][j]!="gr"){
nvarfn = re_cols_[i][j].size();
@@ -366,7 +366,7 @@ inline void glmmr::Covariance::parse(){
}
intvec B = glmmr::interpret_re(fn_[i][j],A);
intvec re_par_less_min_ = re_pars_[i][j];
-for(int k = 0; k < re_pars_[i][j].size(); k++)re_par_less_min_[k] -= minvalue;
+for(unsigned int k = 0; k < re_pars_[i][j].size(); k++)re_par_less_min_[k] -= minvalue;



@@ -375,7 +375,7 @@ inline void glmmr::Covariance::parse(){
fn_par.insert(fn_par.end(),Bpar.begin(),Bpar.end());
}
if(fn_[i].size() > 1){
-for(int j = 0; j < (fn_[i].size()-1); j++){
+for(unsigned int j = 0; j < (fn_[i].size()-1); j++){
fn_instruct.push_back(5);
}
}
@@ -387,16 +387,16 @@ inline void glmmr::Covariance::parse(){

//get the number of random effects
Q_ = 0;
-for(int i = 0; i < calc_.size(); i++){
+for(unsigned int i = 0; i < calc_.size(); i++){
Q_ += re_temp_data_[i].size();
}
re_count_.resize(form_.re_terms().size());
std::fill(re_count_.begin(), re_count_.end(), 0);
-for(int i = 0; i < calc_.size(); i++){
+for(unsigned int i = 0; i < calc_.size(); i++){
re_count_[re_order_[i]] += re_temp_data_[i].size();
MatrixXd newredata(re_temp_data_[i].size(),re_temp_data_[i][0].size());
-for(int j = 0; j < re_temp_data_[i].size(); j++){
-for(int k = 0; k < re_temp_data_[i][0].size(); k++){
+for(unsigned int j = 0; j < re_temp_data_[i].size(); j++){
+for(unsigned int k = 0; k < re_temp_data_[i][0].size(); k++){
newredata(j,k) = re_temp_data_[i][j][k];
}
}
@@ -410,8 +410,8 @@ inline void glmmr::Covariance::update_parameters_in_calculators(){
if(par_for_calcs_.size()==0)par_for_calcs_.resize(B_);
for(int i = 0; i < B_; i++){
dblvec par_for_calc;
-for(int j = 0; j < re_pars_[i].size(); j++){
-for(int k = 0; k < re_pars_[i][j].size(); k++){
+for(unsigned int j = 0; j < re_pars_[i].size(); j++){
+for(unsigned int k = 0; k < re_pars_[i][j].size(); k++){
par_for_calc.push_back(parameters_[re_pars_[i][j][k]]);
}
}
@@ -493,12 +493,12 @@ inline void glmmr::Covariance::update_parameters_extern(const dblvec& parameters

inline void glmmr::Covariance::update_parameters(const ArrayXd& parameters){
if(parameters_.size()==0){
-for(int i = 0; i < parameters.size(); i++){
+for(unsigned int i = 0; i < parameters.size(); i++){
parameters_.push_back(parameters(i));
}
update_parameters_in_calculators();
} else if(parameters_.size() == parameters.size()){
-for(int i = 0; i < parameters.size(); i++){
+for(unsigned int i = 0; i < parameters.size(); i++){
parameters_[i] = parameters(i);
}
update_parameters_in_calculators();
@@ -820,8 +820,8 @@ inline MatrixXd glmmr::Covariance::D_sparse_builder(bool chol,

inline bool glmmr::Covariance::any_group_re(){
bool gr = false;
-for(int i = 0; i < fn_.size(); i++){
-for(int j = 0; j < fn_[i].size(); j++){
+for(unsigned int i = 0; i < fn_.size(); i++){
+for(unsigned int j = 0; j < fn_[i].size(); j++){
if(fn_[i][j]=="gr"){
gr = true;
break;
@@ -839,7 +839,7 @@ inline strvec glmmr::Covariance::parameter_names(){
//}
//auto last = std::unique(parnames.begin(),parnames.end());
//parnames.erase(last, parnames.end());
-for(int i = 0; i < form_.re_.size(); i++){
+for(unsigned int i = 0; i < form_.re_.size(); i++){
for(int j = 0; j < B_; j++){
if(re_order_[j]==i){
parnames.insert(parnames.end(),calc_[j].parameter_names.begin(),calc_[j].parameter_names.end());
2 changes: 1 addition & 1 deletion inst/include/glmmr/formula.hpp
@@ -88,7 +88,7 @@ inline void glmmr::Formula::tokenise(){
cursor++;
}
if(linear_predictor_.back()=='+')linear_predictor_.pop_back();
-for(int i =0; i<re_.size(); i++){
+for(unsigned int i =0; i<re_.size(); i++){
re_order_.push_back(i);
}
re_terms_ = re_;
4 changes: 2 additions & 2 deletions inst/include/glmmr/formulaparse.h
@@ -53,7 +53,7 @@ inline bool parse_formula(std::vector<char>& formula,
if(col_idx != colnames.end()){
str s1_parname = "b_" + s1_as_str;
s1.push_back('*');
-for(int j = 0; j < s1_parname.size(); j++){
+for(unsigned int j = 0; j < s1_parname.size(); j++){
s1.push_back(s1_parname[j]);
}
}
@@ -63,7 +63,7 @@
if(col_idx != colnames.end()){
str s2_parname = "b_" + s2_as_str;
s2.push_back('*');
-for(int j = 0; j < s2_parname.size(); j++){
+for(unsigned int j = 0; j < s2_parname.size(); j++){
s2.push_back(s2_parname[j]);
}
}
2 changes: 1 addition & 1 deletion inst/include/glmmr/linearpredictor.hpp
@@ -71,7 +71,7 @@ class LinearPredictor {
};

void update_parameters(const dblvec& parameters_){
-if(parameters.size()!=P())Rcpp::stop("wrong number of parameters");
+if(parameters.size()!=(unsigned)P())Rcpp::stop("wrong number of parameters");
parameters = parameters_;
if(!x_set){
X_ = calc.jacobian(parameters,Xdata);
2 changes: 1 addition & 1 deletion inst/include/glmmr/maths.h
@@ -286,7 +286,7 @@ inline double log_likelihood(double y,
double mu,
double var_par,
int flink) {
-double logl;
+double logl = 0;

switch (flink){
case 1:
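The maths.h change (double logl; becomes double logl = 0;) is the usual fix for a -Wmaybe-uninitialized warning: logl is only assigned inside the switch over the family/link code, so a code not matched by any case would leave it uninitialised at the return. A minimal illustration of the pattern; the function shape here is assumed, not taken from the repository:

double log_likelihood_sketch(double y, double mu, int flink) {
  double logl = 0; // initialised so every path through the switch is defined
  switch (flink) {
    case 1:
      logl = -0.5 * (y - mu) * (y - mu); // stand-in for one family's kernel
      break;
    // ... other cases; with no default and no initialiser, an unmatched
    // code would return an indeterminate value
  }
  return logl;
}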
6 changes: 3 additions & 3 deletions inst/include/glmmr/model.hpp
@@ -53,19 +53,19 @@ inline void glmmr::Model::set_y(const VectorXd& y_){
}

inline void glmmr::Model::update_beta(const dblvec &beta_){
-if(beta_.size()!=model.linear_predictor.P())Rcpp::stop("beta wrong length");
+if(beta_.size()!=(unsigned)model.linear_predictor.P())Rcpp::stop("beta wrong length");
model.linear_predictor.update_parameters(beta_);
}

inline void glmmr::Model::update_theta(const dblvec &theta_){
-if(theta_.size()!=model.covariance.npar())Rcpp::stop("theta wrong length");
+if(theta_.size()!=(unsigned)model.covariance.npar())Rcpp::stop("theta wrong length");
model.covariance.update_parameters(theta_);
re.ZL = model.covariance.ZL_sparse();
re.zu_ = re.ZL*re.u_;
}

inline void glmmr::Model::update_u(const MatrixXd &u_){
-if(u_.rows()!=model.covariance.Q())Rcpp::stop("u has wrong number of random effects");
+if(u_.rows()!=(unsigned)model.covariance.Q())Rcpp::stop("u has wrong number of random effects");
if(u_.cols()!=re.u(false).cols()){
Rcpp::Rcout << "\nDifferent numbers of random effect samples";
re.u_.conservativeResize(model.covariance.Q(),u_.cols());
4 changes: 2 additions & 2 deletions inst/include/glmmr/modelmatrix.hpp
@@ -57,7 +57,7 @@ inline MatrixXd glmmr::ModelMatrix::information_matrix_by_block(int b){
inline MatrixXd glmmr::ModelMatrix::information_matrix(){
W.update();
MatrixXd M = MatrixXd::Zero(model.linear_predictor.P(),model.linear_predictor.P());
-for(int i = 0; i< sigma_blocks.size(); i++){
+for(unsigned int i = 0; i< sigma_blocks.size(); i++){
M += information_matrix_by_block(i);
}
return M;
@@ -126,7 +126,7 @@ inline MatrixXd glmmr::ModelMatrix::Sigma(bool inverse){

inline MatrixXd glmmr::ModelMatrix::sigma_block(int b,
bool inverse){
-if(b >= sigma_blocks.size())Rcpp::stop("Index out of range");
+if((unsigned)b >= sigma_blocks.size())Rcpp::stop("Index out of range");
sparse ZLs = submat_sparse(model.covariance.ZL_sparse(),sigma_blocks[b].RowIndexes);
MatrixXd ZL = sparse_to_dense(ZLs,false);
MatrixXd S = ZL * ZL.transpose();
14 changes: 7 additions & 7 deletions inst/include/glmmr/modeloptim.hpp
@@ -154,7 +154,7 @@ inline void glmmr::ModelOptim::update_beta(const VectorXd &beta){
}

inline void glmmr::ModelOptim::update_theta(const dblvec &theta){
-if(theta.size()!=model.covariance.npar())Rcpp::stop("theta wrong length");
+if(theta.size()!=(unsigned)model.covariance.npar())Rcpp::stop("theta wrong length");
model.covariance.update_parameters(theta);
re.ZL = model.covariance.ZL_sparse();
re.zu_ = re.ZL*re.u_;
@@ -575,7 +575,7 @@ inline ArrayXd glmmr::ModelOptim::optimum_weights(double N,
Rcpp::Rcout << "\n### Preparing data ###";
Rcpp::Rcout << "\nThere are " << SB.size() << " independent blocks and " << model.n() << " cells.";
int maxprint = model.n() < 10 ? model.n() : 10;
-for(int i = 0 ; i < SB.size(); i++){
+for(unsigned int i = 0 ; i < SB.size(); i++){
sparse ZLs = submat_sparse(model.covariance.ZL_sparse(),SB[i].RowIndexes);
MatrixXd ZL = sparse_to_dense(ZLs,false);
MatrixXd S = ZL * ZL.transpose();
@@ -596,7 +596,7 @@ inline ArrayXd glmmr::ModelOptim::optimum_weights(double N,
Rcpp::Rcout << "\nIteration " << iter << "\n------------\nweights: [" << weights.segment(0,maxprint).transpose() << " ...]";
//add check to remove weights that are below a certain threshold
if((weights < 1e-8).any()){
-for(int i = 0 ; i < SB.size(); i++){
+for(unsigned int i = 0 ; i < SB.size(); i++){
auto it = SB[i].RowIndexes.begin();
while(it != SB[i].RowIndexes.end()){
if(weights(*it) < 1e-8){
@@ -616,7 +616,7 @@ inline ArrayXd glmmr::ModelOptim::optimum_weights(double N,
}

M.setZero();
-for(int i = 0 ; i < SB.size(); i++){
+for(unsigned int i = 0 ; i < SB.size(); i++){
Sigmas[i] = ZDZ[i];
for(int j = 0; j < Sigmas[i].rows(); j++){
Sigmas[i](j,j) += sigma_sq/(N*weights(SB[i].RowIndexes[j]));
@@ -635,7 +635,7 @@ inline ArrayXd glmmr::ModelOptim::optimum_weights(double N,
for(int j = 0; j < M_row_sums.size(); j++){
if(M_row_sums(j) == 0){
Rcpp::Rcout << "\n Removing column " << fake_it;
-for(int k = 0; k < Xs.size(); k++){
+for(unsigned int k = 0; k < Xs.size(); k++){
glmmr::Eigen_ext::removeColumn(Xs[k],fake_it);
}
glmmr::Eigen_ext::removeElement(Cvec,fake_it);
@@ -646,14 +646,14 @@ inline ArrayXd glmmr::ModelOptim::optimum_weights(double N,
}
M.conservativeResize(M.rows()-countZero,M.cols()-countZero);
M.setZero();
-for(int k = 0; k < SB.size(); k++){
+for(unsigned int k = 0; k < SB.size(); k++){
M += Xs[k].transpose() * Sigmas[k] * Xs[k];
}
}
M = M.llt().solve(MatrixXd::Identity(M.rows(),M.cols()));
VectorXd Mc = M*Cvec;
weightsnew.setZero();
-for(int i = 0 ; i < SB.size(); i++){
+for(unsigned int i = 0 ; i < SB.size(); i++){
block_size = SB[i].RowIndexes.size();
holder.segment(0,block_size) = Sigmas[i] * Xs[i] * Mc;
for(int j = 0; j < block_size; j++){
(Diffs for the remaining 2 changed files not shown.)
