v0.4.3 Bug fixes and refactored code
samuel-watson committed Jun 27, 2023
1 parent 1028dfb commit cd669ba
Showing 11 changed files with 80 additions and 84 deletions.
2 changes: 1 addition & 1 deletion DESCRIPTION
@@ -2,7 +2,7 @@ Package: glmmrBase
 Type: Package
 Title: Generalised Linear Mixed Models in R
 Version: 0.4.3
-Date: 2023-06-25
+Date: 2023-06-27
 Authors@R: c(person("Sam", "Watson", email = "[email protected]",
     role = c("aut", "cre")))
 Description: Specification, analysis, simulation, and fitting of generalised linear mixed models.
8 changes: 6 additions & 2 deletions R/R6Model.R
@@ -1273,9 +1273,10 @@ Model <- R6::R6Class("Model",
       self$mean$.__enclos_env__$private$hash))
     },
     ptr = NULL,
+    bitsptr = NULL,
     set_y = function(y){
       if(is.null(private$ptr))private$update_ptr()
-      Model__set_y(y)
+      Model__set_y(private$ptr,y)
     },
     update_ptr = function(){
       if(is.null(private$ptr)){
@@ -1291,7 +1292,10 @@ Model <- R6::R6Class("Model",
           data <- cbind(data,self$mean$data[,cnames])
         }
         if(self$family[[1]]=="bernoulli" & any(self$trials>1))self$family[[1]] <- "binomial"
-        private$ptr <- Model__new(form,as.matrix(data),colnames(data),tolower(self$family[[1]]),self$family[[2]])
+        private$bitsptr <- ModelBits__new(form,as.matrix(data),colnames(data),
+                                          tolower(self$family[[1]]),self$family[[2]],
+                                          self$mean$parameters,self$covariance$parameters)
+        private$ptr <- Model__new_from_bits(private$bitsptr)
         Model__set_offset(private$ptr,self$mean$offset)
         Model__set_weights(private$ptr,self$weights)
         Model__set_var_par(private$ptr,self$var_par)
8 changes: 2 additions & 6 deletions R/RcppExports.R
@@ -113,12 +113,8 @@ ModelBits__update_theta <- function(xp, theta_) {
     invisible(.Call(`_glmmrBase_ModelBits__update_theta`, xp, theta_))
 }
 
-Model__new <- function(bitsptr_) {
-    .Call(`_glmmrBase_Model__new`, bitsptr_)
-}
-
-RE__new <- function(formula_, data_, colnames_, family_, link_, beta_, theta_) {
-    invisible(.Call(`_glmmrBase_RE__new`, formula_, data_, colnames_, family_, link_, beta_, theta_))
+Model__new_from_bits <- function(bptr_) {
+    .Call(`_glmmrBase_Model__new_from_bits`, bptr_)
 }
 
 Model__set_y <- function(xp, y_) {
11 changes: 6 additions & 5 deletions inst/include/glmmr/modelextradata.hpp
@@ -24,26 +24,27 @@ class ModelExtraData{
     variance.setConstant(1.0);
   }
   void set_offset(const VectorXd& offset_){
-    offset.conservativeResize(offset_.size());
+    if(offset_.size()!=offset.size())offset.conservativeResize(offset_.size());
     offset = offset_;
   }
   void set_weights(const ArrayXd& weights_){
-    weights.conservativeResize(weights_.size());
+    if(weights_.size()!=weights.size())weights.conservativeResize(weights_.size());
     weights = weights_;
   }
   void set_weights(int n){
-    weights.conservativeResize(n);
+    if(weights.size()!=n)weights.conservativeResize(n);
     weights.setConstant(1.0);
   }
   void set_variance(const ArrayXd& variance_){
-    variance.conservativeResize(variance_.size());
+    if(variance_.size()!=variance.size())variance.conservativeResize(variance_.size());
     variance = variance_;
   }
   void set_var_par(double var_par_){
     var_par = var_par_;
+    variance.setConstant(var_par_);
   }
   void update_y(const VectorXd& y_){
-    y.conservativeResize(y_.size());
+    if(y_.size()!=y.size())y.conservativeResize(y_.size());
     y = y_;
   }
 };
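A note on the pattern introduced above: guarding conservativeResize behind a size comparison means a reallocation-and-copy can only happen when the length actually changes, which keeps repeated setter calls cheap inside optimisation and sampling loops. The guard matters most on the setConstant paths, where no subsequent assignment would fix the size. A minimal sketch of the idiom (the function name is illustrative, not from the package):

    #include <Eigen/Dense>

    // Resize-then-fill: reallocate only when the length differs, since
    // setConstant fills in place and does not resize.
    inline void fill_resized(Eigen::ArrayXd& a, Eigen::Index n, double value){
      if(a.size() != n) a.conservativeResize(n);
      a.setConstant(value);
    }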
8 changes: 4 additions & 4 deletions inst/include/glmmr/modelmatrix.hpp
@@ -183,10 +183,10 @@ inline MatrixXd glmmr::ModelMatrix::observed_information_matrix(){
   //result *= (1.0/iter);
   //return result;
   W.update();
-  MatrixXd XtXW = (model.linear_predictor.X()).transpose() * W.W().asDiagonal() * model.linear_predictor.X();
-  MatrixXd ZL = sparse_to_dense(re.ZL,false);
-  MatrixXd XtWZL = (model.linear_predictor.X()).transpose() * W.W().asDiagonal() * ZL;
-  MatrixXd ZLWLZ = ZL.transpose() * W.W().asDiagonal() * ZL;
+  MatrixXd XtXW = (model.linear_predictor.X()).transpose() * W.W_.asDiagonal() * model.linear_predictor.X();
+  MatrixXd ZL = model.covariance.ZL();
+  MatrixXd XtWZL = (model.linear_predictor.X()).transpose() * W.W_.asDiagonal() * ZL;
+  MatrixXd ZLWLZ = ZL.transpose() * W.W_.asDiagonal() * ZL;
   ZLWLZ += MatrixXd::Identity(model.covariance.Q(),model.covariance.Q());
   MatrixXd infomat(model.linear_predictor.P()+model.covariance.Q(),model.linear_predictor.P()+model.covariance.Q());
   infomat.topLeftCorner(model.linear_predictor.P(),model.linear_predictor.P()) = XtXW;
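Two things change in this hunk: the weight vector is read from the member W.W_ rather than through an accessor, and the dense ZL now comes from model.covariance.ZL() instead of densifying re.ZL. The surrounding code assembles the observed information matrix blockwise as [XᵀWX, XᵀWZL; (XᵀWZL)ᵀ, ZLᵀWZL + I_Q]. A self-contained sketch of that block assembly with generic Eigen inputs (all names illustrative):

    #include <Eigen/Dense>
    using Eigen::MatrixXd;
    using Eigen::VectorXd;

    // Blockwise assembly of an observed-information-style matrix:
    //   [ X'WX       X'WZL        ]
    //   [ (X'WZL)'   ZL'WZL + I_Q ]
    MatrixXd block_information(const MatrixXd& X, const VectorXd& w, const MatrixXd& ZL){
      const Eigen::Index P = X.cols(), Q = ZL.cols();
      MatrixXd XtWX  = X.transpose() * w.asDiagonal() * X;
      MatrixXd XtWZL = X.transpose() * w.asDiagonal() * ZL;
      MatrixXd ZLWZL = ZL.transpose() * w.asDiagonal() * ZL + MatrixXd::Identity(Q,Q);
      MatrixXd M(P+Q, P+Q);
      M.topLeftCorner(P,P)     = XtWX;
      M.topRightCorner(P,Q)    = XtWZL;
      M.bottomLeftCorner(Q,P)  = XtWZL.transpose();
      M.bottomRightCorner(Q,Q) = ZLWZL;
      return M;
    }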
16 changes: 7 additions & 9 deletions inst/include/glmmr/modelmcmc.hpp
@@ -61,7 +61,7 @@ class ModelMCMC{
 
 inline double glmmr::ModelMCMC::log_prob(const VectorXd &v){
   VectorXd zu = re.ZL * v;
-  VectorXd mu = model.xb().matrix() + re.Zu();
+  VectorXd mu = model.xb().matrix() + zu;
   double lp1 = 0;
   double lp2 = 0;
   if(model.weighted){
@@ -91,9 +91,9 @@ inline double glmmr::ModelMCMC::log_prob(const VectorXd &v){
 }
 
 inline VectorXd glmmr::ModelMCMC::new_proposal(const VectorXd& u0_,
-                                                bool adapt_,
-                                                int iter_,
-                                                double runif_){
+                                               bool adapt_,
+                                               int iter_,
+                                               double runif_){
   Rcpp::NumericVector z = Rcpp::rnorm(model.covariance.Q());
   VectorXd r = Rcpp::as<Map<VectorXd> >(z);
   VectorXd grad = matrix.log_gradient(u0_,false);
@@ -153,8 +153,6 @@ inline void glmmr::ModelMCMC::sample(int warmup_,
   accept = 0;
   std::minstd_rand gen(std::random_device{}());
   std::uniform_real_distribution<double> dist(0.0, 1.0);
-  if(nsamp_!=re.u(false).cols())re.u_.conservativeResize(model.covariance.Q(),nsamp_);
-  re.u_.setZero();
   int totalsamps = nsamp_ + warmup_;
   int i;
   double prob;
@@ -172,7 +170,6 @@ inline void glmmr::ModelMCMC::sample(int warmup_,
     }
   }
   re.u_.col(0) = unew;
-  int iter = 1;
   //sampling
   for(i = 0; i < nsamp_-1; i++){
     prob = dist(gen);
@@ -183,14 +180,15 @@ inline void glmmr::ModelMCMC::sample(int warmup_,
   }
   if(trace>0)Rcpp::Rcout << "\nAccept rate: " << (double)accept/(warmup_+nsamp_) << " steps: " << steps << " step size: " << e;
   if(verbose)Rcpp::Rcout << "\n" << std::string(40, '-');
-  // return samples_.matrix();//.array();//remove L
 }
 
 inline void glmmr::ModelMCMC::mcmc_sample(int warmup_,
                                           int samples_,
                                           int adapt_){
+  if(re.u_.cols()!=samples_)re.u_.conservativeResize(NoChange,samples_);
+  if(re.u_.cols()!=re.zu_.cols())re.zu_.conservativeResize(NoChange,samples_);
+  re.u_.setZero();
   sample(warmup_,samples_,adapt_);
-  if(re.u_.cols()!=re.Zu().cols())re.zu_.conservativeResize(model.covariance.Q(),re.u_.cols());
   re.zu_ = re.ZL*re.u_;
 }
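The one-line change in log_prob is the substantive fix here: the joint log density must be evaluated at the proposal v, so the linear predictor has to use zu = ZL·v derived from that proposal rather than the stored re.Zu() left over from the previously accepted state. A hedged sketch of the corrected shape, using a Gaussian likelihood purely for illustration (the package dispatches on family and link):

    #include <cmath>
    #include <Eigen/Dense>
    using Eigen::MatrixXd;
    using Eigen::VectorXd;

    // log p(y | xb + ZL v) + log N(v; 0, I), up to additive constants.
    double log_prob_sketch(const VectorXd& y, const VectorXd& xb,
                           const MatrixXd& ZL, const VectorXd& v, double sigma){
      VectorXd zu = ZL * v;                 // derived from the proposal v itself,
      VectorXd mu = xb + zu;                // not from a cached Zu of a prior state
      double lp1 = -0.5 * (y - mu).squaredNorm() / (sigma * sigma)
                   - y.size() * std::log(sigma);
      double lp2 = -0.5 * v.squaredNorm();  // standard-normal prior on the u-scale v
      return lp1 + lp2;
    }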
23 changes: 18 additions & 5 deletions inst/include/glmmr/modeloptim.hpp
@@ -28,6 +28,8 @@ class ModelOptim{
 
     void update_beta(const dblvec &beta);
     void update_theta(const dblvec &theta);
+    void update_beta(const VectorXd &beta);
+    void update_theta(const VectorXd &theta);
     void update_u(const MatrixXd& u);
     double log_likelihood();
     double full_log_likelihood();
@@ -147,13 +149,24 @@ inline void glmmr::ModelOptim::update_beta(const dblvec &beta){
   model.linear_predictor.update_parameters(beta);
 }
 
+inline void glmmr::ModelOptim::update_beta(const VectorXd &beta){
+  model.linear_predictor.update_parameters(beta.array());
+}
+
 inline void glmmr::ModelOptim::update_theta(const dblvec &theta){
   if(theta.size()!=model.covariance.npar())Rcpp::stop("theta wrong length");
   model.covariance.update_parameters(theta);
   re.ZL = model.covariance.ZL_sparse();
   re.zu_ = re.ZL*re.u_;
 }
+
+inline void glmmr::ModelOptim::update_theta(const VectorXd &theta){
+  if(theta.size()!=model.covariance.npar())Rcpp::stop("theta wrong length");
+  model.covariance.update_parameters(theta.array());
+  re.ZL = model.covariance.ZL_sparse();
+  re.zu_ = re.ZL*re.u_;
+}
 
 inline void glmmr::ModelOptim::update_u(const MatrixXd &u_){
   if(u_.rows()!=model.covariance.Q())Rcpp::stop("u has wrong number of random effects");
   if(u_.cols()!=re.u(false).cols()){
@@ -322,7 +335,7 @@ inline void glmmr::ModelOptim::nr_beta(){
     XtWXm = XtWXm.inverse();
     VectorXd Wum = Wu.rowwise().mean();
     VectorXd bincr = XtWXm * (model.linear_predictor.X().transpose()) * Wum;
-    for(int i = 0; i < model.linear_predictor.P(); i++)model.linear_predictor.parameters[i] += bincr(i);
+    update_beta(model.linear_predictor.parameter_vector() + bincr);
   }
   calculate_var_par();
 }
@@ -344,13 +357,13 @@ inline void glmmr::ModelOptim::laplace_nr_beta_u(){
   w = w.cwiseProduct(resid.matrix());
   VectorXd params(model.linear_predictor.P()+model.covariance.Q());
   params.head(model.linear_predictor.P()) = model.linear_predictor.parameter_vector();
-  params.tail(model.covariance.Q()) = re.u(false).col(0);
+  params.tail(model.covariance.Q()) = re.u_.col(0);
   VectorXd pderiv(model.linear_predictor.P()+model.covariance.Q());
   pderiv.head(model.linear_predictor.P()) = (model.linear_predictor.X()).transpose() * w;
-  pderiv.tail(model.covariance.Q()) = matrix.log_gradient(re.u(false).col(0));
+  pderiv.tail(model.covariance.Q()) = matrix.log_gradient(re.u_.col(0));
   params += infomat*pderiv;
-  for(int i = 0; i < model.linear_predictor.P(); i++)model.linear_predictor.parameters[i] += params(i);
-  for(int i = 0; i < model.covariance.Q(); i++)re.u_(i,0) += params(model.linear_predictor.P()+i);
+  update_beta(params.head(model.linear_predictor.P()));
+  update_u(params.tail(model.covariance.Q()));
   calculate_var_par();
 }
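The new VectorXd overloads exist so Newton-type updates can be written as a single expression — update_beta(parameter_vector() + bincr) — with the side effects of a theta update (refreshing ZL and zu_) centralised in one place rather than repeated at each call site. A simplified sketch of the delegation pattern (types and members here are illustrative stand-ins):

    #include <vector>
    #include <Eigen/Dense>
    using Eigen::VectorXd;

    struct OptimSketch {
      std::vector<double> beta;   // stand-in for the linear predictor's parameter store
      void update_beta(const std::vector<double>& b){
        beta = b;                 // the real code also refreshes cached X*beta quantities
      }
      // Overload so callers can pass Eigen arithmetic results directly.
      void update_beta(const VectorXd& b){
        update_beta(std::vector<double>(b.data(), b.data() + b.size()));
      }
    };

With these in place, nr_beta's former element-wise loop collapses to the single update_beta call seen in the hunk above, and laplace_nr_beta_u routes its increments through update_beta/update_u instead of mutating members directly.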
2 changes: 1 addition & 1 deletion inst/include/glmmr/randomeffects.hpp
@@ -21,7 +21,7 @@ class RandomEffects{
   RandomEffects(glmmr::ModelBits& model_) :
     ZL(model_.n(),model_.covariance.Q()),
     u_(MatrixXd::Zero(model_.covariance.Q(),1)),
-    zu_(model_.n(),1), model(model_) {};
+    zu_(model_.n(),1), model(model_) { if(model.covariance.parameters_.size()>0)ZL = model.covariance.ZL_sparse();};
   MatrixXd Zu(){return zu_;};
   MatrixXd u(bool scaled = true);
   vector_matrix predict_re(const ArrayXXd& newdata_,const ArrayXd& newoffset_);
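Initialising ZL in the constructor body, conditional on the covariance parameters being set, is what lets a Model built from an already-parameterised ModelBits start with a usable ZL instead of an unfilled placeholder. A self-contained sketch of the pattern (all types here are stand-ins, not the package's):

    #include <Eigen/Dense>

    struct CovSketch {                  // minimal stand-in for the package's Covariance
      int n, Q, npar;
      Eigen::MatrixXd ZL_dense() const { return Eigen::MatrixXd::Identity(n, Q); }
    };

    struct RandomEffectsSketch {
      Eigen::MatrixXd ZL;
      explicit RandomEffectsSketch(const CovSketch& cov)
        : ZL(Eigen::MatrixXd::Zero(cov.n, cov.Q)) {
        if(cov.npar > 0) ZL = cov.ZL_dense();  // replace placeholder once parameters exist
      }
    };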
29 changes: 6 additions & 23 deletions src/RcppExports.cpp
@@ -329,33 +329,17 @@ BEGIN_RCPP
     return R_NilValue;
 END_RCPP
 }
-// Model__new
-SEXP Model__new(SEXP bitsptr_);
-RcppExport SEXP _glmmrBase_Model__new(SEXP bitsptr_SEXP) {
+// Model__new_from_bits
+SEXP Model__new_from_bits(SEXP bptr_);
+RcppExport SEXP _glmmrBase_Model__new_from_bits(SEXP bptr_SEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::RNGScope rcpp_rngScope_gen;
-    Rcpp::traits::input_parameter< SEXP >::type bitsptr_(bitsptr_SEXP);
-    rcpp_result_gen = Rcpp::wrap(Model__new(bitsptr_));
+    Rcpp::traits::input_parameter< SEXP >::type bptr_(bptr_SEXP);
+    rcpp_result_gen = Rcpp::wrap(Model__new_from_bits(bptr_));
     return rcpp_result_gen;
 END_RCPP
 }
-// RE__new
-void RE__new(SEXP formula_, SEXP data_, SEXP colnames_, SEXP family_, SEXP link_, SEXP beta_, SEXP theta_);
-RcppExport SEXP _glmmrBase_RE__new(SEXP formula_SEXP, SEXP data_SEXP, SEXP colnames_SEXP, SEXP family_SEXP, SEXP link_SEXP, SEXP beta_SEXP, SEXP theta_SEXP) {
-BEGIN_RCPP
-    Rcpp::RNGScope rcpp_rngScope_gen;
-    Rcpp::traits::input_parameter< SEXP >::type formula_(formula_SEXP);
-    Rcpp::traits::input_parameter< SEXP >::type data_(data_SEXP);
-    Rcpp::traits::input_parameter< SEXP >::type colnames_(colnames_SEXP);
-    Rcpp::traits::input_parameter< SEXP >::type family_(family_SEXP);
-    Rcpp::traits::input_parameter< SEXP >::type link_(link_SEXP);
-    Rcpp::traits::input_parameter< SEXP >::type beta_(beta_SEXP);
-    Rcpp::traits::input_parameter< SEXP >::type theta_(theta_SEXP);
-    RE__new(formula_, data_, colnames_, family_, link_, beta_, theta_);
-    return R_NilValue;
-END_RCPP
-}
 // Model__set_y
 void Model__set_y(SEXP xp, SEXP y_);
 RcppExport SEXP _glmmrBase_Model__set_y(SEXP xpSEXP, SEXP y_SEXP) {
@@ -1074,8 +1058,7 @@ static const R_CallMethodDef CallEntries[] = {
     {"_glmmrBase_ModelBits__new", (DL_FUNC) &_glmmrBase_ModelBits__new, 7},
     {"_glmmrBase_ModelBits__update_beta", (DL_FUNC) &_glmmrBase_ModelBits__update_beta, 2},
     {"_glmmrBase_ModelBits__update_theta", (DL_FUNC) &_glmmrBase_ModelBits__update_theta, 2},
-    {"_glmmrBase_Model__new", (DL_FUNC) &_glmmrBase_Model__new, 1},
-    {"_glmmrBase_RE__new", (DL_FUNC) &_glmmrBase_RE__new, 7},
+    {"_glmmrBase_Model__new_from_bits", (DL_FUNC) &_glmmrBase_Model__new_from_bits, 1},
     {"_glmmrBase_Model__set_y", (DL_FUNC) &_glmmrBase_Model__set_y, 2},
     {"_glmmrBase_Model__set_offset", (DL_FUNC) &_glmmrBase_Model__set_offset, 2},
     {"_glmmrBase_Model__set_weights", (DL_FUNC) &_glmmrBase_Model__set_weights, 2},
21 changes: 5 additions & 16 deletions src/model_module.cpp
@@ -34,28 +34,17 @@ void ModelBits__update_theta(SEXP xp, SEXP theta_){
 }
 
 // [[Rcpp::export]]
-SEXP Model__new(SEXP formula_, SEXP data_, SEXP colnames_,
-                SEXP family_, SEXP link_, SEXP beta_,
-                SEXP theta_){
-  std::string formula = as<std::string>(formula_);
-  Eigen::ArrayXXd data = as<Eigen::ArrayXXd>(data_);
-  std::vector<std::string> colnames = as<std::vector<std::string> >(colnames_);
-  std::string family = as<std::string>(family_);
-  std::string link = as<std::string>(link_);
-  std::vector<double> beta = as<std::vector<double> >(beta_);
-  std::vector<double> theta = as<std::vector<double> >(theta_);
-  glmmr::ModelBits model(formula,data,colnames,family,link);
-  model.linear_predictor.update_parameters(beta);
-  model.covariance.update_parameters(theta);
-  XPtr<glmmr::Model> ptr(new glmmr::Model(model),true);
+SEXP Model__new_from_bits(SEXP bptr_){
+  XPtr<glmmr::ModelBits> bptr(bptr_);
+  XPtr<glmmr::Model> ptr(new glmmr::Model(*bptr),true);
   return ptr;
 }
 
 // [[Rcpp::export]]
 void Model__set_y(SEXP xp, SEXP y_){
-  Eigen::VectorXd offset = as<Eigen::VectorXd>(y_);
+  Eigen::VectorXd y = as<Eigen::VectorXd>(y_);
   XPtr<glmmr::Model> ptr(xp);
-  ptr->set_y(offset);
+  ptr->set_y(y);
 }
 
 // [[Rcpp::export]]
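This is the C++ half of the refactor seen in R6Model.R above: construction is now two-stage, with ModelBits built and parameterised first, then Model wrapped around it. Since glmmr::Model appears to take the ModelBits by reference (an inference from Model(*bptr), not something the diff states), the R side must keep both external pointers alive together — presumably why the R6 class stores bitsptr alongside ptr. A minimal sketch of the XPtr round trip (class names are stand-ins, not the package's):

    #include <Rcpp.h>
    using namespace Rcpp;

    struct BitsSketch { double value = 1.0; };  // stand-in for glmmr::ModelBits
    struct ModelSketch {                        // stand-in for glmmr::Model
      BitsSketch& bits;                         // holds a reference: bits must outlive it
      explicit ModelSketch(BitsSketch& b) : bits(b) {}
    };

    // [[Rcpp::export]]
    SEXP bits_new(){ return XPtr<BitsSketch>(new BitsSketch(), true); }

    // [[Rcpp::export]]
    SEXP model_new_from_bits(SEXP bptr_){
      XPtr<BitsSketch> bptr(bptr_);             // re-wrap the incoming external pointer
      return XPtr<ModelSketch>(new ModelSketch(*bptr), true);
    }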
36 changes: 24 additions & 12 deletions tests/testthat/test-model.R
@@ -4,10 +4,14 @@ test_that("model dense ml", {
   df$t1 <- I(df$t==1)*1
   df$t2 <- I(df$t==2)*1
   df$t3 <- I(df$t==3)*1
-  mptr <- Model__new(y, "t2 + t3 + (1|gr(j))",
-                     as.matrix(df),
-                     colnames(df),
-                     "gaussian","identity")
+  mbptr <- ModelBits__new(
+    "t2 + t3 + (1|gr(j))",
+    as.matrix(df),
+    colnames(df),
+    "gaussian","identity",c(0,0,0), c(0.05)
+  )
+  mptr <- Model__new_from_bits(mbptr)
+  Model__set_y(mptr,y)
   expect_error(Model__update_beta(mptr,c(0,0)))
   Model__update_beta(mptr,c(0.1,0.1,0.1))
   expect_error(Model__update_theta(mptr,c(0.25,0.25)))
@@ -30,10 +34,14 @@ test_that("model dense la", {
   df$t1 <- I(df$t==1)*1
   df$t2 <- I(df$t==2)*1
   df$t3 <- I(df$t==3)*1
-  mptr <- Model__new(y, "t2 + t3 + (1|gr(j))",
-                     as.matrix(df),
-                     colnames(df),
-                     "gaussian","identity")
+  mbptr <- ModelBits__new(
+    "t2 + t3 + (1|gr(j))",
+    as.matrix(df),
+    colnames(df),
+    "gaussian","identity",c(0,0,0), c(0.05)
+  )
+  mptr <- Model__new_from_bits(mbptr)
+  Model__set_y(mptr,y)
   Model__update_beta(mptr,c(0.1,0.1,0.1))
   Model__update_theta(mptr,c(0.0625))
   Model__set_var_par(mptr,1)
@@ -56,10 +64,14 @@ test_that("model sparse ml", {
   df$t1 <- I(df$t==1)*1
   df$t2 <- I(df$t==2)*1
   df$t3 <- I(df$t==3)*1
-  mptr <- Model__new(y, "t2 + t3 + (1|gr(j))",
-                     as.matrix(df),
-                     colnames(df),
-                     "gaussian","identity")
+  mbptr <- ModelBits__new(
+    "t2 + t3 + (1|gr(j))",
+    as.matrix(df),
+    colnames(df),
+    "gaussian","identity",c(0,0,0), c(0.05)
+  )
+  mptr <- Model__new_from_bits(mbptr)
+  Model__set_y(mptr,y)
   expect_error(Model__update_beta(mptr,c(0,0)))
   Model__update_beta(mptr,c(0.1,0.1,0.1))
   expect_error(Model__update_theta(mptr,c(0.25,0.25)))
