R Under development (unstable) (2026-01-06 r89280) -- "Unsuffered Consequences"
Copyright (C) 2026 The R Foundation for Statistical Computing
Platform: x86_64-pc-linux-gnu

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

  Natural language support but running in an English locale

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> pkgname <- "pense"
> source(file.path(R.home("share"), "R", "examples-header.R"))
> options(warn = 1)
> library('pense')
Loading required package: Matrix
> 
> base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
> base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv')
> cleanEx()
> nameEx("coef.pense_cvfit")
> ### * coef.pense_cvfit
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: coef.pense_cvfit
> ### Title: Extract Coefficient Estimates
> ### Aliases: coef.pense_cvfit
> 
> ### ** Examples
> 
> # Compute the PENSE regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- pense(x, freeny$y, alpha = 0.5)
/data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/Mat_meat.hpp:7478:26: runtime error: reference binding to null pointer of type 'const double'
    #0 0x7b0183cb5b8c in arma::Mat::colptr(unsigned int) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/Mat_meat.hpp:7478
    #1 0x7b0183cb5b8c in arma::subview::extract(arma::Mat&, arma::subview const&) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/subview_meat.hpp:1648
    #2 0x7b01843fa34e in arma::quasi_unwrap >::quasi_unwrap(arma::subview const&) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/unwrap.hpp:362
    #3 0x7b01843fa34e in void arma::glue_join_cols::apply, arma::subview >(arma::Mat_noalias::elem_type>&, arma::Glue, arma::subview, arma::glue_join_cols> const&) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/glue_join_meat.hpp:91
    #4 0x7b01843fa34e in arma::Mat::Mat, arma::subview, arma::glue_join_cols>(arma::Glue, arma::subview, arma::glue_join_cols> const&) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/Mat_meat.hpp:6240
    #5 0x7b01843fa34e in nsoptim::PredictorResponseData::RemoveObservation(unsigned int) const nsoptim/objective/../container/data.hpp:62
    #6 0x7b01845493ba in std::forward_list > pense::enpy_psc_internal::ComputeLoo > > >(nsoptim::LsRegressionLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, unsigned int, unsigned int, nsoptim::AugmentedLarsOptimizer > >*, std::forward_list, std::allocator > >*) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/enpy_psc.hpp:139
    #7 0x7b018447cce3 in std::forward_list > > >, std::allocator > > > > > pense::enpy_psc_internal::ComputePscs > >, void>(nsoptim::LsRegressionLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, nsoptim::AugmentedLarsOptimizer > >) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/enpy_psc.hpp:357
    #8 0x7b01844579fc in std::forward_list > > >, std::allocator > > > > > pense::PrincipalSensitiviyComponents > > >(nsoptim::LsRegressionLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, nsoptim::AugmentedLarsOptimizer > > const&, int) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/enpy_psc.hpp:439
    #9 0x7b01844579fc in std::forward_list > > >, std::allocator > > > > > pense::enpy_initest_internal::ComputeENPY > > >(pense::SLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, nsoptim::AugmentedLarsOptimizer > > const&, pense::enpy_initest_internal::PyConfiguration const&) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/enpy_initest.hpp:217
    #10 0x7b018440f4bf in std::forward_list > > >, std::allocator > > > > > pense::PenaYohaiInitialEstimators > > >(pense::SLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, nsoptim::AugmentedLarsOptimizer > > const&, Rcpp::Vector<19, Rcpp::PreserveStorage> const&) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/enpy_initest.hpp:464
    #11 0x7b018493e2e6 in EnpyInitialEstimatesImpl > >, pense::CDPense > > > /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:183
    #12 0x7b018493e2e6 in std::forward_list > >::Coefficients, std::allocator > >::Coefficients> >, std::allocator > >::Coefficients, std::allocator > >::Coefficients> > > > (anonymous namespace)::EnpyInitialEstimates > > >(pense::SLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, SEXPREC*, SEXPREC*, SEXPREC*, Rcpp::Vector<19, Rcpp::PreserveStorage> const&, nsoptim::_metrics_internal::Metrics<0>*) [clone .isra.0] /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:230
    #13 0x7b01848f9d20 in PenseRegressionImpl > >, nsoptim::RegressionCoefficients > > > /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:271
    #14 0x7b01848552b1 in PenseMMPenaltyImpl > >, nsoptim::EnPenalty> /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:370
    #15 0x7b01848552b1 in PenseMMDispatch /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:428
    #16 0x7b01847ff158 in PensePenaltyDispatch /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:503
    #17 0x7b01847ff158 in pense::r_interface::PenseEnRegression(SEXPREC*, SEXPREC*, SEXPREC*, SEXPREC*, SEXPREC*, SEXPREC*, SEXPREC*) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:560
    #18 0x00000074f120 in R_doDotCall /data/gannet/ripley/R/svn/R-devel/src/main/dotcode.c:775
    #19 0x0000008e55aa in bcEval_loop /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:8682
    #20 0x0000008bbdeb in bcEval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:7515
    #21 0x00000085c642 in Rf_eval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:1167
    #22 0x00000087238a in R_execClosure /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2389
    #23 0x00000087644a in applyClosure_core /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2302
    #24 0x000000877d67 in Rf_applyClosure /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2324
    #25 0x000000877d67 in R_forceAndCall /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2456
    #26 0x000000a1829e in do_mapply /data/gannet/ripley/R/svn/R-devel/src/main/mapply.c:113
    #27 0x0000008ceba2 in bcEval_loop /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:8132
    #28 0x0000008bbdeb in bcEval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:7515
    #29 0x00000085c642 in Rf_eval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:1167
    #30 0x00000087238a in R_execClosure /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2389
    #31 0x00000087644a in applyClosure_core /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2302
    #32 0x00000085cce3 in Rf_applyClosure /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2324
    #33 0x00000085cce3 in Rf_eval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:1280
    #34 0x00000088abfe in do_set /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:3581
    #35 0x00000085d106 in Rf_eval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:1232
    #36 0x000000a02488 in Rf_ReplIteration /data/gannet/ripley/R/svn/R-devel/src/main/main.c:264
    #37 0x000000a02488 in R_ReplConsole /data/gannet/ripley/R/svn/R-devel/src/main/main.c:317
    #38 0x000000a1056a in run_Rmainloop /data/gannet/ripley/R/svn/R-devel/src/main/main.c:1235
    #39 0x000000a10602 in Rf_mainloop /data/gannet/ripley/R/svn/R-devel/src/main/main.c:1242
    #40 0x0000004131bf in main /data/gannet/ripley/R/svn/R-devel/src/main/Rmain.c:29
    #41 0x7f01a1e115f4 in __libc_start_call_main (/lib64/libc.so.6+0x35f4) (BuildId: a1dda014206b55b07f58fe8db80121b752dc3d03)
    #42 0x7f01a1e116a7 in __libc_start_main@@GLIBC_2.34 (/lib64/libc.so.6+0x36a7) (BuildId: a1dda014206b55b07f58fe8db80121b752dc3d03)
    #43 0x000000413ba4 in _start (/data/gannet/ripley/R/gcc-SAN3/bin/exec/R+0x413ba4) (BuildId: 0c03d4154a5b678104cbaeb4a17a1709e99fc88d)
/data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/access.hpp:26:100: runtime error: reference binding to null pointer of type 'double'
    #0 0x7b0183cb59fc in double& arma::access::rw(double const&) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/access.hpp:26
    #1 0x7b0183cb59fc in arma::Mat::colptr(unsigned int) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/Mat_meat.hpp:7478
    #2 0x7b0183cb59fc in arma::subview::extract(arma::Mat&, arma::subview const&) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/subview_meat.hpp:1648
    #3 0x7b01843fa34e in arma::quasi_unwrap >::quasi_unwrap(arma::subview const&) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/unwrap.hpp:362
    #4 0x7b01843fa34e in void arma::glue_join_cols::apply, arma::subview >(arma::Mat_noalias::elem_type>&, arma::Glue, arma::subview, arma::glue_join_cols> const&) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/glue_join_meat.hpp:91
    #5 0x7b01843fa34e in arma::Mat::Mat, arma::subview, arma::glue_join_cols>(arma::Glue, arma::subview, arma::glue_join_cols> const&) /data/gannet/ripley/R/test-dev/RcppArmadillo/include/current/armadillo_bits/Mat_meat.hpp:6240
    #6 0x7b01843fa34e in nsoptim::PredictorResponseData::RemoveObservation(unsigned int) const nsoptim/objective/../container/data.hpp:62
    #7 0x7b01845493ba in std::forward_list > pense::enpy_psc_internal::ComputeLoo > > >(nsoptim::LsRegressionLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, unsigned int, unsigned int, nsoptim::AugmentedLarsOptimizer > >*, std::forward_list, std::allocator > >*) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/enpy_psc.hpp:139
    #8 0x7b018447cce3 in std::forward_list > > >, std::allocator > > > > > pense::enpy_psc_internal::ComputePscs > >, void>(nsoptim::LsRegressionLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, nsoptim::AugmentedLarsOptimizer > >) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/enpy_psc.hpp:357
    #9 0x7b01844579fc in std::forward_list > > >, std::allocator > > > > > pense::PrincipalSensitiviyComponents > > >(nsoptim::LsRegressionLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, nsoptim::AugmentedLarsOptimizer > > const&, int) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/enpy_psc.hpp:439
    #10 0x7b01844579fc in std::forward_list > > >, std::allocator > > > > > pense::enpy_initest_internal::ComputeENPY > > >(pense::SLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, nsoptim::AugmentedLarsOptimizer > > const&, pense::enpy_initest_internal::PyConfiguration const&) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/enpy_initest.hpp:217
    #11 0x7b018440f4bf in std::forward_list > > >, std::allocator > > > > > pense::PenaYohaiInitialEstimators > > >(pense::SLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, nsoptim::AugmentedLarsOptimizer > > const&, Rcpp::Vector<19, Rcpp::PreserveStorage> const&) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/enpy_initest.hpp:464
    #12 0x7b018493e2e6 in EnpyInitialEstimatesImpl > >, pense::CDPense > > > /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:183
    #13 0x7b018493e2e6 in std::forward_list > >::Coefficients, std::allocator > >::Coefficients> >, std::allocator > >::Coefficients, std::allocator > >::Coefficients> > > > (anonymous namespace)::EnpyInitialEstimates > > >(pense::SLoss const&, std::forward_list > >::PenaltyFunction, std::allocator > >::PenaltyFunction> > const&, SEXPREC*, SEXPREC*, SEXPREC*, Rcpp::Vector<19, Rcpp::PreserveStorage> const&, nsoptim::_metrics_internal::Metrics<0>*) [clone .isra.0] /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:230
    #14 0x7b01848f9d20 in PenseRegressionImpl > >, nsoptim::RegressionCoefficients > > > /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:271
    #15 0x7b01848552b1 in PenseMMPenaltyImpl > >, nsoptim::EnPenalty> /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:370
    #16 0x7b01848552b1 in PenseMMDispatch /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:428
    #17 0x7b01847ff158 in PensePenaltyDispatch /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:503
    #18 0x7b01847ff158 in pense::r_interface::PenseEnRegression(SEXPREC*, SEXPREC*, SEXPREC*, SEXPREC*, SEXPREC*, SEXPREC*, SEXPREC*) /data/gannet/ripley/R/packages/tests-gcc-SAN/pense/src/r_pense_regression.cc:560
    #19 0x00000074f120 in R_doDotCall /data/gannet/ripley/R/svn/R-devel/src/main/dotcode.c:775
    #20 0x0000008e55aa in bcEval_loop /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:8682
    #21 0x0000008bbdeb in bcEval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:7515
    #22 0x00000085c642 in Rf_eval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:1167
    #23 0x00000087238a in R_execClosure /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2389
    #24 0x00000087644a in applyClosure_core /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2302
    #25 0x000000877d67 in Rf_applyClosure /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2324
    #26 0x000000877d67 in R_forceAndCall /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2456
    #27 0x000000a1829e in do_mapply /data/gannet/ripley/R/svn/R-devel/src/main/mapply.c:113
    #28 0x0000008ceba2 in bcEval_loop /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:8132
    #29 0x0000008bbdeb in bcEval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:7515
    #30 0x00000085c642 in Rf_eval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:1167
    #31 0x00000087238a in R_execClosure /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2389
    #32 0x00000087644a in applyClosure_core /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2302
    #33 0x00000085cce3 in Rf_applyClosure /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:2324
    #34 0x00000085cce3 in Rf_eval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:1280
    #35 0x00000088abfe in do_set /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:3581
    #36 0x00000085d106 in Rf_eval /data/gannet/ripley/R/svn/R-devel/src/main/eval.c:1232
    #37 0x000000a02488 in Rf_ReplIteration /data/gannet/ripley/R/svn/R-devel/src/main/main.c:264
    #38 0x000000a02488 in R_ReplConsole /data/gannet/ripley/R/svn/R-devel/src/main/main.c:317
    #39 0x000000a1056a in run_Rmainloop /data/gannet/ripley/R/svn/R-devel/src/main/main.c:1235
    #40 0x000000a10602 in Rf_mainloop /data/gannet/ripley/R/svn/R-devel/src/main/main.c:1242
    #41 0x0000004131bf in main /data/gannet/ripley/R/svn/R-devel/src/main/Rmain.c:29
    #42 0x7f01a1e115f4 in __libc_start_call_main (/lib64/libc.so.6+0x35f4) (BuildId: a1dda014206b55b07f58fe8db80121b752dc3d03)
    #43 0x7f01a1e116a7 in __libc_start_main@@GLIBC_2.34 (/lib64/libc.so.6+0x36a7) (BuildId: a1dda014206b55b07f58fe8db80121b752dc3d03)
    #44 0x000000413ba4 in _start (/data/gannet/ripley/R/gcc-SAN3/bin/exec/R+0x413ba4) (BuildId: 0c03d4154a5b678104cbaeb4a17a1709e99fc88d)
> plot(regpath)
> 
> # Extract the coefficients at a certain penalization level
> coef(regpath, lambda = regpath$lambda[[1]][[40]])
          (Intercept) lag.quarterly.revenue           price.index 
           -7.9064997             0.2125014            -0.7070107 
         income.level      market.potential 
            0.7141099             1.0796662 
> 
> # What penalization level leads to good prediction performance?
> set.seed(123)
> cv_results <- pense_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 2, cv_k = 4)
> plot(cv_results, se_mult = 1)
> 
> # Extract the coefficients at the penalization level with
> # smallest prediction error ...
> coef(cv_results)
          (Intercept) lag.quarterly.revenue           price.index 
           -7.9064997             0.2125014            -0.7070107 
         income.level      market.potential 
            0.7141099             1.0796662 
> # ... or at the penalization level with prediction error
> # statistically indistinguishable from the minimum.
> coef(cv_results, lambda = '1-se')
          (Intercept) lag.quarterly.revenue           price.index 
           -7.8652589             0.2141280            -0.7053433 
         income.level      market.potential 
            0.7126978             1.0754335 
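A small follow-up sketch, not part of the original check output: assuming the `cv_results` object fitted above, the coefficient estimates at the CV-minimum and at the '1-se' penalization level can be put side by side with base R only.

# Sketch only: compare the 'min' and '1-se' coefficient estimates
# from the cross-validated PENSE fit above.
cbind(min = coef(cv_results, lambda = "min"),
      one_se = coef(cv_results, lambda = "1-se"))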
> 
> 
> 
> cleanEx()
> nameEx("coef.pense_fit")
> ### * coef.pense_fit
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: coef.pense_fit
> ### Title: Extract Coefficient Estimates
> ### Aliases: coef.pense_fit
> 
> ### ** Examples
> 
> # Compute the PENSE regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- pense(x, freeny$y, alpha = 0.5)
> plot(regpath)
> 
> # Extract the coefficients at a certain penalization level
> coef(regpath, lambda = regpath$lambda[[1]][[40]])
          (Intercept) lag.quarterly.revenue           price.index 
           -7.9064997             0.2125014            -0.7070107 
         income.level      market.potential 
            0.7141099             1.0796662 
> 
> # What penalization level leads to good prediction performance?
> set.seed(123)
> cv_results <- pense_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 2, cv_k = 4)
> plot(cv_results, se_mult = 1)
> 
> # Extract the coefficients at the penalization level with
> # smallest prediction error ...
> coef(cv_results)
          (Intercept) lag.quarterly.revenue           price.index 
           -7.9064997             0.2125014            -0.7070107 
         income.level      market.potential 
            0.7141099             1.0796662 
> # ... or at the penalization level with prediction error
> # statistically indistinguishable from the minimum.
> coef(cv_results, lambda = '1-se')
          (Intercept) lag.quarterly.revenue           price.index 
           -7.8652589             0.2141280            -0.7053433 
         income.level      market.potential 
            0.7126978             1.0754335 
> 
> 
> 
> cleanEx()
> nameEx("elnet")
> ### * elnet
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: elnet
> ### Title: Compute the Least Squares (Adaptive) Elastic Net Regularization
> ###   Path
> ### Aliases: elnet adaelnet adaen
> 
> ### ** Examples
> 
> # Compute the LS-EN regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- elnet(x, freeny$y, alpha = c(0.5, 0.75))
> plot(regpath)
> plot(regpath, alpha = 0.75)
> 
> # Extract the coefficients at a certain penalization level
> coef(regpath, lambda = regpath$lambda[[1]][[5]],
+      alpha = 0.75)
          (Intercept) lag.quarterly.revenue           price.index 
             9.306304              0.000000              0.000000 
         income.level      market.potential 
             0.000000              0.000000 
> 
> # What penalization level leads to good prediction performance?
> set.seed(123)
> cv_results <- elnet_cv(x, freeny$y, alpha = c(0.5, 0.75),
+                        cv_repl = 10, cv_k = 4,
+                        cv_measure = "tau")
> plot(cv_results, se_mult = 1.5)
> plot(cv_results, se_mult = 1.5, what = "coef.path")
> 
> 
> # Extract the coefficients at the penalization level with
> # smallest prediction error ...
> summary(cv_results)
EN fit with prediction performance estimated by replications of 4-fold cross-validation.
4 out of 4 predictors have non-zero coefficients:
                        Estimate
(Intercept)           -9.6491805
lag.quarterly.revenue  0.1899399
price.index           -0.6858733
income.level           0.7075924
market.potential       1.2247539
---
Hyper-parameters: lambda=0.003787891, alpha=0.5
> coef(cv_results)
          (Intercept) lag.quarterly.revenue           price.index 
           -9.6491805             0.1899399            -0.6858733 
         income.level      market.potential 
            0.7075924             1.2247539 
> # ... or at the penalization level with prediction error
> # statistically indistinguishable from the minimum.
> summary(cv_results, lambda = "1.5-se")
EN fit with prediction performance estimated by replications of 4-fold cross-validation.
4 out of 4 predictors have non-zero coefficients:
                        Estimate
(Intercept)           -9.5875726
lag.quarterly.revenue  0.2270959
price.index           -0.6216322
income.level           0.6519060
market.potential       1.1972788
---
Hyper-parameters: lambda=0.01303176, alpha=0.5
> coef(cv_results, lambda = "1.5-se")
          (Intercept) lag.quarterly.revenue           price.index 
           -9.5875726             0.2270959            -0.6216322 
         income.level      market.potential 
            0.6519060             1.1972788 
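A brief sketch, not run as part of the checks: assuming the `regpath` object from the elnet example above, coefficients at a few points along the lambda grid can be collected into a matrix to see how the estimates shrink (the chosen indices are illustrative only).

# Sketch only: coefficients at several penalization levels of the
# alpha = 0.75 path, arranged as columns of a matrix.
sapply(regpath$lambda[[1]][c(1, 5, 10)],
       function(lam) coef(regpath, lambda = lam, alpha = 0.75))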
> 
> 
> 
> cleanEx()
> nameEx("elnet_cv")
> ### * elnet_cv
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: elnet_cv
> ### Title: Cross-validation for Least-Squares (Adaptive) Elastic Net
> ###   Estimates
> ### Aliases: elnet_cv
> 
> ### ** Examples
> 
> # Compute the LS-EN regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- elnet(x, freeny$y, alpha = c(0.5, 0.75))
> plot(regpath)
> plot(regpath, alpha = 0.75)
> 
> # Extract the coefficients at a certain penalization level
> coef(regpath, lambda = regpath$lambda[[1]][[5]],
+      alpha = 0.75)
          (Intercept) lag.quarterly.revenue           price.index 
             9.306304              0.000000              0.000000 
         income.level      market.potential 
             0.000000              0.000000 
> 
> # What penalization level leads to good prediction performance?
> set.seed(123)
> cv_results <- elnet_cv(x, freeny$y, alpha = c(0.5, 0.75),
+                        cv_repl = 10, cv_k = 4,
+                        cv_measure = "tau")
> plot(cv_results, se_mult = 1.5)
> plot(cv_results, se_mult = 1.5, what = "coef.path")
> 
> 
> # Extract the coefficients at the penalization level with
> # smallest prediction error ...
> summary(cv_results)
EN fit with prediction performance estimated by replications of 4-fold cross-validation.
4 out of 4 predictors have non-zero coefficients:
                        Estimate
(Intercept)           -9.6491805
lag.quarterly.revenue  0.1899399
price.index           -0.6858733
income.level           0.7075924
market.potential       1.2247539
---
Hyper-parameters: lambda=0.003787891, alpha=0.5
> coef(cv_results)
          (Intercept) lag.quarterly.revenue           price.index 
           -9.6491805             0.1899399            -0.6858733 
         income.level      market.potential 
            0.7075924             1.2247539 
> # ... or at the penalization level with prediction error
> # statistically indistinguishable from the minimum.
> summary(cv_results, lambda = "1.5-se")
EN fit with prediction performance estimated by replications of 4-fold cross-validation.
4 out of 4 predictors have non-zero coefficients:
                        Estimate
(Intercept)           -9.5875726
lag.quarterly.revenue  0.2270959
price.index           -0.6216322
income.level           0.6519060
market.potential       1.1972788
---
Hyper-parameters: lambda=0.01303176, alpha=0.5
> coef(cv_results, lambda = "1.5-se")
          (Intercept) lag.quarterly.revenue           price.index 
           -9.5875726             0.2270959            -0.6216322 
         income.level      market.potential 
            0.6519060             1.1972788 
> 
> 
> 
> cleanEx()
> nameEx("pense")
> ### * pense
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: pense
> ### Title: Compute (Adaptive) Elastic Net S-Estimates of Regression
> ### Aliases: pense adapense
> 
> ### ** Examples
> 
> # Compute the PENSE regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- pense(x, freeny$y, alpha = 0.5)
> plot(regpath)
> 
> # Extract the coefficients at a certain penalization level
> coef(regpath, lambda = regpath$lambda[[1]][[40]])
          (Intercept) lag.quarterly.revenue           price.index 
           -7.9064997             0.2125014            -0.7070107 
         income.level      market.potential 
            0.7141099             1.0796662 
> 
> # What penalization level leads to good prediction performance?
> set.seed(123)
> cv_results <- pense_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 2, cv_k = 4)
> plot(cv_results, se_mult = 1)
> 
> # Extract the coefficients at the penalization level with
> # smallest prediction error ...
> coef(cv_results)
          (Intercept) lag.quarterly.revenue           price.index 
           -7.9064997             0.2125014            -0.7070107 
         income.level      market.potential 
            0.7141099             1.0796662 
> # ... or at the penalization level with prediction error
> # statistically indistinguishable from the minimum.
> coef(cv_results, lambda = '1-se')
          (Intercept) lag.quarterly.revenue           price.index 
           -7.8652589             0.2141280            -0.7053433 
         income.level      market.potential 
            0.7126978             1.0754335 
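A sketch only, not part of the check output: it assumes the `cv_results` object from the pense example above and that the `predict()` method for cross-validated fits (demonstrated later in this log for elnet_cv results) applies to PENSE fits in the same way.

# Sketch: predictions at the CV-selected penalization level for the
# first few observations (reuses `freeny` from above).
predict(cv_results, newdata = freeny[1:5, 2:5])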
> 
> 
> 
> cleanEx()
> nameEx("pense_cv")
> ### * pense_cv
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: pense_cv
> ### Title: Cross-validation for (Adaptive) PENSE Estimates
> ### Aliases: pense_cv adapense_cv
> 
> ### ** Examples
> 
> # Compute the adaptive PENSE regularization path for Freeny's
> # revenue data (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> ## Either use the convenience function directly ...
> set.seed(123)
> ada_convenience <- adapense_cv(x, freeny$y, alpha = 0.5,
+                                cv_repl = 2, cv_k = 4)
> 
> ## ... or compute the steps manually:
> # Step 1: Compute preliminary estimates with CV
> set.seed(123)
> preliminary_estimate <- pense_cv(x, freeny$y, alpha = 0,
+                                  cv_repl = 2, cv_k = 4)
> plot(preliminary_estimate, se_mult = 1)
> 
> # Step 2: Use the coefficients with best prediction performance
> # to define the penalty loadings:
> prelim_coefs <- coef(preliminary_estimate, lambda = 'min')
> pen_loadings <- 1 / abs(prelim_coefs[-1])
> 
> # Step 3: Compute the adaptive PENSE estimates and estimate
> # their prediction performance.
> set.seed(123)
> ada_manual <- pense_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 2, cv_k = 4,
+                        penalty_loadings = pen_loadings)
> 
> # Visualize the prediction performance and coefficient path of
> # the adaptive PENSE estimates (manual vs. automatic)
> def.par <- par(no.readonly = TRUE)
> layout(matrix(1:4, ncol = 2, byrow = TRUE))
> plot(ada_convenience$preliminary)
> plot(preliminary_estimate)
> plot(ada_convenience)
> plot(ada_manual)
> par(def.par)
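The three manual steps above can be bundled into a small helper. This is a sketch only (the function name `adapense_cv_manual` is made up here); it is composed of exactly the calls used in the example.

# Sketch: wrap steps 1-3 of the manual adaptive PENSE workflow.
adapense_cv_manual <- function(x, y, alpha, cv_repl = 2, cv_k = 4) {
  # Step 1: preliminary (ridge-type) estimate with CV
  prelim <- pense_cv(x, y, alpha = 0, cv_repl = cv_repl, cv_k = cv_k)
  # Step 2: penalty loadings from the CV-best coefficients (drop intercept)
  loadings <- 1 / abs(coef(prelim, lambda = "min")[-1])
  # Step 3: adaptive PENSE fit with those loadings
  pense_cv(x, y, alpha = alpha, cv_repl = cv_repl, cv_k = cv_k,
           penalty_loadings = loadings)
}
# e.g.: ada_manual2 <- adapense_cv_manual(x, freeny$y, alpha = 0.5)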
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("plot.pense_cvfit")
> ### * plot.pense_cvfit
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.pense_cvfit
> ### Title: Plot Method for Penalized Estimates With Cross-Validation
> ### Aliases: plot.pense_cvfit
> 
> ### ** Examples
> 
> # Compute the PENSE regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- pense(x, freeny$y, alpha = 0.5)
> plot(regpath)
> 
> # Extract the coefficients at a certain penalization level
> coef(regpath, lambda = regpath$lambda[[1]][[40]])
          (Intercept) lag.quarterly.revenue           price.index 
           -7.9064997             0.2125014            -0.7070107 
         income.level      market.potential 
            0.7141099             1.0796662 
> 
> # What penalization level leads to good prediction performance?
> set.seed(123)
> cv_results <- pense_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 2, cv_k = 4)
> plot(cv_results, se_mult = 1)
> 
> # Extract the coefficients at the penalization level with
> # smallest prediction error ...
> coef(cv_results)
          (Intercept) lag.quarterly.revenue           price.index 
           -7.9064997             0.2125014            -0.7070107 
         income.level      market.potential 
            0.7141099             1.0796662 
> # ... or at the penalization level with prediction error
> # statistically indistinguishable from the minimum.
> coef(cv_results, lambda = '1-se')
          (Intercept) lag.quarterly.revenue           price.index 
           -7.8652589             0.2141280            -0.7053433 
         income.level      market.potential 
            0.7126978             1.0754335 
> 
> 
> 
> cleanEx()
> nameEx("plot.pense_fit")
> ### * plot.pense_fit
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.pense_fit
> ### Title: Plot Method for Penalized Estimates
> ### Aliases: plot.pense_fit
> 
> ### ** Examples
> 
> # Compute the PENSE regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- pense(x, freeny$y, alpha = 0.5)
> plot(regpath)
> 
> # Extract the coefficients at a certain penalization level
> coef(regpath, lambda = regpath$lambda[[1]][[40]])
          (Intercept) lag.quarterly.revenue           price.index 
           -7.9064997             0.2125014            -0.7070107 
         income.level      market.potential 
            0.7141099             1.0796662 
> 
> # What penalization level leads to good prediction performance?
> set.seed(123)
> cv_results <- pense_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 2, cv_k = 4)
> plot(cv_results, se_mult = 1)
> 
> # Extract the coefficients at the penalization level with
> # smallest prediction error ...
> coef(cv_results)
          (Intercept) lag.quarterly.revenue           price.index 
           -7.9064997             0.2125014            -0.7070107 
         income.level      market.potential 
            0.7141099             1.0796662 
> # ... or at the penalization level with prediction error
> # statistically indistinguishable from the minimum.
> coef(cv_results, lambda = '1-se')
          (Intercept) lag.quarterly.revenue           price.index 
           -7.8652589             0.2141280            -0.7053433 
         income.level      market.potential 
            0.7126978             1.0754335 
> 
> 
> 
> cleanEx()
> nameEx("predict.pense_cvfit")
> ### * predict.pense_cvfit
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.pense_cvfit
> ### Title: Predict Method for PENSE Fits
> ### Aliases: predict.pense_cvfit
> 
> ### ** Examples
> 
> # Compute the LS-EN regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- elnet(x, freeny$y, alpha = 0.75)
> 
> # Predict the response using a specific penalization level
> predict(regpath, newdata = freeny[1:5, 2:5],
+         lambda = regpath$lambda[[1]][[10]])
 1962.25   1962.5  1962.75     1963  1963.25 
9.071638 9.075877 9.082341 9.091051 9.103643 
> 
> # Extract the residuals at a certain penalization level
> residuals(regpath, lambda = regpath$lambda[[1]][[5]])
             Qtr1         Qtr2         Qtr3         Qtr4
1962              -0.396169224 -0.398919454 -0.378220695
1963 -0.384593892 -0.294338479 -0.278030430 -0.259140970
1964 -0.265543684 -0.219857551 -0.206023748 -0.173545125
1965 -0.192086388 -0.146253918 -0.133786817 -0.096671191
1966 -0.090347926 -0.044601264 -0.027307275 -0.015531362
1967  0.006739697  0.037235389  0.038869333  0.071407419
1968  0.086275062  0.102036759  0.141643505  0.167310507
1969  0.175437660  0.211500792  0.222942549  0.260029329
1970  0.247870647  0.296181577  0.294470169  0.278217170
1971  0.306686546  0.334684047  0.353729154  0.367702082
> 
> # Select penalization level via cross-validation
> set.seed(123)
> cv_results <- elnet_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 10, cv_k = 4)
> 
> # Predict the response using the "best" penalization level
> predict(cv_results, newdata = freeny[1:5, 2:5])
 1962.25   1962.5  1962.75     1963  1963.25 
8.795162 8.807070 8.824535 8.842175 8.882970 
> 
> # Extract the residuals at the "best" penalization level
> residuals(cv_results)
             Qtr1         Qtr2         Qtr3         Qtr4
1962              -0.002801588 -0.015699794 -0.009674689
1963 -0.029165027  0.024539756  0.012794692  0.013400637
1964 -0.014177641  0.006727561 -0.001786569  0.012847301
1965 -0.026214283 -0.003738367 -0.010414370 -0.002497682
1966 -0.014953173  0.014923561  0.008857587  0.002955549
1967 -0.002438578  0.011336771 -0.004817002 -0.002203485
1968 -0.014622332 -0.016038521  0.004783442  0.009509723
1969  0.008935501  0.025135859  0.011008309  0.028171208
1970 -0.021812388  0.015300404 -0.005936032 -0.018484409
1971 -0.011338929  0.005850584  0.003783641  0.007952774
> # Extract the residuals at a more parsimonious penalization level
> residuals(cv_results, lambda = "1.5-se")
              Qtr1          Qtr2          Qtr3          Qtr4
1962               -0.0133914763 -0.0253025812 -0.0180073467
1963 -0.0375085950  0.0197904960  0.0063674275  0.0071084444
1964 -0.0199445085  0.0028559476 -0.0059886764  0.0089958759
1965 -0.0300906249 -0.0052788324 -0.0128420949 -0.0036364879
1966 -0.0166186511  0.0137904162  0.0073750895  0.0016121587
1967 -0.0021618673  0.0116385091 -0.0046227113  0.0002016235
1968 -0.0117185912 -0.0127504673  0.0088610048  0.0131305182
1969  0.0114094522  0.0285319822  0.0147126545  0.0325247699
1970 -0.0158575338  0.0222103711  0.0004937030 -0.0127802705
1971 -0.0035765461  0.0133307584  0.0117139290  0.0154227310
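A short sketch, not part of the check output: assuming the `cv_results` object from the example above, a robust scale estimate of the in-sample residuals gives a quick comparison of the selected and the more parsimonious fits.

# Sketch only: robust residual scale (MAD) at the CV-selected and
# at the "1.5-se" penalization levels.
mad(residuals(cv_results))
mad(residuals(cv_results, lambda = "1.5-se"))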
> 
> 
> 
> cleanEx()
> nameEx("predict.pense_fit")
> ### * predict.pense_fit
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.pense_fit
> ### Title: Predict Method for PENSE Fits
> ### Aliases: predict.pense_fit
> 
> ### ** Examples
> 
> # Compute the LS-EN regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- elnet(x, freeny$y, alpha = 0.75)
> 
> # Predict the response using a specific penalization level
> predict(regpath, newdata = freeny[1:5, 2:5],
+         lambda = regpath$lambda[[1]][[10]])
 1962.25   1962.5  1962.75     1963  1963.25 
9.071638 9.075877 9.082341 9.091051 9.103643 
> 
> # Extract the residuals at a certain penalization level
> residuals(regpath, lambda = regpath$lambda[[1]][[5]])
             Qtr1         Qtr2         Qtr3         Qtr4
1962              -0.396169224 -0.398919454 -0.378220695
1963 -0.384593892 -0.294338479 -0.278030430 -0.259140970
1964 -0.265543684 -0.219857551 -0.206023748 -0.173545125
1965 -0.192086388 -0.146253918 -0.133786817 -0.096671191
1966 -0.090347926 -0.044601264 -0.027307275 -0.015531362
1967  0.006739697  0.037235389  0.038869333  0.071407419
1968  0.086275062  0.102036759  0.141643505  0.167310507
1969  0.175437660  0.211500792  0.222942549  0.260029329
1970  0.247870647  0.296181577  0.294470169  0.278217170
1971  0.306686546  0.334684047  0.353729154  0.367702082
> 
> # Select penalization level via cross-validation
> set.seed(123)
> cv_results <- elnet_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 10, cv_k = 4)
> 
> # Predict the response using the "best" penalization level
> predict(cv_results, newdata = freeny[1:5, 2:5])
 1962.25   1962.5  1962.75     1963  1963.25 
8.795162 8.807070 8.824535 8.842175 8.882970 
> 
> # Extract the residuals at the "best" penalization level
> residuals(cv_results)
             Qtr1         Qtr2         Qtr3         Qtr4
1962              -0.002801588 -0.015699794 -0.009674689
1963 -0.029165027  0.024539756  0.012794692  0.013400637
1964 -0.014177641  0.006727561 -0.001786569  0.012847301
1965 -0.026214283 -0.003738367 -0.010414370 -0.002497682
1966 -0.014953173  0.014923561  0.008857587  0.002955549
1967 -0.002438578  0.011336771 -0.004817002 -0.002203485
1968 -0.014622332 -0.016038521  0.004783442  0.009509723
1969  0.008935501  0.025135859  0.011008309  0.028171208
1970 -0.021812388  0.015300404 -0.005936032 -0.018484409
1971 -0.011338929  0.005850584  0.003783641  0.007952774
> # Extract the residuals at a more parsimonious penalization level
> residuals(cv_results, lambda = "1.5-se")
              Qtr1          Qtr2          Qtr3          Qtr4
1962               -0.0133914763 -0.0253025812 -0.0180073467
1963 -0.0375085950  0.0197904960  0.0063674275  0.0071084444
1964 -0.0199445085  0.0028559476 -0.0059886764  0.0089958759
1965 -0.0300906249 -0.0052788324 -0.0128420949 -0.0036364879
1966 -0.0166186511  0.0137904162  0.0073750895  0.0016121587
1967 -0.0021618673  0.0116385091 -0.0046227113  0.0002016235
1968 -0.0117185912 -0.0127504673  0.0088610048  0.0131305182
1969  0.0114094522  0.0285319822  0.0147126545  0.0325247699
1970 -0.0158575338  0.0222103711  0.0004937030 -0.0127802705
1971 -0.0035765461  0.0133307584  0.0117139290  0.0154227310
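A sketch, not part of the check output, reusing `regpath` and `freeny` from above: fitted values at one penalization level plotted against the observed response.

# Sketch only: fitted vs. observed response at one penalization level.
fitted_vals <- predict(regpath, newdata = freeny[ , 2:5],
                       lambda = regpath$lambda[[1]][[10]])
plot(as.numeric(freeny$y), as.numeric(fitted_vals),
     xlab = "observed revenue", ylab = "fitted values")
abline(0, 1, lty = 2)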
> 
> 
> 
> cleanEx()
> nameEx("regmest_cv")
> ### * regmest_cv
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: regmest_cv
> ### Title: Cross-validation for (Adaptive) Elastic Net M-Estimates
> ### Aliases: regmest_cv adamest_cv
> 
> ### ** Examples
> 
> # Compute the adaptive PENSE regularization path for Freeny's
> # revenue data (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> ## Either use the convenience function directly ...
> set.seed(123)
> ada_convenience <- adapense_cv(x, freeny$y, alpha = 0.5,
+                                cv_repl = 2, cv_k = 4)
> 
> ## ... or compute the steps manually:
> # Step 1: Compute preliminary estimates with CV
> set.seed(123)
> preliminary_estimate <- pense_cv(x, freeny$y, alpha = 0,
+                                  cv_repl = 2, cv_k = 4)
> plot(preliminary_estimate, se_mult = 1)
> 
> # Step 2: Use the coefficients with best prediction performance
> # to define the penalty loadings:
> prelim_coefs <- coef(preliminary_estimate, lambda = 'min')
> pen_loadings <- 1 / abs(prelim_coefs[-1])
> 
> # Step 3: Compute the adaptive PENSE estimates and estimate
> # their prediction performance.
> set.seed(123)
> ada_manual <- pense_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 2, cv_k = 4,
+                        penalty_loadings = pen_loadings)
> 
> # Visualize the prediction performance and coefficient path of
> # the adaptive PENSE estimates (manual vs. automatic)
> def.par <- par(no.readonly = TRUE)
> layout(matrix(1:4, ncol = 2, byrow = TRUE))
> plot(ada_convenience$preliminary)
> plot(preliminary_estimate)
> plot(ada_convenience)
> plot(ada_manual)
> par(def.par)
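A sketch, not part of the check output: assuming the `ada_manual` and `ada_convenience` objects created above, the manually computed and the convenience-function estimates can be compared directly.

# Sketch only: side-by-side comparison of the CV-selected coefficients.
cbind(manual = coef(ada_manual),
      convenience = coef(ada_convenience))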
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("residuals.pense_cvfit")
> ### * residuals.pense_cvfit
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: residuals.pense_cvfit
> ### Title: Extract Residuals
> ### Aliases: residuals.pense_cvfit
> 
> ### ** Examples
> 
> # Compute the LS-EN regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- elnet(x, freeny$y, alpha = 0.75)
> 
> # Predict the response using a specific penalization level
> predict(regpath, newdata = freeny[1:5, 2:5],
+         lambda = regpath$lambda[[1]][[10]])
 1962.25   1962.5  1962.75     1963  1963.25 
9.071638 9.075877 9.082341 9.091051 9.103643 
> 
> # Extract the residuals at a certain penalization level
> residuals(regpath, lambda = regpath$lambda[[1]][[5]])
             Qtr1         Qtr2         Qtr3         Qtr4
1962              -0.396169224 -0.398919454 -0.378220695
1963 -0.384593892 -0.294338479 -0.278030430 -0.259140970
1964 -0.265543684 -0.219857551 -0.206023748 -0.173545125
1965 -0.192086388 -0.146253918 -0.133786817 -0.096671191
1966 -0.090347926 -0.044601264 -0.027307275 -0.015531362
1967  0.006739697  0.037235389  0.038869333  0.071407419
1968  0.086275062  0.102036759  0.141643505  0.167310507
1969  0.175437660  0.211500792  0.222942549  0.260029329
1970  0.247870647  0.296181577  0.294470169  0.278217170
1971  0.306686546  0.334684047  0.353729154  0.367702082
> 
> # Select penalization level via cross-validation
> set.seed(123)
> cv_results <- elnet_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 10, cv_k = 4)
> 
> # Predict the response using the "best" penalization level
> predict(cv_results, newdata = freeny[1:5, 2:5])
 1962.25   1962.5  1962.75     1963  1963.25 
8.795162 8.807070 8.824535 8.842175 8.882970 
> 
> # Extract the residuals at the "best" penalization level
> residuals(cv_results)
             Qtr1         Qtr2         Qtr3         Qtr4
1962              -0.002801588 -0.015699794 -0.009674689
1963 -0.029165027  0.024539756  0.012794692  0.013400637
1964 -0.014177641  0.006727561 -0.001786569  0.012847301
1965 -0.026214283 -0.003738367 -0.010414370 -0.002497682
1966 -0.014953173  0.014923561  0.008857587  0.002955549
1967 -0.002438578  0.011336771 -0.004817002 -0.002203485
1968 -0.014622332 -0.016038521  0.004783442  0.009509723
1969  0.008935501  0.025135859  0.011008309  0.028171208
1970 -0.021812388  0.015300404 -0.005936032 -0.018484409
1971 -0.011338929  0.005850584  0.003783641  0.007952774
> # Extract the residuals at a more parsimonious penalization level
> residuals(cv_results, lambda = "1.5-se")
              Qtr1          Qtr2          Qtr3          Qtr4
1962               -0.0133914763 -0.0253025812 -0.0180073467
1963 -0.0375085950  0.0197904960  0.0063674275  0.0071084444
1964 -0.0199445085  0.0028559476 -0.0059886764  0.0089958759
1965 -0.0300906249 -0.0052788324 -0.0128420949 -0.0036364879
1966 -0.0166186511  0.0137904162  0.0073750895  0.0016121587
1967 -0.0021618673  0.0116385091 -0.0046227113  0.0002016235
1968 -0.0117185912 -0.0127504673  0.0088610048  0.0131305182
1969  0.0114094522  0.0285319822  0.0147126545  0.0325247699
1970 -0.0158575338  0.0222103711  0.0004937030 -0.0127802705
1971 -0.0035765461  0.0133307584  0.0117139290  0.0154227310
> 
> 
> 
> cleanEx()
> nameEx("residuals.pense_fit")
> ### * residuals.pense_fit
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: residuals.pense_fit
> ### Title: Extract Residuals
> ### Aliases: residuals.pense_fit
> 
> ### ** Examples
> 
> # Compute the LS-EN regularization path for Freeny's revenue data
> # (see ?freeny)
> data(freeny)
> x <- as.matrix(freeny[ , 2:5])
> 
> regpath <- elnet(x, freeny$y, alpha = 0.75)
> 
> # Predict the response using a specific penalization level
> predict(regpath, newdata = freeny[1:5, 2:5],
+         lambda = regpath$lambda[[1]][[10]])
 1962.25   1962.5  1962.75     1963  1963.25 
9.071638 9.075877 9.082341 9.091051 9.103643 
> 
> # Extract the residuals at a certain penalization level
> residuals(regpath, lambda = regpath$lambda[[1]][[5]])
             Qtr1         Qtr2         Qtr3         Qtr4
1962              -0.396169224 -0.398919454 -0.378220695
1963 -0.384593892 -0.294338479 -0.278030430 -0.259140970
1964 -0.265543684 -0.219857551 -0.206023748 -0.173545125
1965 -0.192086388 -0.146253918 -0.133786817 -0.096671191
1966 -0.090347926 -0.044601264 -0.027307275 -0.015531362
1967  0.006739697  0.037235389  0.038869333  0.071407419
1968  0.086275062  0.102036759  0.141643505  0.167310507
1969  0.175437660  0.211500792  0.222942549  0.260029329
1970  0.247870647  0.296181577  0.294470169  0.278217170
1971  0.306686546  0.334684047  0.353729154  0.367702082
> 
> # Select penalization level via cross-validation
> set.seed(123)
> cv_results <- elnet_cv(x, freeny$y, alpha = 0.5,
+                        cv_repl = 10, cv_k = 4)
> 
> # Predict the response using the "best" penalization level
> predict(cv_results, newdata = freeny[1:5, 2:5])
 1962.25   1962.5  1962.75     1963  1963.25 
8.795162 8.807070 8.824535 8.842175 8.882970 
> 
> # Extract the residuals at the "best" penalization level
> residuals(cv_results)
             Qtr1         Qtr2         Qtr3         Qtr4
1962              -0.002801588 -0.015699794 -0.009674689
1963 -0.029165027  0.024539756  0.012794692  0.013400637
1964 -0.014177641  0.006727561 -0.001786569  0.012847301
1965 -0.026214283 -0.003738367 -0.010414370 -0.002497682
1966 -0.014953173  0.014923561  0.008857587  0.002955549
1967 -0.002438578  0.011336771 -0.004817002 -0.002203485
1968 -0.014622332 -0.016038521  0.004783442  0.009509723
1969  0.008935501  0.025135859  0.011008309  0.028171208
1970 -0.021812388  0.015300404 -0.005936032 -0.018484409
1971 -0.011338929  0.005850584  0.003783641  0.007952774
> # Extract the residuals at a more parsimonious penalization level
> residuals(cv_results, lambda = "1.5-se")
              Qtr1          Qtr2          Qtr3          Qtr4
1962               -0.0133914763 -0.0253025812 -0.0180073467
1963 -0.0375085950  0.0197904960  0.0063674275  0.0071084444
1964 -0.0199445085  0.0028559476 -0.0059886764  0.0089958759
1965 -0.0300906249 -0.0052788324 -0.0128420949 -0.0036364879
1966 -0.0166186511  0.0137904162  0.0073750895  0.0016121587
1967 -0.0021618673  0.0116385091 -0.0046227113  0.0002016235
1968 -0.0117185912 -0.0127504673  0.0088610048  0.0131305182
1969  0.0114094522  0.0285319822  0.0147126545  0.0325247699
1970 -0.0158575338  0.0222103711  0.0004937030 -0.0127802705
1971 -0.0035765461  0.0133307584  0.0117139290  0.0154227310
> 
> 
> 
> ### *