MCMC-Laplace.h
    /* global_refinements = */ 5,
    /* fe_degree = */ 1,
    "");
  LogLikelihood::Gaussian log_likelihood(exact_solution, 0.05);
  LogPrior::LogGaussian log_prior(0, 2);

  const unsigned int n_theta = 64;
  for (unsigned int test=0; test<10; ++test)
    {
      std::cout << "Generating output for test " << test << std::endl;

      // For each test, read the input file...
      std::ifstream test_input ("../testing/input." + std::to_string(test) + ".txt");
      Assert (test_input, ExcIO());

      Vector<double> theta(n_theta);
      for (unsigned int i=0; i<n_theta; ++i)
        test_input >> theta[i];

      // ...run it through the forward simulator to get the
      // z vector (which we then output to a file)...
      const Vector<double> z = laplace_problem.evaluate(theta);
      std::ofstream test_output_z ("output." + std::to_string(test) + ".z.txt");
      z.print(test_output_z, 16);

      // ...and then also evaluate prior and likelihood for these
      // theta vectors:
      std::ofstream test_output_likelihood ("output." + std::to_string(test) + ".loglikelihood.txt");
      test_output_likelihood.precision(12);
      test_output_likelihood << log_likelihood.log_likelihood(z) << std::endl;

      std::ofstream test_output_prior ("output." + std::to_string(test) + ".logprior.txt");
      test_output_prior.precision(12);
      test_output_prior << log_prior.log_prior(theta) << std::endl;
    }
}
@endcode
This code reads in each of the input files (assuming that the executable is located in a
build directory parallel to the `testing/` directory) and outputs its results into the
current directory. The outputs you get from a modified program should then be compared
against the ones stored in the `testing/` directory. They should match to several digits.

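If you want to automate this comparison, a small script along the following lines
would do it. This is only a sketch, not part of the benchmark; it assumes that the
freshly generated `output.*` files sit in the current directory, that the reference
files are in `../testing/`, and that both are plain whitespace-separated numbers as
written by the code above.
@code{.py}
import numpy as np

# Compare the newly generated measurement vectors against the stored
# reference outputs, test by test.
for test in range(10):
    mine      = np.loadtxt("output.{}.z.txt".format(test))
    reference = np.loadtxt("../testing/output.{}.z.txt".format(test))
    print("test {}: max |z - z_ref| = {:.2e}".format(
        test, np.max(np.abs(mine - reference))))
@endcode

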
An alternative implementation in Matlab
---------------------------------------

To facilitate experiments, this directory also contains alternative
implementations of the benchmark. The first one was written by David
Aristoff in Matlab and can be found in the `Matlab/` directory. As is
common in Matlab programs, each function is provided in its own
file. We have verified that the program generates the same results (to
12 or more digits) as the C++ program, using the tests mentioned in
the previous section.


An alternative implementation in Python
---------------------------------------

Another alternative, written by Wolfgang Bangerth, can be found in the
`Python/` directory and uses Python3. As is common for Python
programs, the whole program is provided in one file. As for the Matlab
version, we have verified that the program generates the same results
(to 12 or more digits) as the C++ program, using the tests mentioned
in the previous section. In fact, if you just execute the program
as-is, it runs diagnostics that output the errors.

This Python version is essentially a literal translation of the Matlab
code. It is not nearly as efficient (taking around 5 times as much
time for each function evaluation) and could probably be optimized
substantially if desired. A good starting point is the insertion of
the local elements into the global matrix in the line
@code{.py}
  A[np.ix_(dof,dof)] += theta_loc * A_loc
@endcode
that takes up a substantial fraction of the overall run time.

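One possible direction for such an optimization -- shown here only as an untested
sketch, not as part of the benchmark -- is to collect the local contributions as
COO triplets and build the sparse matrix in a single step, instead of repeatedly
updating a dense matrix. The helper below reuses the `ij_to_dof_index`, `A_loc`,
`boundaries`, `b`, and `M` objects defined in the Python program reproduced
further down:
@code{.py}
import numpy as np
import scipy.sparse
from scipy.sparse.linalg import spsolve

def forward_solver_coo(theta):
    bset = set(boundaries)
    rows, cols, vals = [], [], []
    for i in range(32):
        for j in range(32):
            theta_loc = theta[int(i/4) + int(j/4)*8]
            dof = [ij_to_dof_index(i, j),
                   ij_to_dof_index(i, j+1),
                   ij_to_dof_index(i+1, j+1),
                   ij_to_dof_index(i+1, j)]
            for a in range(4):
                if dof[a] in bset:      # boundary rows are replaced below
                    continue
                for c in range(4):
                    if dof[c] in bset:  # boundary columns are zeroed out
                        continue
                    rows.append(dof[a])
                    cols.append(dof[c])
                    vals.append(theta_loc * A_loc[a, c])
    for k in bset:                      # unit diagonal for boundary dofs
        rows.append(k)
        cols.append(k)
        vals.append(1.0)
    # coo_matrix sums duplicate (row, col) entries, which is exactly the
    # accumulation the dense version performs with np.ix_.
    A = scipy.sparse.coo_matrix((vals, (rows, cols)),
                                shape=(33**2, 33**2)).tocsr()
    u = spsolve(A, b)
    return M * u
@endcode
Whether this actually pays off would of course have to be measured; the point is
merely that the assembly then no longer touches a dense @f$1089\times 1089@f$
array in every evaluation.

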
<a name="ann-Matlab/exact_values.m"></a>
<h1>Annotated version of Matlab/exact_values.m</h1>
@code{.m}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%% list of "exact" measurement values, z_hat %%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

z_hat = [0.06076511762259369;
573 0.09601910120848481;
574 0.1238852517838584;
575 0.1495184117375201;
576 0.1841596127549784;
577 0.2174525028261122;
578 0.2250996160898698;
579 0.2197954769002993;
580 0.2074695698370926;
581 0.1889996477663016;
582 0.1632722532153726;
583 0.1276782480038186;
584 0.07711845915789312;
585 0.09601910120848552;
586 0.2000589533367983;
587 0.3385592591951766;
588 0.3934300024647806;
589 0.4040223892461541;
590 0.4122329537843092;
591 0.4100480091545554;
592 0.3949151637189968;
593 0.3697873264791232;
594 0.33401826235924;
595 0.2850397806663382;
596 0.2184260032478671;
597 0.1271121156350957;
598 0.1238852517838611;
599 0.3385592591951819;
600 0.7119285162766475;
601 0.8175712861756428;
602 0.6836254116578105;
603 0.5779452419831157;
604 0.5555615956136897;
605 0.5285181561736719;
606 0.491439702849224;
607 0.4409367494853282;
608 0.3730060082060772;
609 0.2821694983395214;
610 0.1610176733857739;
611 0.1495184117375257;
612 0.3934300024647929;
613 0.8175712861756562;
614 0.9439154625527653;
615 0.8015904115095128;
616 0.6859683749254024;
617 0.6561235366960599;
618 0.6213197201867315;
619 0.5753611315000049;
620 0.5140091754526823;
621 0.4325325506354165;
622 0.3248315148915482;
623 0.1834600412730086;
624 0.1841596127549917;
625 0.4040223892461832;
626 0.6836254116578439;
627 0.8015904115095396;
628 0.7870119561144977;
629 0.7373108331395808;
630 0.7116558878070463;
631 0.6745179049094283;
632 0.6235300574156917;
633 0.5559332704045935;
634 0.4670304994474178;
635 0.3499809143811;
636 0.19688263746294;
637 0.2174525028261253;
638 0.4122329537843404;
639 0.5779452419831566;
640 0.6859683749254372;
641 0.7373108331396063;
642 0.7458811983178246;
643 0.7278968022406559;
644 0.6904793535357751;
645 0.6369176452710288;
646 0.5677443693743215;
647 0.4784738764865867;
648 0.3602190632823262;
649 0.2031792054737325;
650 0.2250996160898818;
651 0.4100480091545787;
652 0.5555615956137137;
653 0.6561235366960938;
654 0.7116558878070715;
655 0.727896802240657;
656 0.7121928678670187;
657 0.6712187391428729;
658 0.6139157775591492;
659 0.5478251665295381;
660 0.4677122687599031;
661 0.3587654911000848;
662 0.2050734291675918;
663 0.2197954769003094;
664 0.3949151637190157;
665 0.5285181561736911;
666 0.6213197201867471;
667 0.6745179049094407;
668 0.690479353535786;
669 0.6712187391428787;
670 0.6178408289359514;
671 0.5453605027237883;
672 0.489575966490909;
673 0.4341716881061278;
674 0.3534389974779456;
675 0.2083227496961347;
676 0.207469569837099;
677 0.3697873264791366;
678 0.4914397028492412;
679 0.5753611315000203;
680 0.6235300574157017;
681 0.6369176452710497;
682 0.6139157775591579;
683 0.5453605027237935;
684 0.4336604929612851;
685 0.4109641743019312;
686 0.3881864790111245;
687 0.3642640090182592;
688 0.2179599909280145;
689 0.1889996477663011;
690 0.3340182623592461;
691 0.4409367494853381;
692 0.5140091754526943;
693 0.5559332704045969;
694 0.5677443693743304;
695 0.5478251665295453;
696 0.4895759664908982;
697 0.4109641743019171;
698 0.395727260284338;
699 0.3778949322004734;
700 0.3596268271857124;
701 0.2191250268948948;
702 0.1632722532153683;
703 0.2850397806663325;
704 0.373006008206081;
705 0.4325325506354207;
706 0.4670304994474315;
707 0.4784738764866023;
708 0.4677122687599041;
709 0.4341716881061055;
710 0.388186479011099;
711 0.3778949322004602;
712 0.3633362567187364;
713 0.3464457261905399;
714 0.2096362321365655;
715 0.1276782480038148;
716 0.2184260032478634;
717 0.2821694983395252;
718 0.3248315148915535;
719 0.3499809143811097;
720 0.3602190632823333;
721 0.3587654911000799;
722 0.3534389974779268;
723 0.3642640090182283;
724 0.35962682718569;
725 0.3464457261905295;
726 0.3260728953424643;
727 0.180670595355394;
728 0.07711845915789244;
729 0.1271121156350963;
730 0.1610176733857757;
731 0.1834600412730144;
732 0.1968826374629443;
733 0.2031792054737354;
734 0.2050734291675885;
735 0.2083227496961245;
736 0.2179599909279998;
737 0.2191250268948822;
738 0.2096362321365551;
739 0.1806705953553887;
740 0.1067965550010013];
@endcode


<a name="ann-Matlab/forward_solver.m"></a>
<h1>Annotated version of Matlab/forward_solver.m</h1>
@code{.m}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%% forward solver function %%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%INPUTS:
%theta = current 8x8 parameter matrix
%lbl = cell labeling function
%A_loc = matrix of local contributions to A
%Id = identity matrix of size 33^2 x 33^2
%boundaries = labels of boundary cells
%b = right hand side of linear system (AU = b)
%M = measurement matrix
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%OUTPUTS:
%z = vector of measurements
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function z = forward_solver(theta,lbl,A_loc,Id,boundaries,b,M)

%initialize matrix A for FEM linear solve, AU = b
A = zeros(33^2,33^2);

%loop over cells to build A
for i=0:31
  for j=0:31 %build A by summing over contribution from each cell

    %find local coefficient in 8x8 grid
    theta_loc = theta(floor(i/4)+1,floor(j/4)+1);

    %update A by including contribution from cell (i,j)
    dof = [lbl(i,j),lbl(i,j+1),lbl(i+1,j+1),lbl(i+1,j)];
    A(dof,dof) = A(dof,dof) + theta_loc*A_loc;
  end
end

%enforce boundary condition
A(boundaries,:) = Id(boundaries,:);
A(:,boundaries) = Id(:,boundaries);

%sparsify A
A = sparse(A);

%solve linear equation for coefficients, U
U = A\b;

%get new z values
z = M*U;
@endcode


<a name="ann-Matlab/get_statistics.m"></a>
<h1>Annotated version of Matlab/get_statistics.m</h1>
@code{.m}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%% compute statistics on data set %%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%INPUTS:
%data = tensor of theta samples from each lag time and chain
%theta_means = means of theta over each independent chain
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%OUTPUTS:
%theta_mean = overall mean of chains
%covars = covariance matrices of each independent chain
%autocovar = mean of autocovariance matrix over all the chains
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function [theta_mean,covars,autocovar] = get_statistics(data,theta_means)

%compute overall mean of data, and get size of data matrix
theta_mean = mean(theta_means,3);
[~,~,L,N] = size(data);

%initialize covariance matrices and mean autocovariance matrix
covars = zeros(64,64,N);
autocovar = zeros(64,64,2*L-1);

%compute covariance matrices and mean autocovariance matrix
for n=1:N %loop over independent Markov chains

  %get data from chain n
  data_ = reshape(permute(data(:,:,:,n),[3 2 1]),[L 64]);

  %compute autocovariance matrix of chain n
  mat = xcov(data_,'unbiased');

  %store covariance matrix of chain n
  covars(:,:,n) = reshape(mat(L,:),64,64);

  %update mean autocovariance matrix
  autocovar = autocovar + reshape(mat',[64 64 2*L-1]);
end

%compute mean of autocovariance matrix
autocovar = autocovar(1:64,1:64,L:2*L-1)/N;
@endcode
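
For readers who would rather do this postprocessing step in Python, the following
sketch computes the same kind of per-chain statistics with numpy. This is an
illustration, not part of the benchmark; `samples` is assumed to be an array of
shape `(L, 64)` holding the samples of one chain, recorded every `lag` steps as
in `main.m`:
@code{.py}
import numpy as np

def autocovariance(samples):
    # Lag-t autocovariance matrices of one chain,
    #   C(t) = 1/(L-t) * sum_k (x_k - mean) (x_{k+t} - mean)^T,
    # which is the quantity xcov(...,'unbiased') computes for every pair of
    # components; C(0) is the covariance matrix of the chain.
    L, dim = samples.shape
    centered = samples - samples.mean(axis=0)
    C = np.zeros((L, dim, dim))
    for t in range(L):
        C[t] = centered[:L-t].T @ centered[t:] / (L - t)
    return C
@endcode

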


<a name="ann-Matlab/log_probability.m"></a>
<h1>Annotated version of Matlab/log_probability.m</h1>
@code{.m}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%% compute log probability, log pi %%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%INPUTS:
%theta = current 8x8 parameter matrix
%z = current vector of measurements
%z_hat = vector of "exact" measurements
%sig = standard deviation parameter in likelihood
%sig_pr = standard deviation parameter in prior
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%OUTPUTS:
%log_pi = logarithm of posterior probability
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function log_pi = log_probability(theta,z,z_hat,sig,sig_pr)

%compute log likelihood
log_L = -sum((z-z_hat).^2)/(2*sig^2);

%compute log prior
log_pi_pr = -sum(log(theta).^2,'all')/(2*sig_pr^2);

%compute log posterior
log_pi = log_L+log_pi_pr;
@endcode
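
In formulas, and up to an additive constant that drops out of the
Metropolis-Hastings accept/reject decision anyway, what this function returns is
@f[
  \log \pi(\theta)
  = -\frac{\|z(\theta)-\hat z\|^2}{2\sigma^2}
    -\frac{\|\log \theta\|^2}{2\sigma_\text{pr}^2},
@f]
i.e., the sum of a Gaussian log likelihood for the measurements and a Gaussian
log prior for the logarithms of the (here, mean zero) coefficients;
@f$\sigma@f$ and @f$\sigma_\text{pr}@f$ are the `sig` and `sig_pr` arguments
above.

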


<a name="ann-Matlab/main.m"></a>
<h1>Annotated version of Matlab/main.m</h1>
@code{.m}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%% Run MCMC sampler to estimate posterior distribution %%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%define number of chains, chain length, and lag time
N = input('number of independent Markov chains: ');
N_L = input('length of each Markov chain: ');
lag = input('lag time for measurements: ');
workers = input('number of parallel workers: ');
L = N_L/lag;

%open Matlab parallel pool
parpool(workers)

%load precomputations
load precomputations.mat

%define lag time and data matrix
data = zeros(8,8,L,N); %data matrix of samples at lag times
theta_means = zeros(8,8,N); %overall mean of theta

tic

parfor n=1:N

  %set initial theta, theta mean, and z values of chain
  theta = theta0;
  theta_mean = 0;
  z = z0;

  for m=1:L

    for l=1:lag

      %define proposal, theta_tilde
      xi = normrnd(0,sig_prop,[8 8]);
      theta_tilde = theta.*exp(xi);

      %compute new z values
      z_tilde = forward_solver_(theta_tilde);

      %compute posterior log probability of theta_tilde
      log_pi_tilde = log_probability_(theta_tilde,z_tilde);
      log_pi = log_probability_(theta,z);

      %compute acceptance probability; accept proposal appropriately
      accept = exp(log_pi_tilde-log_pi) ...
               *prod(theta_tilde./theta,'all');
      if rand<accept
        theta = theta_tilde; %accept new theta values
        z = z_tilde;         %record associated measurements
      end

      %update mean of theta
      theta_mean = theta_mean + theta;

    end

    %update data matrix
    data(:,:,m,n) = theta;

  end

  %update theta means
  theta_means(:,:,n) = theta_mean/N_L;

end

toc

%compute statistics on data set
[theta_mean,covars,autocovar] = get_statistics(data,theta_means);

%save data to Matlab workspace, labeled by N and N_L
save (['data_N_' num2str(N) '_N_L_ ' num2str(N_L) '.mat'])
@endcode
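
The inner loop above is a textbook Metropolis-Hastings step with a multiplicative
(log-normal) proposal. For readers who find it easier to parse in Python, here is
a sketch of a single step. It is not part of the benchmark: `forward_solver_` and
`log_probability_` stand for the same two functions as the MATLAB handles, `rng`
is a `numpy.random.Generator` (for example `np.random.default_rng()`), and unlike
the loop above it caches the current log probability instead of recomputing it:
@code{.py}
import numpy as np

def metropolis_hastings_step(theta, z, log_pi, rng, sig_prop=0.0725):
    # Multiplicative proposal: multiply each entry by exp(xi), xi ~ N(0, sig_prop^2)
    xi = rng.normal(0.0, sig_prop, size=theta.shape)
    theta_tilde = theta * np.exp(xi)

    z_tilde = forward_solver_(theta_tilde)
    log_pi_tilde = log_probability_(theta_tilde, z_tilde)

    # Because the proposal is not symmetric, the acceptance probability
    # carries the factor prod(theta_tilde/theta) = exp(sum(xi)).
    accept = np.exp(log_pi_tilde - log_pi) * np.prod(theta_tilde / theta)
    if rng.uniform() < accept:
        return theta_tilde, z_tilde, log_pi_tilde
    return theta, z, log_pi
@endcode

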


<a name="ann-Matlab/plot_solution.m"></a>
<h1>Annotated version of Matlab/plot_solution.m</h1>
@code{.m}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% plots solution, u, to Poisson equation %%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%% associated to theta and a 32x32 mesh %%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%input matrix theta for plotting (e.g., theta = theta_hat)
theta = input('choose theta for plotting u: theta = ');

%construct mass matrix, M_plot, for plotting
xsp = 0:0.02:1;
n = length(xsp);
Mp = zeros(n,n,33^2);
for k=1:33^2
  c = inv_lbl(k);
  for i=1:n
    for j=1:n
      Mp(i,j,k) = phi(xsp(i)-h*c(1),xsp(j)-h*c(2));
    end
  end
end
Mp = reshape(Mp,[n^2 33^2]);

%run forward solver on mean of theta
A = zeros(33^2,33^2);
for i=0:31
  for j=0:31 %build A by summing over contribution from each cell

    %find local coefficient in 8x8 grid
    theta_loc = theta(floor(i/4)+1,floor(j/4)+1);

    %update A by including contribution from cell (i,j)
    dof = [lbl(i,j),lbl(i,j+1),lbl(i+1,j+1),lbl(i+1,j)];
    A(dof,dof) = A(dof,dof) + theta_loc*A_loc;
  end
end

%enforce boundary condition
A(boundaries,:) = Id(boundaries,:);

%sparsify A
A = sparse(A);

%solve linear equation for coefficients, U
U = A\b;

%close all current plots
close all

%plot solution
figure
zs = reshape(Mp*U,[n n]);
surf(xsp,xsp,zs)
@endcode


<a name="ann-Matlab/precomputations.m"></a>
<h1>Annotated version of Matlab/precomputations.m</h1>
@code{.m}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%% do all precomputations necessary for MCMC simulations %%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%define mesh width
h = 1/32;

%define characteristic function of unit square
S = @(x,y) heaviside(x).*heaviside(y) ...
           .*(1-heaviside(x-h)).*(1-heaviside(y-h));

%define tent function on the domain [-h,h]x[-h,h]
phi = @(x,y) ((x+h).*(y+h).*S(x+h,y+h) + (h-x).*(h-y).*S(x,y) ...
             + (x+h).*(h-y).*S(x+h,y) + (h-x).*(y+h).*S(x,y+h))/h^2;

%define function that converts from (i,j) to dof, and its inverse
lbl = @(i,j) 33*j+i+1;
inv_lbl = @(k) [k-1-33*floor((k-1)/33),floor((k-1)/33)];

%construct measurement matrix, M
xs = 1/14:1/14:13/14; %measurement points
M = zeros(13,13,33^2);
for k=1:33^2
  c = inv_lbl(k);
  for i=1:13
    for j=1:13
      M(i,j,k) = phi(xs(i)-h*c(1),xs(j)-h*c(2));
    end
  end
end
M = reshape(M,[13^2 33^2]);

%construct exact coefficient matrix, theta_hat
theta_hat = ones(8,8);
theta_hat(2:3,2:3) = 0.1;
theta_hat(6:7,6:7) = 10;

%construct local overlap matrix, A_loc, and identity matrix Id
A_loc = [2/3 -1/6 -1/3 -1/6;
         -1/6 2/3 -1/6 -1/3;
         -1/3 -1/6 2/3 -1/6;
         -1/6 -1/3 -1/6 2/3];
Id = eye(33^2);

%locate boundary labels
boundaries = [lbl(0:1:32,0),lbl(0:1:32,32),lbl(0,1:1:31),lbl(32,1:1:31)];

%define RHS of FEM linear system, AU = b
b = ones(33^2,1)*10*h^2;
b(boundaries) = zeros(128,1); %enforce boundary conditions on b

%load exact z_hat values
exact_values

%set global parameters and functions for simulation
sig = 0.05; %likelihood standard deviation
sig_pr = 2; %prior (log) standard deviation
sig_prop = 0.0725; %proposal (log) standard deviation
theta0 = ones(8,8); %initial theta values
forward_solver_ = @(theta) ...
                  forward_solver(theta,lbl,A_loc,Id,boundaries,b,M);
log_probability_ = @(theta,z) log_probability(theta,z,z_hat,sig,sig_pr);

%find initial z values
z0 = forward_solver_(theta0);

save precomputations.mat
@endcode
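
The `lbl` and `inv_lbl` functions above implement the usual lexicographic
numbering of the @f$33\times 33@f$ grid of nodes. A short sanity check of the
(zero-based) Python analogues used further down -- again just a sketch, not part
of the benchmark -- looks like this:
@code{.py}
def ij_to_dof_index(i, j):
    return 33*j + i

def inv_ij_to_dof_index(k):
    return [k - 33*(k//33), k//33]

# Every (i,j) pair maps to a unique index in [0, 33^2), and mapping back
# recovers the pair.
assert (sorted(ij_to_dof_index(i, j) for i in range(33) for j in range(33))
        == list(range(33**2)))
assert all(ij_to_dof_index(*inv_ij_to_dof_index(k)) == k
           for k in range(33**2))
@endcode

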


<a name="ann-Python/forward_solver.py"></a>
<h1>Annotated version of Python/forward_solver.py</h1>
@code{.py}
import numpy as np
import scipy.sparse
from scipy.sparse.linalg import spsolve
import time

###########################################################################
############ list of "exact" measurement values, z_hat ####################
###########################################################################

z_hat = np.array(
1098 [0.06076511762259369, 0.09601910120848481,
1099 0.1238852517838584, 0.1495184117375201,
1100 0.1841596127549784, 0.2174525028261122,
1101 0.2250996160898698, 0.2197954769002993,
1102 0.2074695698370926, 0.1889996477663016,
1103 0.1632722532153726, 0.1276782480038186,
1104 0.07711845915789312, 0.09601910120848552,
1105 0.2000589533367983, 0.3385592591951766,
1106 0.3934300024647806, 0.4040223892461541,
1107 0.4122329537843092, 0.4100480091545554,
1108 0.3949151637189968, 0.3697873264791232,
1109 0.33401826235924, 0.2850397806663382,
1110 0.2184260032478671, 0.1271121156350957,
1111 0.1238852517838611, 0.3385592591951819,
1112 0.7119285162766475, 0.8175712861756428,
1113 0.6836254116578105, 0.5779452419831157,
1114 0.5555615956136897, 0.5285181561736719,
1115 0.491439702849224, 0.4409367494853282,
1116 0.3730060082060772, 0.2821694983395214,
1117 0.1610176733857739, 0.1495184117375257,
1118 0.3934300024647929, 0.8175712861756562,
1119 0.9439154625527653, 0.8015904115095128,
1120 0.6859683749254024, 0.6561235366960599,
1121 0.6213197201867315, 0.5753611315000049,
1122 0.5140091754526823, 0.4325325506354165,
1123 0.3248315148915482, 0.1834600412730086,
1124 0.1841596127549917, 0.4040223892461832,
1125 0.6836254116578439, 0.8015904115095396,
1126 0.7870119561144977, 0.7373108331395808,
1127 0.7116558878070463, 0.6745179049094283,
1128 0.6235300574156917, 0.5559332704045935,
1129 0.4670304994474178, 0.3499809143811,
1130 0.19688263746294, 0.2174525028261253,
1131 0.4122329537843404, 0.5779452419831566,
1132 0.6859683749254372, 0.7373108331396063,
1133 0.7458811983178246, 0.7278968022406559,
1134 0.6904793535357751, 0.6369176452710288,
1135 0.5677443693743215, 0.4784738764865867,
1136 0.3602190632823262, 0.2031792054737325,
1137 0.2250996160898818, 0.4100480091545787,
1138 0.5555615956137137, 0.6561235366960938,
1139 0.7116558878070715, 0.727896802240657,
1140 0.7121928678670187, 0.6712187391428729,
1141 0.6139157775591492, 0.5478251665295381,
1142 0.4677122687599031, 0.3587654911000848,
1143 0.2050734291675918, 0.2197954769003094,
1144 0.3949151637190157, 0.5285181561736911,
1145 0.6213197201867471, 0.6745179049094407,
1146 0.690479353535786, 0.6712187391428787,
1147 0.6178408289359514, 0.5453605027237883,
1148 0.489575966490909, 0.4341716881061278,
1149 0.3534389974779456, 0.2083227496961347,
1150 0.207469569837099, 0.3697873264791366,
1151 0.4914397028492412, 0.5753611315000203,
1152 0.6235300574157017, 0.6369176452710497,
1153 0.6139157775591579, 0.5453605027237935,
1154 0.4336604929612851, 0.4109641743019312,
1155 0.3881864790111245, 0.3642640090182592,
1156 0.2179599909280145, 0.1889996477663011,
1157 0.3340182623592461, 0.4409367494853381,
1158 0.5140091754526943, 0.5559332704045969,
1159 0.5677443693743304, 0.5478251665295453,
1160 0.4895759664908982, 0.4109641743019171,
1161 0.395727260284338, 0.3778949322004734,
1162 0.3596268271857124, 0.2191250268948948,
1163 0.1632722532153683, 0.2850397806663325,
1164 0.373006008206081, 0.4325325506354207,
1165 0.4670304994474315, 0.4784738764866023,
1166 0.4677122687599041, 0.4341716881061055,
1167 0.388186479011099, 0.3778949322004602,
1168 0.3633362567187364, 0.3464457261905399,
1169 0.2096362321365655, 0.1276782480038148,
1170 0.2184260032478634, 0.2821694983395252,
1171 0.3248315148915535, 0.3499809143811097,
1172 0.3602190632823333, 0.3587654911000799,
1173 0.3534389974779268, 0.3642640090182283,
1174 0.35962682718569, 0.3464457261905295,
1175 0.3260728953424643, 0.180670595355394,
1176 0.07711845915789244, 0.1271121156350963,
1177 0.1610176733857757, 0.1834600412730144,
1178 0.1968826374629443, 0.2031792054737354,
1179 0.2050734291675885, 0.2083227496961245,
1180 0.2179599909279998, 0.2191250268948822,
1181 0.2096362321365551, 0.1806705953553887,
1182 0.1067965550010013])


###########################################################################
####### do all precomputations necessary for MCMC simulations #############
###########################################################################

# Define the mesh width
h = 1/32

# Define characteristic function of unit square
def heaviside(x) :
    if x<0 :
        return 0
    else :
        return 1

def S(x,y) :
    return heaviside(x)*heaviside(y) * (1-heaviside(x-h))*(1-heaviside(y-h));

# Define tent function on the domain [-h,h]x[-h,h]
def phi(x,y) :
    return ((x+h)*(y+h)*S(x+h,y+h) + (h-x)*(h-y)*S(x,y)
            + (x+h)*(h-y)*S(x+h,y) + (h-x)*(y+h)*S(x,y+h))/h**2

# Define conversion function for dof's from 2D to scalar label, and
# its inverse
def ij_to_dof_index(i,j) :
    return 33*j+i

def inv_ij_to_dof_index(k) :
    return [k-33*int(k/33),int(k/33)]


# Construct measurement matrix, M, for measurements
xs = np.arange(1./14,13./14,1./14); #measurement points

M = np.zeros((13,13,33**2));
for k in range(33**2) :
    c = inv_ij_to_dof_index(k)
    for i in range(13) :
        for j in range(13) :
            M[i,j,k] = phi(xs[i]-h*c[0], xs[j]-h*c[1])
M = M.reshape((13**2, 33**2))
M = scipy.sparse.csr_matrix(M);

# Construct local overlap matrix, A_loc, and identity matrix Id
A_loc = np.array([[2./3, -1./6, -1./3, -1./6],
                  [-1./6, 2./3, -1./6, -1./3],
                  [-1./3, -1./6, 2./3, -1./6],
                  [-1./6, -1./3, -1./6, 2./3]])
Id = np.eye(33**2,33**2)

# Locate boundary labels
boundaries = ([ij_to_dof_index(i,0) for i in range(33)] +
              [ij_to_dof_index(i,32) for i in range(33)] +
              [ij_to_dof_index(0,j+1) for j in range(31)] +
              [ij_to_dof_index(32,j+1) for j in range(31)])

# Define RHS of FEM linear system, AU = b
b = np.ones(33**2)*10*h**2
b[boundaries] = 0 #enforce boundary conditions on b




###########################################################################
###################### forward solver function ############################
###########################################################################

def forward_solver(theta) :
    # Initialize matrix A for FEM linear solve, AU = b
    A = np.zeros((33**2,33**2))

    # Build A by summing over contribution from each cell
    for i in range(32) :
        for j in range (32) :
            # Find local coefficient in 8x8 grid
            theta_loc = theta[int(i/4)+int(j/4)*8]

            # Update A by including contribution from cell (i,j)
            dof = [ij_to_dof_index(i,j),
                   ij_to_dof_index(i,j+1),
                   ij_to_dof_index(i+1,j+1),
                   ij_to_dof_index(i+1,j)]
            A[np.ix_(dof,dof)] += theta_loc * A_loc

    # Enforce boundary condition: Zero out rows and columns, then
    # put a one back into the diagonal entries.
    A[boundaries,:] = 0
    A[:,boundaries] = 0
    A[boundaries,boundaries] = 1

    # Solve linear equation for coefficients, U, and then
    # get the Z vector by multiplying by the measurement matrix
    u = spsolve(scipy.sparse.csr_matrix(A), b)

    z = M * u

    return z




###########################################################################
################# compute log probability, log pi #########################
###########################################################################

def log_likelihood(theta) :
    z = forward_solver(theta)
    misfit = z - z_hat
    sig = 0.05 #likelihood standard deviation
    return -np.dot(misfit,misfit)/(2*sig**2)

def log_prior(theta) :
    sig_pr = 2 #prior (log) standard deviation
    return -np.linalg.norm(np.log(theta))**2/(2*sig_pr**2)

def log_posterior(theta) :
    return log_likelihood(theta) + log_prior(theta)



###########################################################################
############# A function to test against known output #####################
###########################################################################


def verify_against_stored_tests() :
    for i in range(10) :
        print ("Verifying against data set", i)

        # Read the input vector
        f_input = open ("../testing/input.{}.txt".format(i), 'r')
        theta = np.fromfile(f_input, count=64, sep=" ")

        # Then compute both the forward solution and its statistics.
        # This is not efficiently written here (it calls the forward
        # solver twice), but we don't care about efficiency here since
        # we are only computing with ten samples
        this_z = forward_solver(theta)
        this_log_likelihood = log_likelihood(theta)
        this_log_prior = log_prior(theta)

        # Then also read the reference output generated by the C++ program:
        f_output_z = open ("../testing/output.{}.z.txt".format(i), 'r')
        f_output_likelihood = open ("../testing/output.{}.loglikelihood.txt".format(i), 'r')
        f_output_prior = open ("../testing/output.{}.logprior.txt".format(i), 'r')

        reference_z = np.fromfile(f_output_z, count=13**2, sep=" ")
        reference_log_likelihood = float(f_output_likelihood.read())
        reference_log_prior = float(f_output_prior.read())

        print (" || z-z_ref || : ",
               np.linalg.norm(this_z - reference_z))
        print (" log likelihood : ",
               "Python value=", this_log_likelihood,
               "(C++ reference value=", reference_log_likelihood,
               ", error=", abs(this_log_likelihood - reference_log_likelihood),
               ")")
        print (" log prior : ",
               "Python value=", this_log_prior,
               "(C++ reference value=", reference_log_prior,
               ", error=", abs(this_log_prior - reference_log_prior),
               ")")


def time_forward_solver() :
    begin = time.time()

    n_runs = 100
    for i in range(n_runs) :
        # Create a random vector (with entries between 0 and 1), scale
        # it by a factor of 4, subtract 2, then take the exponential
        # of each entry to get random entries between e^{-2} and
        # e^{+2}
        theta = np.exp(np.random.rand(64) * 4 - 2)
        z = forward_solver(theta)
    end = time.time()
    print ("Time per forward evaluation:", (end-begin)/n_runs)

verify_against_stored_tests()
time_forward_solver()
@endcode


<a name="ann-mcmc-laplace.cc"></a>
<h1>Annotated version of mcmc-laplace.cc</h1>
1371 *
1372 *
1373 *
1374 *
1375 * @code
1376 *   /* ---------------------------------------------------------------------
1377 *   *
1378 *   * Copyright (C) 2019 by the deal.II authors and Wolfgang Bangerth.
1379 *   *
1380 *   * This file is part of the deal.II library.
1381 *   *
1382 *   * The deal.II library is free software; you can use it, redistribute
1383 *   * it, and/or modify it under the terms of the GNU Lesser General
1384 *   * Public License as published by the Free Software Foundation; either
1385 *   * version 2.1 of the License, or (at your option) any later version.
1386 *   * The full text of the license can be found in the file LICENSE.md at
1387 *   * the top level directory of deal.II.
1388 *   *
1389 *   * ---------------------------------------------------------------------
1390 *  
1391 *   *
1392 *   * Author: Wolfgang Bangerth, Colorado State University, 2019.
1393 *   */
1394 *  
1395 *  
1396 *  
1397 *   #include <deal.II/base/timer.h>
1398 *   #include <deal.II/base/multithread_info.h>
1399 *   #include <deal.II/grid/tria.h>
1400 *   #include <deal.II/dofs/dof_handler.h>
1401 *   #include <deal.II/grid/grid_generator.h>
1402 *   #include <deal.II/grid/tria_accessor.h>
1403 *   #include <deal.II/grid/tria_iterator.h>
1404 *   #include <deal.II/dofs/dof_accessor.h>
1405 *   #include <deal.II/fe/fe_q.h>
1406 *   #include <deal.II/dofs/dof_tools.h>
1407 *   #include <deal.II/fe/fe_values.h>
1408 *   #include <deal.II/base/quadrature_lib.h>
1409 *   #include <deal.II/base/function.h>
1410 *   #include <deal.II/numerics/vector_tools.h>
1411 *   #include <deal.II/numerics/matrix_tools.h>
1412 *   #include <deal.II/lac/vector.h>
1413 *   #include <deal.II/lac/full_matrix.h>
1414 *   #include <deal.II/lac/sparse_matrix.h>
1415 *   #include <deal.II/lac/dynamic_sparsity_pattern.h>
1416 *   #include <deal.II/lac/solver_cg.h>
1417 *   #include <deal.II/lac/precondition.h>
1418 *   #include <deal.II/lac/sparse_ilu.h>
1419 *  
1420 *   #include <deal.II/numerics/data_out.h>
1421 *  
1422 *   #include <fstream>
1423 *   #include <iostream>
1424 *   #include <random>
1425 *  
1426 *   #include <deal.II/base/logstream.h>
1427 *  
1428 *   using namespace dealii;
1429 *  
1430 *  
1431 * @endcode
1432 *
1433 * The following is a namespace in which we define the solver of the PDE.
1434 * The main class implements an abstract `Interface` class declared at
1435 * the top, which provides for an `evaluate()` function that, given
1436 * a coefficient vector, solves the PDE discussed in the Readme file
1437 * and then evaluates the solution at the 169 mentioned points.
1438 *
1439
1440 *
1441 * The solver follows the basic layout of @ref step_4 "step-4", though it precomputes
1442 * a number of things in the `setup_system()` function, such as the
1443 * evaluation of the matrix that corresponds to the point evaluations,
1444 * as well as the local contributions to matrix and right hand side.
1445 *
1446
1447 *
1448 * Rather than commenting on everything in detail, in the following
1449 * we will only document those things that are not already clear from
1450 * @ref step_4 "step-4" and a small number of other tutorial programs.
1451 *
1452 * @code
1453 *   namespace ForwardSimulator
1454 *   {
1455 *   class Interface
1456 *   {
1457 *   public:
1458 *   virtual Vector<double> evaluate(const Vector<double> &coefficients) = 0;
1459 *  
1460 *   virtual ~Interface() = default;
1461 *   };
1462 *  
1463 *  
1464 *  
1465 *   template <int dim>
1466 *   class PoissonSolver : public Interface
1467 *   {
1468 *   public:
1469 *   PoissonSolver(const unsigned int global_refinements,
1470 *   const unsigned int fe_degree,
1471 *   const std::string &dataset_name);
1472 *   virtual Vector<double>
1473 *   evaluate(const Vector<double> &coefficients) override;
1474 *  
1475 *   private:
1476 *   void make_grid(const unsigned int global_refinements);
1477 *   void setup_system();
1478 *   void assemble_system(const Vector<double> &coefficients);
1479 *   void solve();
1480 *   void output_results(const Vector<double> &coefficients) const;
1481 *  
1482 *   Triangulation<dim> triangulation;
1483 *   FE_Q<dim> fe;
1484 *   DoFHandler<dim> dof_handler;
1485 *  
1486 *   FullMatrix<double> cell_matrix;
1487 *   Vector<double> cell_rhs;
1488 *   std::map<types::global_dof_index,double> boundary_values;
1489 *  
1490 *   SparsityPattern sparsity_pattern;
1491 *   SparseMatrix<double> system_matrix;
1492 *  
1493 *   Vector<double> solution;
1494 *   Vector<double> system_rhs;
1495 *  
1496 *   std::vector<Point<dim>> measurement_points;
1497 *  
1498 *   SparsityPattern measurement_sparsity;
1499 *   SparseMatrix<double> measurement_matrix;
1500 *  
1501 *   TimerOutput timer;
1502 *   unsigned int nth_evaluation;
1503 *  
1504 *   const std::string &dataset_name;
1505 *   };
1506 *  
1507 *  
1508 *  
1509 *   template <int dim>
1510 *   PoissonSolver<dim>::PoissonSolver(const unsigned int global_refinements,
1511 *   const unsigned int fe_degree,
1512 *   const std::string &dataset_name)
1513 *   : fe(fe_degree)
1514 *   , dof_handler(triangulation)
1515 *   , timer(std::cout, TimerOutput::summary, TimerOutput::cpu_times)
1516 *   , nth_evaluation(0)
1517 *   , dataset_name(dataset_name)
1518 *   {
1519 *   make_grid(global_refinements);
1520 *   setup_system();
1521 *   }
1522 *  
1523 *  
1524 *  
1525 *   template <int dim>
1526 *   void PoissonSolver<dim>::make_grid(const unsigned int global_refinements)
1527 *   {
1528 *   Assert(global_refinements >= 3,
1529 *   ExcMessage("This program makes the assumption that the mesh for the "
1530 *   "solution of the PDE is at least as fine as the one used "
1531 *   "in the definition of the coefficient."));
1532 *   GridGenerator::hyper_cube(triangulation, 0, 1);
1533 *   triangulation.refine_global(global_refinements);
1534 *  
1535 *   std::cout << " Number of active cells: " << triangulation.n_active_cells()
1536 *   << std::endl;
1537 *   }
1538 *  
1539 *  
1540 *  
1541 *   template <int dim>
1542 *   void PoissonSolver<dim>::setup_system()
1543 *   {
1544 * @endcode
1545 *
1546 * First define the finite element space:
1547 *
1548 * @code
1549 *   dof_handler.distribute_dofs(fe);
1550 *  
1551 *   std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
1552 *   << std::endl;
1553 *  
1554 * @endcode
1555 *
1556 * Then set up the main data structures that will hold the discrete problem:
1557 *
1558 * @code
1559 *   {
1560 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
1561 *   DoFTools::make_sparsity_pattern(dof_handler, dsp);
1562 *   sparsity_pattern.copy_from(dsp);
1563 *  
1564 *   system_matrix.reinit(sparsity_pattern);
1565 *  
1566 *   solution.reinit(dof_handler.n_dofs());
1567 *   system_rhs.reinit(dof_handler.n_dofs());
1568 *   }
1569 *  
1570 * @endcode
1571 *
1572 * And then define the tools to do point evaluation. We choose
1573 * a set of 13x13 points evenly distributed across the domain:
1574 *
1575 * @code
1576 *   {
1577 *   const unsigned int n_points_per_direction = 13;
1578 *   const double dx = 1. / (n_points_per_direction + 1);
1579 *  
1580 *   for (unsigned int x = 1; x <= n_points_per_direction; ++x)
1581 *   for (unsigned int y = 1; y <= n_points_per_direction; ++y)
1582 *   measurement_points.emplace_back(x * dx, y * dx);
1583 *  
1584 * @endcode
1585 *
1586 * First build a full matrix of the evaluation process. We do this
1587 * even though the matrix is really sparse -- but we don't know
1588 * which entries are nonzero. Later, the `copy_from()` function
1589 * calls build a sparsity pattern and a sparse matrix from
1590 * the dense matrix.
1591 *
1592 * @code
1593 *   Vector<double> weights(dof_handler.n_dofs());
1594 *   FullMatrix<double> full_measurement_matrix(n_points_per_direction *
1595 *   n_points_per_direction,
1596 *   dof_handler.n_dofs());
1597 *  
1598 *   for (unsigned int index = 0; index < measurement_points.size(); ++index)
1599 *   {
 *   VectorTools::create_point_source_vector(dof_handler,
 *   measurement_points[index],
1602 *   weights);
1603 *   for (unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
1604 *   full_measurement_matrix(index, i) = weights(i);
1605 *   }
1606 *  
1607 *   measurement_sparsity.copy_from(full_measurement_matrix);
1608 *   measurement_matrix.reinit(measurement_sparsity);
1609 *   measurement_matrix.copy_from(full_measurement_matrix);
1610 *   }
1611 *  
1612 * @endcode
1613 *
1614 * Next build the mapping from cell to the index in the 64-element
1615 * coefficient vector:
1616 *
1617 * @code
1618 *   for (const auto &cell : triangulation.active_cell_iterators())
1619 *   {
1620 *   const unsigned int i = std::floor(cell->center()[0] * 8);
1621 *   const unsigned int j = std::floor(cell->center()[1] * 8);
1622 *  
1623 *   const unsigned int index = i + 8 * j;
1624 *  
1625 *   cell->set_user_index(index);
1626 *   }
1627 *  
1628 * @endcode
1629 *
1630 * Finally prebuild the building blocks of the linear system as
1631 * discussed in the Readme file :
1632 *
1633 * @code
1634 *   {
1635 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
1636 *  
1637 *   cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
1638 *   cell_rhs.reinit(dofs_per_cell);
1639 *  
1640 *   const QGauss<dim> quadrature_formula(fe.degree+1);
1641 *   const unsigned int n_q_points = quadrature_formula.size();
1642 *  
1643 *   FEValues<dim> fe_values(fe,
1644 *   quadrature_formula,
 *   update_values | update_gradients |
 *   update_JxW_values);
1647 *  
1648 *   fe_values.reinit(dof_handler.begin_active());
1649 *  
1650 *   for (unsigned int q_index = 0; q_index < n_q_points; ++q_index)
1651 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1652 *   {
1653 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1654 *   cell_matrix(i, j) +=
1655 *   (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
1656 *   fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
1657 *   fe_values.JxW(q_index)); // dx
1658 *  
1659 *   cell_rhs(i) += (fe_values.shape_value(i, q_index) * // phi_i(x_q)
1660 *   10.0 * // f(x_q)
1661 *   fe_values.JxW(q_index)); // dx
1662 *   }
1663 *  
 *   VectorTools::interpolate_boundary_values(dof_handler,
 *   0,
 *   Functions::ZeroFunction<dim>(),
 *   boundary_values);
1668 *   }
1669 *   }
1670 *  
1671 *  
1672 *  
1673 * @endcode
1674 *
1675 * Given that we have pre-built the matrix and right hand side contributions
1676 * for a (representative) cell, the function that assembles the matrix is
1677 * pretty short and straightforward:
1678 *
1679 * @code
1680 *   template <int dim>
1681 *   void PoissonSolver<dim>::assemble_system(const Vector<double> &coefficients)
1682 *   {
1683 *   Assert(coefficients.size() == 64, ExcInternalError());
1684 *  
1685 *   system_matrix = 0;
1686 *   system_rhs = 0;
1687 *  
1688 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
1689 *  
1690 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1691 *  
1692 *   for (const auto &cell : dof_handler.active_cell_iterators())
1693 *   {
1694 *   const double coefficient = coefficients(cell->user_index());
1695 *  
1696 *   cell->get_dof_indices(local_dof_indices);
1697 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1698 *   {
1699 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1700 *   system_matrix.add(local_dof_indices[i],
1701 *   local_dof_indices[j],
1702 *   coefficient * cell_matrix(i, j));
1703 *  
1704 *   system_rhs(local_dof_indices[i]) += cell_rhs(i);
1705 *   }
1706 *   }
1707 *  
1708 *   MatrixTools::apply_boundary_values(boundary_values,
1709 *   system_matrix,
1710 *   solution,
1711 *   system_rhs);
1712 *   }
1713 *  
1714 *  
1715 * @endcode
1716 *
1717 * The same is true for the function that solves the linear system:
1718 *
1719 * @code
1720 *   template <int dim>
1721 *   void PoissonSolver<dim>::solve()
1722 *   {
1723 *   SparseILU<double> ilu;
1724 *   ilu.initialize(system_matrix);
1725 *   SolverControl control(100, 1e-10*system_rhs.l2_norm());
1726 *   SolverCG<> solver(control);
1727 *   solver.solve(system_matrix, solution, system_rhs, ilu);
1728 *   }
1729 *  
1730 *  
1731 *  
1732 * @endcode
1733 *
1734 * The following function outputs graphical data for the most recently
1735 * used coefficient and corresponding solution of the PDE. Collecting
1736 * the coefficient values requires translating from the 64-element
1737 * coefficient vector and the cells that correspond to each of these
1738 * elements. The rest remains pretty obvious, with the exception
1739 * of including the number of the current sample into the file name.
1740 *
1741 * @code
1742 *   template <int dim>
1743 *   void
1744 *   PoissonSolver<dim>::output_results(const Vector<double> &coefficients) const
1745 *   {
1746 *   Vector<float> coefficient_values(triangulation.n_active_cells());
1747 *   for (const auto &cell : triangulation.active_cell_iterators())
1748 *   coefficient_values[cell->active_cell_index()] =
1749 *   coefficients(cell->user_index());
1750 *  
1751 *   DataOut<dim> data_out;
1752 *  
1753 *   data_out.attach_dof_handler(dof_handler);
1754 *   data_out.add_data_vector(solution, "solution");
1755 *   data_out.add_data_vector(coefficient_values, "coefficient");
1756 *  
1757 *   data_out.build_patches();
1758 *  
1759 *   std::ofstream output("solution-" +
1760 *   Utilities::int_to_string(nth_evaluation, 10) + ".vtu");
1761 *   data_out.write_vtu(output);
1762 *   }
1763 *  
1764 *  
1765 *  
1766 * @endcode
1767 *
1768 * The following is the main function of this class: Given a coefficient
1769 * vector, it assembles the linear system, solves it, and then evaluates
1770 * the solution at the measurement points by applying the measurement
1771 * matrix to the solution vector. That vector of "measured" values
1772 * is then returned.
1773 *
1774
1775 *
1776 * The function will also output the solution in a graphical format
1777 * if you un-comment the corresponding statement in the third
1778 * code block. However, you may end up with a very large amount
1779 * of data: This code is producing, at the minimum, 10,000 samples
1780 * and creating output for each one of them is surely more data
1781 * than you ever want to see!
1782 *
1783
1784 *
1785 * At the end of the function, we output some timing information
1786 * every 10,000 samples.
1787 *
1788 * @code
1789 *   template <int dim>
1790 *   Vector<double>
1791 *   PoissonSolver<dim>::evaluate(const Vector<double> &coefficients)
1792 *   {
1793 *   {
1794 *   TimerOutput::Scope section(timer, "Building linear systems");
1795 *   assemble_system(coefficients);
1796 *   }
1797 *  
1798 *   {
1799 *   TimerOutput::Scope section(timer, "Solving linear systems");
1800 *   solve();
1801 *   }
1802 *  
1803 *   Vector<double> measurements(measurement_matrix.m());
1804 *   {
1805 *   TimerOutput::Scope section(timer, "Postprocessing");
1806 *  
1807 *   measurement_matrix.vmult(measurements, solution);
1808 *   Assert(measurements.size() == measurement_points.size(),
1809 *   ExcInternalError());
1810 *  
1811 *   /* output_results(coefficients); */
1812 *   }
1813 *  
1814 *   ++nth_evaluation;
1815 *   if (nth_evaluation % 10000 == 0)
1816 *   timer.print_summary();
1817 *  
1818 *   return measurements;
1819 *   }
1820 *   } // namespace ForwardSimulator
1821 *  
1822 *  
1823 * @endcode
1824 *
1825 * The following namespaces define the statistical properties of the Bayesian
1826 * inverse problem. The first is about the definition of the measurement
1827 * statistics (the "likelihood"), which we here assume to be a normal
1828 * distribution @f$N(\mu,\sigma I)@f$ with mean value @f$\mu@f$ given by the
1829 * actual measurement vector (passed as an argument to the constructor
 * of the `Gaussian` class) and standard deviation @f$\sigma@f$.
1831 *
1832
1833 *
1834 * For reasons of numerical accuracy, it is useful to not return the
1835 * actual likelihood, but its logarithm. This is because these
1836 * values can be very small, occasionally on the order of @f$e^{-100}@f$,
1837 * for which it becomes very difficult to compute accurate
1838 * values.
1839 *
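 * To see why, consider the following little illustration (a sketch only, not
 * part of the program): likelihood values of this size underflow in double
 * precision, while their logarithms remain perfectly ordinary numbers.
 * <div class=CodeFragmentInTutorialComment>
 * @code{.py}
 * import math
 * math.exp(-100)       # ~3.7e-44: representable, but already tiny
 * math.exp(-100)**8    # 0.0 -- the product of a few such values underflows
 * -100.0 * 8           # the corresponding logarithm, -800, is unproblematic
 * @endcode
 * </div>
 *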
1840 * @code
1841 *   namespace LogLikelihood
1842 *   {
1843 *   class Interface
1844 *   {
1845 *   public:
1846 *   virtual double log_likelihood(const Vector<double> &x) const = 0;
1847 *  
1848 *   virtual ~Interface() = default;
1849 *   };
1850 *  
1851 *  
1852 *   class Gaussian : public Interface
1853 *   {
1854 *   public:
1855 *   Gaussian(const Vector<double> &mu, const double sigma);
1856 *  
1857 *   virtual double log_likelihood(const Vector<double> &x) const override;
1858 *  
1859 *   private:
1860 *   const Vector<double> mu;
1861 *   const double sigma;
1862 *   };
1863 *  
1864 *   Gaussian::Gaussian(const Vector<double> &mu, const double sigma)
1865 *   : mu(mu)
1866 *   , sigma(sigma)
1867 *   {}
1868 *  
1869 *  
1870 *   double Gaussian::log_likelihood(const Vector<double> &x) const
1871 *   {
1872 *   Vector<double> x_minus_mu = x;
1873 *   x_minus_mu -= mu;
1874 *  
1875 *   return -x_minus_mu.norm_sqr() / (2 * sigma * sigma);
1876 *   }
1877 *   } // namespace LogLikelihood
1878 *  
1879 *  
1880 * @endcode
1881 *
1882 * Next up is the "prior" imposed on the coefficients. We assume
1883 * that the logarithms of the entries of the coefficient vector
1884 * are all distributed as a Gaussian with given mean and standard
1885 * deviation. If the logarithms of the coefficients are normally
1886 * distributed, then this implies in particular that the coefficients
1887 * can only be positive, which is a useful property to ensure the
1888 * well-posedness of the forward problem.
1889 *
1890
1891 *
1892 * For the same reasons as for the likelihood above, the interface
1893 * for the prior asks for returning the *logarithm* of the prior,
1894 * instead of the prior probability itself.
1895 *
1896 * @code
1897 *   namespace LogPrior
1898 *   {
1899 *   class Interface
1900 *   {
1901 *   public:
1902 *   virtual double log_prior(const Vector<double> &x) const = 0;
1903 *  
1904 *   virtual ~Interface() = default;
1905 *   };
1906 *  
1907 *  
1908 *   class LogGaussian : public Interface
1909 *   {
1910 *   public:
1911 *   LogGaussian(const double mu, const double sigma);
1912 *  
1913 *   virtual double log_prior(const Vector<double> &x) const override;
1914 *  
1915 *   private:
1916 *   const double mu;
1917 *   const double sigma;
1918 *   };
1919 *  
1920 *   LogGaussian::LogGaussian(const double mu, const double sigma)
1921 *   : mu(mu)
1922 *   , sigma(sigma)
1923 *   {}
1924 *  
1925 *  
1926 *   double LogGaussian::log_prior(const Vector<double> &x) const
1927 *   {
1928 *   double log_of_product = 0;
1929 *  
1930 *   for (const auto &el : x)
1931 *   log_of_product +=
1932 *   -(std::log(el) - mu) * (std::log(el) - mu) / (2 * sigma * sigma);
1933 *  
1934 *   return log_of_product;
1935 *   }
1936 *   } // namespace LogPrior
1937 *  
1938 *  
1939 *  
1940 * @endcode
1941 *
1942 * The Metropolis-Hastings algorithm requires a method to create a new sample
1943 * given a previous sample. We do this by perturbing the current (coefficient)
1944 * sample randomly using a Gaussian distribution centered at the current
1945 * sample. To ensure that the samples' individual entries all remain
1946 * positive, we use a Gaussian distribution in logarithm space -- in other
1947 * words, instead of *adding* a small perturbation with mean value zero,
1948 * we *multiply* the entries of the current sample by a factor that
1949 * is the exponential of a random number with mean zero. (Because the
1950 * exponential of zero is one, this means that the most likely factors
1951 * to multiply the existing sample entries by are close to one. And
1952 * because the exponential of a number is always positive, we never
1953 * get negative samples this way.)
1954 *
1955
1956 *
1957 * But the Metropolis-Hastings sampler doesn't just need a perturbed
1958 * sample @f$y@f$ location given the current sample location @f$x@f$. It also
 * needs to know the ratio of the probability of reaching @f$x@f$ from
 * @f$y@f$, divided by the probability of reaching @f$y@f$ from @f$x@f$. If we
1961 * were to use a symmetric proposal distribution (e.g., a Gaussian
1962 * distribution centered at @f$x@f$ with a width independent of @f$x@f$), then
1963 * these two probabilities would be the same, and the ratio one. But
1964 * that's not the case for the Gaussian in log space. It's not
1965 * terribly difficult to verify that in that case, for a single
1966 * component the ratio of these probabilities is @f$y_i/x_i@f$, and
1967 * consequently for all components of the vector together, the
1968 * probability is the product of these ratios.
1969 *
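 * For completeness, here is that short computation, under the assumption
 * stated above that each entry is multiplied by @f$e^\xi@f$ with
 * @f$\xi\sim N(0,\sigma^2)@f$: the proposal density for moving from
 * @f$x_i@f$ to @f$y_i@f$ is the log-normal density
 * @f[
 *   q(y_i\mid x_i) = \frac{1}{y_i\,\sigma\sqrt{2\pi}}
 *   \exp\left(-\frac{(\ln y_i-\ln x_i)^2}{2\sigma^2}\right),
 * @f]
 * and because the exponential factor is symmetric in @f$x_i@f$ and
 * @f$y_i@f$, the ratio that enters the acceptance probability is
 * @f[
 *   \frac{q(x_i\mid y_i)}{q(y_i\mid x_i)}
 *   = \frac{1/x_i}{1/y_i} = \frac{y_i}{x_i}.
 * @f]
 *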
1970 * @code
1971 *   namespace ProposalGenerator
1972 *   {
1973 *   class Interface
1974 *   {
1975 *   public:
1976 *   virtual
1977 *   std::pair<Vector<double>,double>
1978 *   perturb(const Vector<double> &current_sample) const = 0;
1979 *  
1980 *   virtual ~Interface() = default;
1981 *   };
1982 *  
1983 *  
1984 *   class LogGaussian : public Interface
1985 *   {
1986 *   public:
1987 *   LogGaussian(const unsigned int random_seed, const double log_sigma);
1988 *  
1989 *   virtual
1990 *   std::pair<Vector<double>,double>
1991 *   perturb(const Vector<double> &current_sample) const override;
1992 *  
1993 *   private:
1994 *   const double log_sigma;
1995 *   mutable std::mt19937 random_number_generator;
1996 *   };
1997 *  
1998 *  
1999 *  
2000 *   LogGaussian::LogGaussian(const unsigned int random_seed,
2001 *   const double log_sigma)
2002 *   : log_sigma(log_sigma)
2003 *   {
2004 *   random_number_generator.seed(random_seed);
2005 *   }
2006 *  
2007 *  
2008 *   std::pair<Vector<double>,double>
2009 *   LogGaussian::perturb(const Vector<double> &current_sample) const
2010 *   {
2011 *   Vector<double> new_sample = current_sample;
2012 *   double product_of_ratios = 1;
2013 *   for (auto &x : new_sample)
2014 *   {
2015 *   const double rnd = std::normal_distribution<>(0, log_sigma)(random_number_generator);
2016 *   const double exp_rnd = std::exp(rnd);
2017 *   x *= exp_rnd;
2018 *   product_of_ratios *= exp_rnd;
2019 *   }
2020 *  
2021 *   return {new_sample, product_of_ratios};
2022 *   }
2023 *  
2024 *   } // namespace ProposalGenerator
2025 *  
2026 *  
2027 * @endcode
2028 *
2029 * The last main class is the Metropolis-Hastings sampler itself.
2030 * If you understand the algorithm behind this method, then
2031 * the following implementation should not be too difficult
2032 * to read. The only thing of relevance is that descriptions
2033 * of the algorithm typically ask whether the *ratio* of two
2034 * probabilities (the "posterior" probabilities of the current
2035 * and the previous samples, where the "posterior" is the product of the
2036 * likelihood and the prior probability) is larger or smaller than a
2037 * randomly drawn number. But because our interfaces return the
2038 * *logarithms* of these probabilities, we now need to take
2039 * the ratio of appropriate exponentials -- which is made numerically
2040 * more stable by considering the exponential of the difference of
2041 * the log probabilities. The only other slight complication is that
2042 * we need to multiply this ratio by the ratio of proposal probabilities
2043 * since we use a non-symmetric proposal distribution.
2044 *
2045
2046 *
2047 * Finally, we note that the output is generated with 7 digits of
2048 * accuracy. (The C++ default is 6 digits.) We do this because,
2049 * as shown in the paper, we can determine the mean value of the
2050 * probability distribution we are sampling here to at least six
2051 * digits of accuracy, and do not want to be limited by the precision
2052 * of the output.
2053 *
2054 * @code
2055 *   namespace Sampler
2056 *   {
2057 *   class MetropolisHastings
2058 *   {
2059 *   public:
2060 *   MetropolisHastings(ForwardSimulator::Interface & simulator,
2061 *   const LogLikelihood::Interface & likelihood,
2062 *   const LogPrior::Interface & prior,
2063 *   const ProposalGenerator::Interface &proposal_generator,
2064 *   const unsigned int random_seed,
2065 *   const std::string & dataset_name);
2066 *  
2067 *   void sample(const Vector<double> &starting_guess,
2068 *   const unsigned int n_samples);
2069 *  
2070 *   private:
2071 *   ForwardSimulator::Interface & simulator;
2072 *   const LogLikelihood::Interface & likelihood;
2073 *   const LogPrior::Interface & prior;
2074 *   const ProposalGenerator::Interface &proposal_generator;
2075 *  
2076 *   std::mt19937 random_number_generator;
2077 *  
2078 *   unsigned int sample_number;
2079 *   unsigned int accepted_sample_number;
2080 *  
2081 *   std::ofstream output_file;
2082 *  
2083 *   void write_sample(const Vector<double> &current_sample,
2084 *   const double current_log_likelihood);
2085 *   };
2086 *  
2087 *  
2088 *   MetropolisHastings::MetropolisHastings(
2089 *   ForwardSimulator::Interface & simulator,
2090 *   const LogLikelihood::Interface & likelihood,
2091 *   const LogPrior::Interface & prior,
2092 *   const ProposalGenerator::Interface &proposal_generator,
2093 *   const unsigned int random_seed,
2094 *   const std::string & dataset_name)
2095 *   : simulator(simulator)
2096 *   , likelihood(likelihood)
2097 *   , prior(prior)
2098 *   , proposal_generator(proposal_generator)
2099 *   , sample_number(0)
2100 *   , accepted_sample_number(0)
2101 *   {
2102 *   output_file.open("samples-" + dataset_name + ".txt");
2103 *   output_file.precision(7);
2104 *  
2105 *   random_number_generator.seed(random_seed);
2106 *   }
2107 *  
2108 *  
2109 *   void MetropolisHastings::sample(const Vector<double> &starting_guess,
2110 *   const unsigned int n_samples)
2111 *   {
2112 *   std::uniform_real_distribution<> uniform_distribution(0, 1);
2113 *  
2114 *   Vector<double> current_sample = starting_guess;
2115 *   double current_log_posterior =
2116 *   (likelihood.log_likelihood(simulator.evaluate(current_sample)) +
2117 *   prior.log_prior(current_sample));
2118 *  
2119 *   ++sample_number;
2120 *   ++accepted_sample_number;
2121 *   write_sample(current_sample, current_log_posterior);
2122 *  
2123 *   for (unsigned int k = 1; k < n_samples; ++k, ++sample_number)
2124 *   {
2125 *   std::pair<Vector<double>,double>
2126 *   perturbation = proposal_generator.perturb(current_sample);
2127 *   const Vector<double> trial_sample = std::move (perturbation.first);
2128 *   const double perturbation_probability_ratio = perturbation.second;
2129 *  
2130 *   const double trial_log_posterior =
2131 *   (likelihood.log_likelihood(simulator.evaluate(trial_sample)) +
2132 *   prior.log_prior(trial_sample));
2133 *  
2134 *   if (std::exp(trial_log_posterior - current_log_posterior) * perturbation_probability_ratio
2135 *   >=
2136 *   uniform_distribution(random_number_generator))
2137 *   {
2138 *   current_sample = trial_sample;
2139 *   current_log_posterior = trial_log_posterior;
2140 *  
2141 *   ++accepted_sample_number;
2142 *   }
2143 *  
2144 *   write_sample(current_sample, current_log_posterior);
2145 *   }
2146 *   }
2147 *  
2148 *  
2149 *  
2150 *   void MetropolisHastings::write_sample(const Vector<double> &current_sample,
2151 *   const double current_log_posterior)
2152 *   {
2153 *   output_file << current_log_posterior << '\t';
2154 *   output_file << accepted_sample_number << '\t';
2155 *   for (const auto &x : current_sample)
2156 *   output_file << x << ' ';
2157 *   output_file << '\n';
2158 *  
2159 *   output_file.flush();
2160 *   }
2161 *   } // namespace Sampler
2162 *  
2163 *  
2164 * @endcode
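 *
 * For reference, the acceptance test in the `sample()` function above is
 * the usual Metropolis-Hastings rule, just written in log form: a trial
 * sample \(\tilde\theta\) is accepted with probability
 * \(\alpha = \min\!\left(1,\;
 *   \frac{\pi(\tilde\theta)}{\pi(\theta)}\, r\right)\),
 * where \(\pi\) denotes the (unnormalized) posterior -- the product of
 * likelihood and prior whose logarithm the code computes -- and \(r\) is
 * the proposal ratio returned as the second component of
 * `proposal_generator.perturb()`. Comparing
 * \(\exp(\log\pi(\tilde\theta)-\log\pi(\theta))\cdot r\) against a draw
 * from \(U(0,1)\) accepts with exactly this probability.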
2165 *
2166 * The final function is `main()`, which simply puts all of these pieces
2167 * together into one. The "exact solution", i.e., the "measurement values"
2168 * we use for this program, are tabulated below to make it easier for other
2169 * people to use them in their own implementations of this benchmark. These
2170 * values were created using the same main class as above, but with 8 mesh
2171 * refinements and a Q3 element -- i.e., using a much more accurate
2172 * method than the one we use in the forward simulator for generating
2173 * samples below (which uses 5 global mesh refinement steps and a Q1
2174 * element). If you wanted to regenerate this set of numbers, then
2175 * the following code snippet would do that:
2176 * <div class=CodeFragmentInTutorialComment>
2177 * @code
2178 * /* Set the exact coefficient: */
2179 * Vector<double> exact_coefficients(64);
2180 * for (auto &el : exact_coefficients)
2181 * el = 1.;
2182 * exact_coefficients(9) = exact_coefficients(10) = exact_coefficients(17) =
2183 * exact_coefficients(18) = 0.1;
2184 * exact_coefficients(45) = exact_coefficients(46) = exact_coefficients(53) =
2185 * exact_coefficients(54) = 10.;
2186 *
2187
2188 * /* Compute the "correct" solution vector: */
2189 * const Vector<double> exact_solution =
2190 * ForwardSimulator::PoissonSolver<2>(/* global_refinements = */ 8,
2191 * /* fe_degree = */ 3,
2192 * /* prefix = */ "exact")
2193 * .evaluate(exact_coefficients);
2194 * @endcode
2195 * </div>
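 *
 * If you do regenerate these numbers, one minimal way to obtain them with
 * a precision comparable to the table below is to print the resulting
 * vector with many digits. (The file name used here is only an
 * illustrative choice, not one the program itself relies on.)
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * std::ofstream exact_output("exact_solution.txt");
 * exact_solution.print(exact_output,
 *                      /* precision = */ 16,
 *                      /* scientific = */ false);
 * @endcode
 * </div>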
2196 *
2197 * @code
2198 *   int main()
2199 *   {
2200 *   const bool testing = true;
2201 *  
2202 * @endcode
2203 *
2204 * Run with one thread, so as to not step on other processes
2205 * doing the same at the same time. It turns out that the problem
2206 * is also so small that running with more than one thread
2207 * *increases* the runtime.
2208 *
2209 * @code
 2210 *   MultithreadInfo::set_thread_limit(1);
 2211 *  
2212 *   const unsigned int random_seed = (testing ? 1U : std::random_device()());
2213 *   const std::string dataset_name = Utilities::to_string(random_seed, 10);
2214 *  
2215 *   const Vector<double> exact_solution(
2216 *   { 0.06076511762259369, 0.09601910120848481,
2217 *   0.1238852517838584, 0.1495184117375201,
2218 *   0.1841596127549784, 0.2174525028261122,
2219 *   0.2250996160898698, 0.2197954769002993,
2220 *   0.2074695698370926, 0.1889996477663016,
2221 *   0.1632722532153726, 0.1276782480038186,
2222 *   0.07711845915789312, 0.09601910120848552,
2223 *   0.2000589533367983, 0.3385592591951766,
2224 *   0.3934300024647806, 0.4040223892461541,
2225 *   0.4122329537843092, 0.4100480091545554,
2226 *   0.3949151637189968, 0.3697873264791232,
2227 *   0.33401826235924, 0.2850397806663382,
2228 *   0.2184260032478671, 0.1271121156350957,
2229 *   0.1238852517838611, 0.3385592591951819,
2230 *   0.7119285162766475, 0.8175712861756428,
2231 *   0.6836254116578105, 0.5779452419831157,
2232 *   0.5555615956136897, 0.5285181561736719,
2233 *   0.491439702849224, 0.4409367494853282,
2234 *   0.3730060082060772, 0.2821694983395214,
2235 *   0.1610176733857739, 0.1495184117375257,
2236 *   0.3934300024647929, 0.8175712861756562,
2237 *   0.9439154625527653, 0.8015904115095128,
2238 *   0.6859683749254024, 0.6561235366960599,
2239 *   0.6213197201867315, 0.5753611315000049,
2240 *   0.5140091754526823, 0.4325325506354165,
2241 *   0.3248315148915482, 0.1834600412730086,
2242 *   0.1841596127549917, 0.4040223892461832,
2243 *   0.6836254116578439, 0.8015904115095396,
2244 *   0.7870119561144977, 0.7373108331395808,
2245 *   0.7116558878070463, 0.6745179049094283,
2246 *   0.6235300574156917, 0.5559332704045935,
2247 *   0.4670304994474178, 0.3499809143811,
2248 *   0.19688263746294, 0.2174525028261253,
2249 *   0.4122329537843404, 0.5779452419831566,
2250 *   0.6859683749254372, 0.7373108331396063,
2251 *   0.7458811983178246, 0.7278968022406559,
2252 *   0.6904793535357751, 0.6369176452710288,
2253 *   0.5677443693743215, 0.4784738764865867,
2254 *   0.3602190632823262, 0.2031792054737325,
2255 *   0.2250996160898818, 0.4100480091545787,
2256 *   0.5555615956137137, 0.6561235366960938,
2257 *   0.7116558878070715, 0.727896802240657,
2258 *   0.7121928678670187, 0.6712187391428729,
2259 *   0.6139157775591492, 0.5478251665295381,
2260 *   0.4677122687599031, 0.3587654911000848,
2261 *   0.2050734291675918, 0.2197954769003094,
2262 *   0.3949151637190157, 0.5285181561736911,
2263 *   0.6213197201867471, 0.6745179049094407,
2264 *   0.690479353535786, 0.6712187391428787,
2265 *   0.6178408289359514, 0.5453605027237883,
2266 *   0.489575966490909, 0.4341716881061278,
2267 *   0.3534389974779456, 0.2083227496961347,
2268 *   0.207469569837099, 0.3697873264791366,
2269 *   0.4914397028492412, 0.5753611315000203,
2270 *   0.6235300574157017, 0.6369176452710497,
2271 *   0.6139157775591579, 0.5453605027237935,
2272 *   0.4336604929612851, 0.4109641743019312,
2273 *   0.3881864790111245, 0.3642640090182592,
2274 *   0.2179599909280145, 0.1889996477663011,
2275 *   0.3340182623592461, 0.4409367494853381,
2276 *   0.5140091754526943, 0.5559332704045969,
2277 *   0.5677443693743304, 0.5478251665295453,
2278 *   0.4895759664908982, 0.4109641743019171,
2279 *   0.395727260284338, 0.3778949322004734,
2280 *   0.3596268271857124, 0.2191250268948948,
2281 *   0.1632722532153683, 0.2850397806663325,
2282 *   0.373006008206081, 0.4325325506354207,
2283 *   0.4670304994474315, 0.4784738764866023,
2284 *   0.4677122687599041, 0.4341716881061055,
2285 *   0.388186479011099, 0.3778949322004602,
2286 *   0.3633362567187364, 0.3464457261905399,
2287 *   0.2096362321365655, 0.1276782480038148,
2288 *   0.2184260032478634, 0.2821694983395252,
2289 *   0.3248315148915535, 0.3499809143811097,
2290 *   0.3602190632823333, 0.3587654911000799,
2291 *   0.3534389974779268, 0.3642640090182283,
2292 *   0.35962682718569, 0.3464457261905295,
2293 *   0.3260728953424643, 0.180670595355394,
2294 *   0.07711845915789244, 0.1271121156350963,
2295 *   0.1610176733857757, 0.1834600412730144,
2296 *   0.1968826374629443, 0.2031792054737354,
2297 *   0.2050734291675885, 0.2083227496961245,
2298 *   0.2179599909279998, 0.2191250268948822,
2299 *   0.2096362321365551, 0.1806705953553887,
2300 *   0.1067965550010013 });
2301 *  
2302 * @endcode
2303 *
2304 * Now run the forward simulator for samples:
2305 *
2306 * @code
2307 *   ForwardSimulator::PoissonSolver<2> laplace_problem(
2308 *   /* global_refinements = */ 5,
2309 *   /* fe_degree = */ 1,
2310 *   dataset_name);
2311 *   LogLikelihood::Gaussian log_likelihood(exact_solution, 0.05);
2312 *   LogPrior::LogGaussian log_prior(0, 2);
2313 *   ProposalGenerator::LogGaussian proposal_generator(
2314 *   random_seed, 0.09); /* so that the acceptance ratio is ~0.24 */
2315 *   Sampler::MetropolisHastings sampler(laplace_problem,
2316 *   log_likelihood,
2317 *   log_prior,
2318 *   proposal_generator,
2319 *   random_seed,
2320 *   dataset_name);
2321 *  
2322 *   Vector<double> starting_coefficients(64);
2323 *   for (auto &el : starting_coefficients)
2324 *   el = 1.;
2325 *   sampler.sample(starting_coefficients,
2326 *   (testing ? 250 * 40 /* takes 10 seconds */
2327 *   :
2328 *   100000000 /* takes 1.5 days */
2329 *   ));
2330 *   }
2331 * @endcode
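 *
 * The samples file written by `write_sample()` above has one line per
 * sample: the log posterior, the running count of accepted samples, and
 * then the 64 coefficients. As a minimal post-processing sketch (not part
 * of the benchmark itself; it only assumes this output format, the
 * `dataset_name` variable as defined in `main()`, and the usual
 * `<fstream>`, `<sstream>`, and `<vector>` headers), one could compute
 * the empirical acceptance ratio and the componentwise sample mean like
 * this:
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * std::ifstream samples("samples-" + dataset_name + ".txt");
 *
 * std::vector<double> theta_mean(64, 0.);
 * double              log_posterior  = 0;
 * unsigned int        accepted       = 0;
 * unsigned int        n_samples_read = 0;
 *
 * std::string line;
 * while (std::getline(samples, line))
 *   {
 *     std::istringstream iss(line);
 *     iss >> log_posterior >> accepted;
 *     for (auto &m : theta_mean)
 *       {
 *         double theta_i;
 *         iss >> theta_i;
 *         m += theta_i;
 *       }
 *     ++n_samples_read;
 *   }
 *
 * for (auto &m : theta_mean)
 *   m /= n_samples_read;
 *
 * // The last value read from the second column is the total number of
 * // accepted samples, so dividing by the number of lines read gives the
 * // empirical acceptance ratio:
 * const double acceptance_ratio = 1. * accepted / n_samples_read;
 * @endcode
 * </div>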
2332
2333
2334*/